4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
36 #include <linux/moduleparam.h>
38 #include <lnet/lib-lnet.h>
40 #define D_LNI D_CONSOLE
42 struct lnet the_lnet; /* THE state of the network */
43 EXPORT_SYMBOL(the_lnet);
45 static char *ip2nets = "";
46 module_param(ip2nets, charp, 0444);
47 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
49 static char *networks = "";
50 module_param(networks, charp, 0444);
51 MODULE_PARM_DESC(networks, "local networks");
53 static char *routes = "";
54 module_param(routes, charp, 0444);
55 MODULE_PARM_DESC(routes, "routes to non-local networks");
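/*
 * Illustrative configuration sketch (not from this file): these three
 * parameters are normally supplied at module load time via modprobe
 * configuration, along the lines of
 *
 *	options lnet networks="tcp0(eth0)" routes="o2ib0 10.10.0.1@tcp0"
 *
 * or, mutually exclusive with 'networks':
 *
 *	options lnet ip2nets="tcp0 192.168.1.*; o2ib0 10.10.0.*"
 *
 * The interface names, NIDs and patterns above are hypothetical; the
 * strings are parsed later by lnet_parse_networks(), lnet_parse_routes()
 * and lnet_parse_ip2nets().
 */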
57 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
58 module_param(rnet_htable_size, int, 0444);
59 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
61 static int use_tcp_bonding = false;
62 module_param(use_tcp_bonding, int, 0444);
63 MODULE_PARM_DESC(use_tcp_bonding,
64 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
66 unsigned int lnet_numa_range = 0;
67 module_param(lnet_numa_range, uint, 0444);
68 MODULE_PARM_DESC(lnet_numa_range,
69 "NUMA range to consider during Multi-Rail selection");
71 static int lnet_interfaces_max = LNET_INTERFACES_MAX_DEFAULT;
72 static int intf_max_set(const char *val, struct kernel_param *kp);
73 module_param_call(lnet_interfaces_max, intf_max_set, param_get_int,
74 &lnet_interfaces_max, S_IRUGO|S_IWUSR);
75 MODULE_PARM_DESC(lnet_interfaces_max,
76 "Maximum number of interfaces in a node.");
78 unsigned lnet_peer_discovery_disabled = 0;
79 static int discovery_set(const char *val, struct kernel_param *kp);
80 module_param_call(lnet_peer_discovery_disabled, discovery_set, param_get_int,
81 &lnet_peer_discovery_disabled, S_IRUGO|S_IWUSR);
82 MODULE_PARM_DESC(lnet_peer_discovery_disabled,
83 "Set to 1 to disable peer discovery on this node.");
86 * This sequence number keeps track of how many times DLC was used to
87 * update the local NIs. It is incremented when a NI is added or
88 * removed and checked when sending a message to determine if there is
89 * a need to re-run the selection algorithm. See lnet_select_pathway()
90 * for more details on its usage.
92 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
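/*
 * Illustrative sketch (assumed from the comment above, not a verbatim
 * call site) of how a sender is expected to use the sequence number:
 *
 *	int seq = atomic_read(&lnet_dlc_seq_no);
 *	... select a local NI and send ...
 *	if (seq != atomic_read(&lnet_dlc_seq_no))
 *		... the local NI configuration changed underneath us ...
 *
 * See lnet_select_pathway() for the real usage.
 */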
94 static int lnet_ping(struct lnet_process_id id, signed long timeout,
95 struct lnet_process_id __user *ids, int n_ids);
98 discovery_set(const char *val, struct kernel_param *kp)
103 rc = kstrtoul(val, 0, &value);
105 CERROR("Invalid module parameter value for 'lnet_peer_discovery_disabled'\n");
109 *(unsigned *)kp->arg = (value) ? 1 : 0;
115 intf_max_set(const char *val, struct kernel_param *kp)
119 rc = kstrtoint(val, 0, &value);
121 CERROR("Invalid module parameter value for 'lnet_interfaces_max'\n");
125 if (value < LNET_INTERFACES_MIN) {
126 CWARN("max interfaces provided are too small, setting to %d\n",
127 LNET_INTERFACES_MIN);
128 value = LNET_INTERFACES_MIN;
131 *(int *)kp->arg = value;
137 lnet_get_routes(void)
143 lnet_get_networks(void)
148 if (*networks != 0 && *ip2nets != 0) {
149 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
150 "'ip2nets' but not both at once\n");
155 rc = lnet_parse_ip2nets(&nets, ip2nets);
156 return (rc == 0) ? nets : NULL;
166 lnet_init_locks(void)
168 spin_lock_init(&the_lnet.ln_eq_wait_lock);
169 init_waitqueue_head(&the_lnet.ln_eq_waitq);
170 init_waitqueue_head(&the_lnet.ln_rc_waitq);
171 mutex_init(&the_lnet.ln_lnd_mutex);
172 mutex_init(&the_lnet.ln_api_mutex);
176 lnet_fini_locks(void)
180 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
181 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
185 lnet_descriptor_setup(void)
187 /* create specific kmem_cache for MEs and small MDs (i.e., originally
188 * allocated in <size-xxx> kmem_cache).
190 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(struct lnet_me),
192 if (!lnet_mes_cachep)
195 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
196 LNET_SMALL_MD_SIZE, 0, 0,
198 if (!lnet_small_mds_cachep)
205 lnet_descriptor_cleanup(void)
208 if (lnet_small_mds_cachep) {
209 kmem_cache_destroy(lnet_small_mds_cachep);
210 lnet_small_mds_cachep = NULL;
213 if (lnet_mes_cachep) {
214 kmem_cache_destroy(lnet_mes_cachep);
215 lnet_mes_cachep = NULL;
220 lnet_create_remote_nets_table(void)
223 struct list_head *hash;
225 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
226 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
227 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
229 CERROR("Failed to create remote nets hash table\n");
233 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
234 INIT_LIST_HEAD(&hash[i]);
235 the_lnet.ln_remote_nets_hash = hash;
240 lnet_destroy_remote_nets_table(void)
244 if (the_lnet.ln_remote_nets_hash == NULL)
247 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
248 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
250 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
251 LNET_REMOTE_NETS_HASH_SIZE *
252 sizeof(the_lnet.ln_remote_nets_hash[0]));
253 the_lnet.ln_remote_nets_hash = NULL;
257 lnet_destroy_locks(void)
259 if (the_lnet.ln_res_lock != NULL) {
260 cfs_percpt_lock_free(the_lnet.ln_res_lock);
261 the_lnet.ln_res_lock = NULL;
264 if (the_lnet.ln_net_lock != NULL) {
265 cfs_percpt_lock_free(the_lnet.ln_net_lock);
266 the_lnet.ln_net_lock = NULL;
273 lnet_create_locks(void)
277 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
278 if (the_lnet.ln_res_lock == NULL)
281 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
282 if (the_lnet.ln_net_lock == NULL)
288 lnet_destroy_locks();
292 static void lnet_assert_wire_constants(void)
294 /* Wire protocol assertions generated by 'wirecheck'
295 * running on Linux robert.bartonsoftware.com 2.6.8-1.521
296 * #1 Mon Aug 16 09:01:18 EDT 2004 i686 athlon i386 GNU/Linux
297 * with gcc version 3.3.3 20040412 (Red Hat Linux 3.3.3-7) */
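/*
 * CLASSERT() is a libcfs compile-time assertion (roughly
 * BUILD_BUG_ON(!(cond))): each check below only compiles while the
 * stated constant, size or offset still holds, e.g.
 *
 *	CLASSERT(LNET_MSG_ACK == 0);
 *
 * fails the build if the on-wire value of LNET_MSG_ACK ever changes.
 */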
300 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
301 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
302 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
303 CLASSERT(LNET_MSG_ACK == 0);
304 CLASSERT(LNET_MSG_PUT == 1);
305 CLASSERT(LNET_MSG_GET == 2);
306 CLASSERT(LNET_MSG_REPLY == 3);
307 CLASSERT(LNET_MSG_HELLO == 4);
309 /* Checks for struct lnet_handle_wire */
310 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
311 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
312 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
313 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
314 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
316 /* Checks for struct lnet_magicversion */
317 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
318 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
319 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
320 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
321 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
322 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
323 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
325 /* Checks for struct lnet_hdr */
326 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
327 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
328 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
329 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
330 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
331 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
332 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
333 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
334 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
335 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
336 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
337 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
338 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
339 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
340 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
343 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
344 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
345 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
346 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
347 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
348 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
351 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
352 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
353 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
354 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
355 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
356 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
357 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
358 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
359 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
360 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
363 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
364 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
365 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
366 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
367 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
368 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
369 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
370 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
371 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
372 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
375 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
376 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
379 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
380 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
381 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
382 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
384 /* Checks for struct lnet_ni_status and related constants */
385 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
386 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
387 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
389 /* Checks for struct lnet_ni_status */
390 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
391 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
392 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
393 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
394 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
395 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
396 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
398 /* Checks for struct lnet_ping_info and related constants */
399 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
400 CLASSERT(LNET_PING_FEAT_INVAL == 0);
401 CLASSERT(LNET_PING_FEAT_BASE == 1);
402 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
403 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
404 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
405 CLASSERT(LNET_PING_FEAT_DISCOVERY == 16);
406 CLASSERT(LNET_PING_FEAT_BITS == 31);
408 /* Checks for struct lnet_ping_info */
409 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
410 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
411 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
412 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
413 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
414 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
415 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
416 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
417 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
418 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
419 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
422 static struct lnet_lnd *lnet_find_lnd_by_type(__u32 type)
424 struct lnet_lnd *lnd;
425 struct list_head *tmp;
427 /* holding lnd mutex */
428 list_for_each(tmp, &the_lnet.ln_lnds) {
429 lnd = list_entry(tmp, struct lnet_lnd, lnd_list);
431 if (lnd->lnd_type == type)
438 lnet_register_lnd(struct lnet_lnd *lnd)
440 mutex_lock(&the_lnet.ln_lnd_mutex);
442 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
443 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
445 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
446 lnd->lnd_refcount = 0;
448 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
450 mutex_unlock(&the_lnet.ln_lnd_mutex);
452 EXPORT_SYMBOL(lnet_register_lnd);
455 lnet_unregister_lnd(struct lnet_lnd *lnd)
457 mutex_lock(&the_lnet.ln_lnd_mutex);
459 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
460 LASSERT(lnd->lnd_refcount == 0);
462 list_del(&lnd->lnd_list);
463 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
465 mutex_unlock(&the_lnet.ln_lnd_mutex);
467 EXPORT_SYMBOL(lnet_unregister_lnd);
470 lnet_counters_get(struct lnet_counters *counters)
472 struct lnet_counters *ctr;
475 memset(counters, 0, sizeof(*counters));
477 lnet_net_lock(LNET_LOCK_EX);
479 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
480 counters->msgs_max += ctr->msgs_max;
481 counters->msgs_alloc += ctr->msgs_alloc;
482 counters->errors += ctr->errors;
483 counters->send_count += ctr->send_count;
484 counters->recv_count += ctr->recv_count;
485 counters->route_count += ctr->route_count;
486 counters->drop_count += ctr->drop_count;
487 counters->send_length += ctr->send_length;
488 counters->recv_length += ctr->recv_length;
489 counters->route_length += ctr->route_length;
490 counters->drop_length += ctr->drop_length;
493 lnet_net_unlock(LNET_LOCK_EX);
495 EXPORT_SYMBOL(lnet_counters_get);
498 lnet_counters_reset(void)
500 struct lnet_counters *counters;
503 lnet_net_lock(LNET_LOCK_EX);
505 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
506 memset(counters, 0, sizeof(struct lnet_counters));
508 lnet_net_unlock(LNET_LOCK_EX);
512 lnet_res_type2str(int type)
517 case LNET_COOKIE_TYPE_MD:
519 case LNET_COOKIE_TYPE_ME:
521 case LNET_COOKIE_TYPE_EQ:
527 lnet_res_container_cleanup(struct lnet_res_container *rec)
531 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
534 while (!list_empty(&rec->rec_active)) {
535 struct list_head *e = rec->rec_active.next;
538 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
539 lnet_eq_free(list_entry(e, struct lnet_eq, eq_list));
541 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
542 lnet_md_free(list_entry(e, struct lnet_libmd, md_list));
544 } else { /* NB: Active MEs should be attached on portals */
551 /* Found live MDs/MEs/EQs; the user really should unlink/free
552 * all of them before finalizing LNet, but if someone didn't,
553 * we have to recycle the garbage for them */
554 CERROR("%d active elements on exit of %s container\n",
555 count, lnet_res_type2str(rec->rec_type));
558 if (rec->rec_lh_hash != NULL) {
559 LIBCFS_FREE(rec->rec_lh_hash,
560 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
561 rec->rec_lh_hash = NULL;
564 rec->rec_type = 0; /* mark it as finalized */
568 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
573 LASSERT(rec->rec_type == 0);
575 rec->rec_type = type;
576 INIT_LIST_HEAD(&rec->rec_active);
578 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
580 /* Arbitrary choice of hash table size */
581 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
582 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
583 if (rec->rec_lh_hash == NULL) {
588 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
589 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
594 CERROR("Failed to setup %s resource container\n",
595 lnet_res_type2str(type));
596 lnet_res_container_cleanup(rec);
601 lnet_res_containers_destroy(struct lnet_res_container **recs)
603 struct lnet_res_container *rec;
606 cfs_percpt_for_each(rec, i, recs)
607 lnet_res_container_cleanup(rec);
609 cfs_percpt_free(recs);
612 static struct lnet_res_container **
613 lnet_res_containers_create(int type)
615 struct lnet_res_container **recs;
616 struct lnet_res_container *rec;
620 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
622 CERROR("Failed to allocate %s resource containers\n",
623 lnet_res_type2str(type));
627 cfs_percpt_for_each(rec, i, recs) {
628 rc = lnet_res_container_setup(rec, i, type);
630 lnet_res_containers_destroy(recs);
638 struct lnet_libhandle *
639 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
641 /* ALWAYS called with lnet_res_lock held */
642 struct list_head *head;
643 struct lnet_libhandle *lh;
646 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
649 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
650 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
652 list_for_each_entry(lh, head, lh_hash_chain) {
653 if (lh->lh_cookie == cookie)
661 lnet_res_lh_initialize(struct lnet_res_container *rec,
662 struct lnet_libhandle *lh)
664 /* ALWAYS called with lnet_res_lock held */
665 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
668 lh->lh_cookie = rec->rec_lh_cookie;
669 rec->rec_lh_cookie += 1 << ibits;
671 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
673 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
676 static int lnet_unprepare(void);
679 lnet_prepare(lnet_pid_t requested_pid)
681 /* Prepare to bring up the network */
682 struct lnet_res_container **recs;
685 if (requested_pid == LNET_PID_ANY) {
686 /* Don't instantiate LNET just for me */
690 LASSERT(the_lnet.ln_refcount == 0);
692 the_lnet.ln_routing = 0;
694 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
695 the_lnet.ln_pid = requested_pid;
697 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
698 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
699 INIT_LIST_HEAD(&the_lnet.ln_nets);
700 INIT_LIST_HEAD(&the_lnet.ln_routers);
701 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
702 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
704 rc = lnet_descriptor_setup();
708 rc = lnet_create_remote_nets_table();
713 * NB the interface cookie in wire handles guards against delayed
714 * replies and ACKs appearing valid after reboot.
716 the_lnet.ln_interface_cookie = ktime_get_real_ns();
718 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
719 sizeof(struct lnet_counters));
720 if (the_lnet.ln_counters == NULL) {
721 CERROR("Failed to allocate counters for LNet\n");
726 rc = lnet_peer_tables_create();
730 rc = lnet_msg_containers_create();
734 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
735 LNET_COOKIE_TYPE_EQ);
739 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
745 the_lnet.ln_me_containers = recs;
747 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
753 the_lnet.ln_md_containers = recs;
755 rc = lnet_portals_create();
757 CERROR("Failed to create portals for LNet: %d\n", rc);
769 lnet_unprepare (void)
771 /* NB no LNET_LOCK since this is the last reference. All LND instances
772 * have shut down already, so it is safe to unlink and free all
773 * descriptors, even those that appear committed to a network op (eg MD
774 * with non-zero pending count) */
776 lnet_fail_nid(LNET_NID_ANY, 0);
778 LASSERT(the_lnet.ln_refcount == 0);
779 LASSERT(list_empty(&the_lnet.ln_test_peers));
780 LASSERT(list_empty(&the_lnet.ln_nets));
782 lnet_portals_destroy();
784 if (the_lnet.ln_md_containers != NULL) {
785 lnet_res_containers_destroy(the_lnet.ln_md_containers);
786 the_lnet.ln_md_containers = NULL;
789 if (the_lnet.ln_me_containers != NULL) {
790 lnet_res_containers_destroy(the_lnet.ln_me_containers);
791 the_lnet.ln_me_containers = NULL;
794 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
796 lnet_msg_containers_destroy();
798 lnet_rtrpools_free(0);
800 if (the_lnet.ln_counters != NULL) {
801 cfs_percpt_free(the_lnet.ln_counters);
802 the_lnet.ln_counters = NULL;
804 lnet_destroy_remote_nets_table();
805 lnet_descriptor_cleanup();
811 lnet_net2ni_locked(__u32 net_id, int cpt)
814 struct lnet_net *net;
816 LASSERT(cpt != LNET_LOCK_EX);
818 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
819 if (net->net_id == net_id) {
820 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
830 lnet_net2ni_addref(__u32 net)
835 ni = lnet_net2ni_locked(net, 0);
837 lnet_ni_addref_locked(ni, 0);
842 EXPORT_SYMBOL(lnet_net2ni_addref);
845 lnet_get_net_locked(__u32 net_id)
847 struct lnet_net *net;
849 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
850 if (net->net_id == net_id)
858 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
863 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
868 val = hash_long(key, LNET_CPT_BITS);
869 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
873 return (unsigned int)(key + val + (val >> 1)) % number;
877 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
879 struct lnet_net *net;
881 /* must be called with lnet_net_lock held */
882 if (LNET_CPT_NUMBER == 1)
883 return 0; /* the only one */
886 * If NI is provided then use the CPT identified in the NI cpt
887 * list if one exists. If one doesn't exist, then that NI is
888 * associated with all CPTs and it follows that the net it belongs
889 * to is implicitly associated with all CPTs, so just hash the nid
893 if (ni->ni_cpts != NULL)
894 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
897 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
900 /* no NI provided so look at the net */
901 net = lnet_get_net_locked(LNET_NIDNET(nid));
903 if (net != NULL && net->net_cpts != NULL) {
904 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
907 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
911 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
916 if (LNET_CPT_NUMBER == 1)
917 return 0; /* the only one */
919 cpt = lnet_net_lock_current();
921 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
923 lnet_net_unlock(cpt);
927 EXPORT_SYMBOL(lnet_cpt_of_nid);
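/*
 * Usage sketch (hypothetical caller, not a call site in this file): an
 * LND or upper layer wanting per-CPT state for a peer would typically do
 *
 *	int cpt = lnet_cpt_of_nid(peer_nid, NULL);
 *
 * and then index a cfs_percpt array with the returned CPT number.
 */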
930 lnet_islocalnet(__u32 net_id)
932 struct lnet_net *net;
936 cpt = lnet_net_lock_current();
938 net = lnet_get_net_locked(net_id);
942 lnet_net_unlock(cpt);
948 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
950 if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
951 ni->ni_state == LNET_NI_STATE_DEGRADED)
958 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
960 struct lnet_net *net;
963 LASSERT(cpt != LNET_LOCK_EX);
965 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
966 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
967 if (ni->ni_nid == nid)
976 lnet_nid2ni_addref(lnet_nid_t nid)
981 ni = lnet_nid2ni_locked(nid, 0);
983 lnet_ni_addref_locked(ni, 0);
988 EXPORT_SYMBOL(lnet_nid2ni_addref);
991 lnet_islocalnid(lnet_nid_t nid)
996 cpt = lnet_net_lock_current();
997 ni = lnet_nid2ni_locked(nid, cpt);
998 lnet_net_unlock(cpt);
1004 lnet_count_acceptor_nets(void)
1006 /* Return the # of NIs that need the acceptor. */
1008 struct lnet_net *net;
1011 cpt = lnet_net_lock_current();
1012 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1013 /* all socklnd type networks should have the acceptor
1015 if (net->net_lnd->lnd_accept != NULL)
1019 lnet_net_unlock(cpt);
1024 struct lnet_ping_buffer *
1025 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
1027 struct lnet_ping_buffer *pbuf;
1029 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
1031 pbuf->pb_nnis = nnis;
1032 atomic_set(&pbuf->pb_refcnt, 1);
1039 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
1041 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
1042 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
1045 static struct lnet_ping_buffer *
1046 lnet_ping_target_create(int nnis)
1048 struct lnet_ping_buffer *pbuf;
1050 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1052 CERROR("Can't allocate ping source [%d]\n", nnis);
1056 pbuf->pb_info.pi_nnis = nnis;
1057 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1058 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1059 pbuf->pb_info.pi_features = LNET_PING_FEAT_NI_STATUS;
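/*
 * The ping info set up above is a flattened structure: a struct
 * lnet_ping_info header (pi_magic, pi_features, pi_pid, pi_nnis)
 * followed directly by pi_ni[], one struct lnet_ni_status entry per
 * local NI.  lnet_ping_target_install_locked() fills in the pi_ni[]
 * entries, and peers read the whole buffer remotely via a GET on the
 * reserved portal (see lnet_ping_target_setup()).
 */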
1065 lnet_get_net_ni_count_locked(struct lnet_net *net)
1070 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1077 lnet_get_net_ni_count_pre(struct lnet_net *net)
1082 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1089 lnet_get_ni_count(void)
1092 struct lnet_net *net;
1097 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1098 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1108 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1112 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1114 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1116 /* Loopback is guaranteed to be present */
1117 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_interfaces_max)
1119 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1125 lnet_ping_target_destroy(void)
1127 struct lnet_net *net;
1130 lnet_net_lock(LNET_LOCK_EX);
1132 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1133 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1135 ni->ni_status = NULL;
1140 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1141 the_lnet.ln_ping_target = NULL;
1143 lnet_net_unlock(LNET_LOCK_EX);
1147 lnet_ping_target_event_handler(struct lnet_event *event)
1149 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1151 if (event->unlinked)
1152 lnet_ping_buffer_decref(pbuf);
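/*
 * lnet_ping_target_setup() below publishes the ping buffer: it creates
 * the ping event queue (when set_eq is requested), allocates the buffer,
 * and attaches an ME/MD on LNET_RESERVED_PORTAL with match bits
 * LNET_PROTO_PING_MATCHBITS, marked LNET_MD_OP_GET | LNET_MD_MANAGE_REMOTE
 * so that peers can fetch this node's NI status with a GET.
 */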
1156 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1157 struct lnet_handle_md *ping_mdh,
1158 int ni_count, bool set_eq)
1160 struct lnet_process_id id = {
1161 .nid = LNET_NID_ANY,
1164 struct lnet_handle_me me_handle;
1165 struct lnet_md md = { NULL };
1169 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1170 &the_lnet.ln_ping_target_eq);
1172 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1177 *ppbuf = lnet_ping_target_create(ni_count);
1178 if (*ppbuf == NULL) {
1183 /* Ping target ME/MD */
1184 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1185 LNET_PROTO_PING_MATCHBITS, 0,
1186 LNET_UNLINK, LNET_INS_AFTER,
1189 CERROR("Can't create ping target ME: %d\n", rc);
1190 goto fail_decref_ping_buffer;
1193 /* initialize md content */
1194 md.start = &(*ppbuf)->pb_info;
1195 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1196 md.threshold = LNET_MD_THRESH_INF;
1198 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1199 LNET_MD_MANAGE_REMOTE;
1200 md.eq_handle = the_lnet.ln_ping_target_eq;
1201 md.user_ptr = *ppbuf;
1203 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1205 CERROR("Can't attach ping target MD: %d\n", rc);
1206 goto fail_unlink_ping_me;
1208 lnet_ping_buffer_addref(*ppbuf);
1212 fail_unlink_ping_me:
1213 rc2 = LNetMEUnlink(me_handle);
1215 fail_decref_ping_buffer:
1216 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1217 lnet_ping_buffer_decref(*ppbuf);
1221 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1228 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
1229 struct lnet_handle_md *ping_mdh)
1231 sigset_t blocked = cfs_block_allsigs();
1233 LNetMDUnlink(*ping_mdh);
1234 LNetInvalidateMDHandle(ping_mdh);
1236 /* NB the MD could be busy; this just starts the unlink */
1237 while (lnet_ping_buffer_numref(pbuf) > 1) {
1238 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1239 set_current_state(TASK_UNINTERRUPTIBLE);
1240 schedule_timeout(cfs_time_seconds(1));
1243 cfs_restore_sigs(blocked);
1247 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1250 struct lnet_net *net;
1251 struct lnet_ni_status *ns;
1256 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1257 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1258 LASSERT(i < pbuf->pb_nnis);
1260 ns = &pbuf->pb_info.pi_ni[i];
1262 ns->ns_nid = ni->ni_nid;
1265 ns->ns_status = (ni->ni_status != NULL) ?
1266 ni->ni_status->ns_status :
1275 * We (ab)use the ns_status of the loopback interface to
1276 * transmit the sequence number. The first interface listed
1277 * must be the loopback interface.
1279 rc = lnet_ping_info_validate(&pbuf->pb_info);
1281 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1284 LNET_PING_BUFFER_SEQNO(pbuf) =
1285 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
1289 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1290 struct lnet_handle_md ping_mdh)
1292 struct lnet_ping_buffer *old_pbuf = NULL;
1293 struct lnet_handle_md old_ping_md;
1295 /* switch the NIs to point to the new ping info created */
1296 lnet_net_lock(LNET_LOCK_EX);
1298 if (!the_lnet.ln_routing)
1299 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1301 /* Ensure only known feature bits have been set. */
1302 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1303 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1305 lnet_ping_target_install_locked(pbuf);
1307 if (the_lnet.ln_ping_target) {
1308 old_pbuf = the_lnet.ln_ping_target;
1309 old_ping_md = the_lnet.ln_ping_target_md;
1311 the_lnet.ln_ping_target_md = ping_mdh;
1312 the_lnet.ln_ping_target = pbuf;
1314 lnet_net_unlock(LNET_LOCK_EX);
1317 /* unlink and free the old ping info */
1318 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1319 lnet_ping_buffer_decref(old_pbuf);
1324 lnet_ping_target_fini(void)
1328 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1329 &the_lnet.ln_ping_target_md);
1331 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1334 lnet_ping_target_destroy();
1337 /* Resize the push target. */
1338 int lnet_push_target_resize(void)
1340 struct lnet_process_id id = { LNET_NID_ANY, LNET_PID_ANY };
1341 struct lnet_md md = { NULL };
1342 struct lnet_handle_me meh;
1343 struct lnet_handle_md mdh;
1344 struct lnet_handle_md old_mdh;
1345 struct lnet_ping_buffer *pbuf;
1346 struct lnet_ping_buffer *old_pbuf;
1347 int nnis = the_lnet.ln_push_target_nnis;
1355 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1361 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1362 LNET_PROTO_PING_MATCHBITS, 0,
1363 LNET_UNLINK, LNET_INS_AFTER,
1366 CERROR("Can't create push target ME: %d\n", rc);
1367 goto fail_decref_pbuf;
1370 /* initialize md content */
1371 md.start = &pbuf->pb_info;
1372 md.length = LNET_PING_INFO_SIZE(nnis);
1373 md.threshold = LNET_MD_THRESH_INF;
1375 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1376 LNET_MD_MANAGE_REMOTE;
1378 md.eq_handle = the_lnet.ln_push_target_eq;
1380 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1382 CERROR("Can't attach push MD: %d\n", rc);
1383 goto fail_unlink_meh;
1385 lnet_ping_buffer_addref(pbuf);
1387 lnet_net_lock(LNET_LOCK_EX);
1388 old_pbuf = the_lnet.ln_push_target;
1389 old_mdh = the_lnet.ln_push_target_md;
1390 the_lnet.ln_push_target = pbuf;
1391 the_lnet.ln_push_target_md = mdh;
1392 lnet_net_unlock(LNET_LOCK_EX);
1395 LNetMDUnlink(old_mdh);
1396 lnet_ping_buffer_decref(old_pbuf);
1399 if (nnis < the_lnet.ln_push_target_nnis)
1402 CDEBUG(D_NET, "nnis %d success\n", nnis);
1409 lnet_ping_buffer_decref(pbuf);
1411 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1415 static void lnet_push_target_event_handler(struct lnet_event *ev)
1417 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1419 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1420 lnet_swap_pinginfo(pbuf);
1423 lnet_ping_buffer_decref(pbuf);
1426 /* Initialize the push target. */
1427 static int lnet_push_target_init(void)
1431 if (the_lnet.ln_push_target)
1434 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1435 &the_lnet.ln_push_target_eq);
1437 CERROR("Can't allocated push target EQ: %d\n", rc);
1441 /* Start at the required minimum; we'll enlarge if needed. */
1442 the_lnet.ln_push_target_nnis = LNET_INTERFACES_MIN;
1444 rc = lnet_push_target_resize();
1447 LNetEQFree(the_lnet.ln_push_target_eq);
1448 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1454 /* Clean up the push target. */
1455 static void lnet_push_target_fini(void)
1457 if (!the_lnet.ln_push_target)
1460 /* Unlink and invalidate to prevent new references. */
1461 LNetMDUnlink(the_lnet.ln_push_target_md);
1462 LNetInvalidateMDHandle(&the_lnet.ln_push_target_md);
1464 /* Wait for the unlink to complete. */
1465 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1466 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1467 set_current_state(TASK_UNINTERRUPTIBLE);
1468 schedule_timeout(cfs_time_seconds(1));
1471 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1472 the_lnet.ln_push_target = NULL;
1473 the_lnet.ln_push_target_nnis = 0;
1475 LNetEQFree(the_lnet.ln_push_target_eq);
1476 LNetInvalidateEQHandle(&the_lnet.ln_push_target_eq);
1480 lnet_ni_tq_credits(struct lnet_ni *ni)
1484 LASSERT(ni->ni_ncpts >= 1);
1486 if (ni->ni_ncpts == 1)
1487 return ni->ni_net->net_tunables.lct_max_tx_credits;
1489 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1490 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1491 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
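/*
 * Worked example with hypothetical tunables: lct_max_tx_credits = 256,
 * lct_peer_tx_credits = 8 and ni_ncpts = 16 gives each TX queue
 * min(max(256 / 16, 8 * 8), 256) = 64 credits.
 */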
1497 lnet_ni_unlink_locked(struct lnet_ni *ni)
1499 if (!list_empty(&ni->ni_cptlist)) {
1500 list_del_init(&ni->ni_cptlist);
1501 lnet_ni_decref_locked(ni, 0);
1504 /* move it to the zombie list where nobody can find it anymore */
1505 LASSERT(!list_empty(&ni->ni_netlist));
1506 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1507 lnet_ni_decref_locked(ni, 0);
1511 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1516 struct list_head *zombie_list = &net->net_ni_zombie;
1519 * Now wait for the NIs I just nuked to show up on the zombie
1520 * list and shut them down in guaranteed thread context
1523 while (!list_empty(zombie_list)) {
1527 ni = list_entry(zombie_list->next,
1528 struct lnet_ni, ni_netlist);
1529 list_del_init(&ni->ni_netlist);
1530 /* the ni should be in deleting state. If it's not it's
1532 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1533 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1536 /* still busy, add it back to zombie list */
1537 list_add(&ni->ni_netlist, zombie_list);
1541 if (!list_empty(&ni->ni_netlist)) {
1542 lnet_net_unlock(LNET_LOCK_EX);
1544 if ((i & (-i)) == i) {
1546 "Waiting for zombie LNI %s\n",
1547 libcfs_nid2str(ni->ni_nid));
1549 set_current_state(TASK_UNINTERRUPTIBLE);
1550 schedule_timeout(cfs_time_seconds(1));
1551 lnet_net_lock(LNET_LOCK_EX);
1555 lnet_net_unlock(LNET_LOCK_EX);
1557 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1559 LASSERT(!in_interrupt());
1560 (net->net_lnd->lnd_shutdown)(ni);
1563 CDEBUG(D_LNI, "Removed LNI %s\n",
1564 libcfs_nid2str(ni->ni_nid));
1568 lnet_net_lock(LNET_LOCK_EX);
1572 /* shut down the NI and release its refcount */
1574 lnet_shutdown_lndni(struct lnet_ni *ni)
1577 struct lnet_net *net = ni->ni_net;
1579 lnet_net_lock(LNET_LOCK_EX);
1580 ni->ni_state = LNET_NI_STATE_DELETING;
1581 lnet_ni_unlink_locked(ni);
1582 lnet_incr_dlc_seq();
1583 lnet_net_unlock(LNET_LOCK_EX);
1585 /* clear messages for this NI on the lazy portal */
1586 for (i = 0; i < the_lnet.ln_nportals; i++)
1587 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1589 lnet_net_lock(LNET_LOCK_EX);
1590 lnet_clear_zombies_nis_locked(net);
1591 lnet_net_unlock(LNET_LOCK_EX);
1595 lnet_shutdown_lndnet(struct lnet_net *net)
1599 lnet_net_lock(LNET_LOCK_EX);
1601 net->net_state = LNET_NET_STATE_DELETING;
1603 list_del_init(&net->net_list);
1605 while (!list_empty(&net->net_ni_list)) {
1606 ni = list_entry(net->net_ni_list.next,
1607 struct lnet_ni, ni_netlist);
1608 lnet_net_unlock(LNET_LOCK_EX);
1609 lnet_shutdown_lndni(ni);
1610 lnet_net_lock(LNET_LOCK_EX);
1613 lnet_net_unlock(LNET_LOCK_EX);
1615 /* Do peer table cleanup for this net */
1616 lnet_peer_tables_cleanup(net);
1618 lnet_net_lock(LNET_LOCK_EX);
1620 * decrement ref count on lnd only when the entire network goes
1623 net->net_lnd->lnd_refcount--;
1625 lnet_net_unlock(LNET_LOCK_EX);
1631 lnet_shutdown_lndnets(void)
1633 struct lnet_net *net;
1635 /* NB called holding the global mutex */
1637 /* All quiet on the API front */
1638 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1639 LASSERT(the_lnet.ln_refcount == 0);
1641 lnet_net_lock(LNET_LOCK_EX);
1642 the_lnet.ln_state = LNET_STATE_STOPPING;
1644 while (!list_empty(&the_lnet.ln_nets)) {
1646 * move the nets to the zombie list to avoid them being
1647 * picked up for new work. The loopback net (LONET) is also
1648 * included in the nets that will be moved to the zombie list
1650 net = list_entry(the_lnet.ln_nets.next,
1651 struct lnet_net, net_list);
1652 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1655 /* Drop the cached loopback NI. */
1656 if (the_lnet.ln_loni != NULL) {
1657 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1658 the_lnet.ln_loni = NULL;
1660 lnet_net_unlock(LNET_LOCK_EX);
1662 /* iterate through the net zombie list and delete each net */
1663 while (!list_empty(&the_lnet.ln_net_zombie)) {
1664 net = list_entry(the_lnet.ln_net_zombie.next,
1665 struct lnet_net, net_list);
1666 lnet_shutdown_lndnet(net);
1669 lnet_net_lock(LNET_LOCK_EX);
1670 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1671 lnet_net_unlock(LNET_LOCK_EX);
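/*
 * lnet_startup_lndni() brings up a single NI: it copies any per-NI LND
 * tunables, calls the LND's lnd_startup() method under ln_lnd_mutex,
 * and on success marks the NI active and sizes its per-CPT TX queues
 * from lnet_ni_tq_credits().  The loopback NI is special-cased: it is
 * cached in the_lnet.ln_loni and its credit/timeout tunables are zeroed.
 */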
1675 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1678 struct lnet_tx_queue *tq;
1680 struct lnet_net *net = ni->ni_net;
1682 mutex_lock(&the_lnet.ln_lnd_mutex);
1685 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1686 ni->ni_lnd_tunables_set = true;
1689 rc = (net->net_lnd->lnd_startup)(ni);
1691 mutex_unlock(&the_lnet.ln_lnd_mutex);
1694 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1695 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1696 lnet_net_lock(LNET_LOCK_EX);
1697 net->net_lnd->lnd_refcount--;
1698 lnet_net_unlock(LNET_LOCK_EX);
1702 ni->ni_state = LNET_NI_STATE_ACTIVE;
1704 /* We keep a reference on the loopback net through the loopback NI */
1705 if (net->net_lnd->lnd_type == LOLND) {
1707 LASSERT(the_lnet.ln_loni == NULL);
1708 the_lnet.ln_loni = ni;
1709 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1710 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1711 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1712 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1716 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1717 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1718 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1719 libcfs_lnd2str(net->net_lnd->lnd_type),
1720 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1722 /* shutdown the NI since if we get here then it must've already
1725 lnet_shutdown_lndni(ni);
1729 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1730 tq->tq_credits_min =
1731 tq->tq_credits_max =
1732 tq->tq_credits = lnet_ni_tq_credits(ni);
1735 atomic_set(&ni->ni_tx_credits,
1736 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1738 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1739 libcfs_nid2str(ni->ni_nid),
1740 ni->ni_net->net_tunables.lct_peer_tx_credits,
1741 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1742 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1743 ni->ni_net->net_tunables.lct_peer_timeout);
1752 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1755 struct lnet_net *net_l = NULL;
1756 struct list_head local_ni_list;
1760 struct lnet_lnd *lnd;
1762 net->net_tunables.lct_peer_timeout;
1764 net->net_tunables.lct_max_tx_credits;
1765 int peerrtrcredits =
1766 net->net_tunables.lct_peer_rtr_credits;
1768 INIT_LIST_HEAD(&local_ni_list);
1771 * make sure that this net is unique. If it isn't then
1772 * we are adding interfaces to an already existing network, and
1773 * 'net' is just a convenient way to pass in the list.
1774 * if it is unique we need to find the LND and load it if
1777 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1778 lnd_type = LNET_NETTYP(net->net_id);
1780 LASSERT(libcfs_isknown_lnd(lnd_type));
1782 mutex_lock(&the_lnet.ln_lnd_mutex);
1783 lnd = lnet_find_lnd_by_type(lnd_type);
1786 mutex_unlock(&the_lnet.ln_lnd_mutex);
1787 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1788 mutex_lock(&the_lnet.ln_lnd_mutex);
1790 lnd = lnet_find_lnd_by_type(lnd_type);
1792 mutex_unlock(&the_lnet.ln_lnd_mutex);
1793 CERROR("Can't load LND %s, module %s, rc=%d\n",
1794 libcfs_lnd2str(lnd_type),
1795 libcfs_lnd2modname(lnd_type), rc);
1796 #ifndef HAVE_MODULE_LOADING_SUPPORT
1797 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1798 "compiled with kernel module "
1799 "loading support.");
1806 lnet_net_lock(LNET_LOCK_EX);
1807 lnd->lnd_refcount++;
1808 lnet_net_unlock(LNET_LOCK_EX);
1812 mutex_unlock(&the_lnet.ln_lnd_mutex);
1818 * net_l: if the network being added is unique then net_l
1819 * will point to that network
1820 * if the network being added is not unique then
1821 * net_l points to the existing network.
1823 * When we enter the loop below, we'll pick NIs off the
1824 * network being added and start them up, then add them to
1825 * a local ni list. Once we've successfully started all
1826 * the NIs then we join the local NI list (of started up
1827 * networks) with the net_l->net_ni_list, which should
1828 * point to the correct network to add the new ni list to
1830 * If any of the new NIs fail to start up, then we want to
1831 * iterate through the local ni list, which should include
1832 * any NIs which were successfully started up, and shut
1835 * After that we want to delete the network being added,
1836 * to avoid a memory leak.
1840 * When a network uses TCP bonding then all its interfaces
1841 * must be specified when the network is first defined: the
1842 * TCP bonding code doesn't allow for interfaces to be added
1845 if (net_l != net && net_l != NULL && use_tcp_bonding &&
1846 LNET_NETTYP(net_l->net_id) == SOCKLND) {
1851 while (!list_empty(&net->net_ni_added)) {
1852 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1854 list_del_init(&ni->ni_netlist);
1856 /* make sure that the NI we're about to start
1857 * up is actually unique. If it's not, fail. */
1858 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1859 ni->ni_interfaces[0])) {
1864 /* adjust the pointer to the parent network, just in case
1865 * the net is a duplicate */
1868 rc = lnet_startup_lndni(ni, tun);
1870 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1871 ni->ni_net->net_lnd->lnd_query != NULL);
1877 list_add_tail(&ni->ni_netlist, &local_ni_list);
1882 lnet_net_lock(LNET_LOCK_EX);
1883 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1884 lnet_incr_dlc_seq();
1885 lnet_net_unlock(LNET_LOCK_EX);
1887 /* if the network is not unique then we don't want to keep
1888 * it around after we're done. Free it. Otherwise add that
1889 * net to the global the_lnet.ln_nets */
1890 if (net_l != net && net_l != NULL) {
1892 * TODO - note. currently the tunables can not be updated
1897 net->net_state = LNET_NET_STATE_ACTIVE;
1899 * restore tunables after they have been overwritten by the
1902 if (peer_timeout != -1)
1903 net->net_tunables.lct_peer_timeout = peer_timeout;
1904 if (maxtxcredits != -1)
1905 net->net_tunables.lct_max_tx_credits = maxtxcredits;
1906 if (peerrtrcredits != -1)
1907 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1909 lnet_net_lock(LNET_LOCK_EX);
1910 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1911 lnet_net_unlock(LNET_LOCK_EX);
1918 * shut down the new NIs that are being started up and
1919 * free the net being started
1921 while (!list_empty(&local_ni_list)) {
1922 ni = list_entry(local_ni_list.next, struct lnet_ni,
1925 lnet_shutdown_lndni(ni);
1935 lnet_startup_lndnets(struct list_head *netlist)
1937 struct lnet_net *net;
1942 * Change to running state before bringing up the LNDs. This
1943 * allows lnet_shutdown_lndnets() to assert that we've passed
1946 lnet_net_lock(LNET_LOCK_EX);
1947 the_lnet.ln_state = LNET_STATE_RUNNING;
1948 lnet_net_unlock(LNET_LOCK_EX);
1950 while (!list_empty(netlist)) {
1951 net = list_entry(netlist->next, struct lnet_net, net_list);
1952 list_del_init(&net->net_list);
1954 rc = lnet_startup_lndnet(net, NULL);
1964 lnet_shutdown_lndnets();
1970 * Initialize LNet library.
1972 * Automatically called at module loading time. Caller has to call
1973 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1974 * latter returned 0. It must be called exactly once.
1976 * \retval 0 on success
1977 * \retval -ve on failures.
1979 int lnet_lib_init(void)
1983 lnet_assert_wire_constants();
1985 memset(&the_lnet, 0, sizeof(the_lnet));
1987 /* refer to global cfs_cpt_table for now */
1988 the_lnet.ln_cpt_table = cfs_cpt_table;
1989 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
1991 LASSERT(the_lnet.ln_cpt_number > 0);
1992 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1993 /* we are at risk of consuming all lh_cookie values */
1994 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
1995 "please change setting of CPT-table and retry\n",
1996 the_lnet.ln_cpt_number, LNET_CPT_MAX);
2000 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
2001 the_lnet.ln_cpt_bits++;
2003 rc = lnet_create_locks();
2005 CERROR("Can't create LNet global locks: %d\n", rc);
2009 the_lnet.ln_refcount = 0;
2010 LNetInvalidateEQHandle(&the_lnet.ln_rc_eqh);
2011 INIT_LIST_HEAD(&the_lnet.ln_lnds);
2012 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
2013 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
2014 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
2016 /* The hash table size is the number of bits it takes to express the set
2017 * ln_num_routes, minus 1 (better to underestimate than overestimate,
2018 * so we don't waste memory). */
2019 if (rnet_htable_size <= 0)
2020 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
2021 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
2022 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
2023 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
2024 order_base_2(rnet_htable_size) - 1);
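/*
 * For example (hypothetical value), rnet_htable_size = 128 gives
 * order_base_2(128) - 1 = 6 hash bits, i.e. a 64-bucket remote nets
 * table, deliberately rounding down as described above.
 */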
2026 /* All LNDs apart from the LOLND are in separate modules. They
2027 * register themselves when their module loads, and unregister
2028 * themselves when their module is unloaded. */
2029 lnet_register_lnd(&the_lolnd);
2034 * Finalize LNet library.
2036 * \pre lnet_lib_init() called with success.
2037 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
2039 void lnet_lib_exit(void)
2041 LASSERT(the_lnet.ln_refcount == 0);
2043 while (!list_empty(&the_lnet.ln_lnds))
2044 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2045 struct lnet_lnd, lnd_list));
2046 lnet_destroy_locks();
2050 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2052 * Users must call this function at least once before any other functions.
2053 * For each successful call there must be a corresponding call to
2054 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2057 * The PID used by LNet may be different from the one requested.
2060 * \param requested_pid PID requested by the caller.
2062 * \return >= 0 on success, and < 0 error code on failures.
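 *
 * A minimal usage sketch for a kernel-space LNet user (illustrative,
 * with error handling abbreviated):
 *
 *	rc = LNetNIInit(LNET_PID_LUSTRE);
 *	if (rc < 0)
 *		return rc;
 *	...
 *	LNetNIFini();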
2065 LNetNIInit(lnet_pid_t requested_pid)
2067 int im_a_router = 0;
2070 struct lnet_ping_buffer *pbuf;
2071 struct lnet_handle_md ping_mdh;
2072 struct list_head net_head;
2073 struct lnet_net *net;
2075 INIT_LIST_HEAD(&net_head);
2077 mutex_lock(&the_lnet.ln_api_mutex);
2079 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2081 if (the_lnet.ln_refcount > 0) {
2082 rc = the_lnet.ln_refcount++;
2083 mutex_unlock(&the_lnet.ln_api_mutex);
2087 rc = lnet_prepare(requested_pid);
2089 mutex_unlock(&the_lnet.ln_api_mutex);
2093 /* create the loopback network */
2094 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2097 goto err_empty_list;
2100 /* Add in the loopback NI */
2101 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2103 goto err_empty_list;
2106 /* If LNet is being initialized via DLC it is possible
2107 * that the user requests not to load module parameters (ones which
2108 * are supported by DLC) on initialization. Therefore, make sure not
2109 * to load networks, routes and forwarding from module parameters
2110 * in this case. On cleanup in case of failure only clean up
2111 * routes if they have been loaded */
2112 if (!the_lnet.ln_nis_from_mod_params) {
2113 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2116 goto err_empty_list;
2119 ni_count = lnet_startup_lndnets(&net_head);
2122 goto err_empty_list;
2125 if (!the_lnet.ln_nis_from_mod_params) {
2126 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2128 goto err_shutdown_lndnis;
2130 rc = lnet_check_routes();
2132 goto err_destroy_routes;
2134 rc = lnet_rtrpools_alloc(im_a_router);
2136 goto err_destroy_routes;
2139 rc = lnet_acceptor_start();
2141 goto err_destroy_routes;
2143 the_lnet.ln_refcount = 1;
2144 /* Now I may use my own API functions... */
2146 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2148 goto err_acceptor_stop;
2150 lnet_ping_target_update(pbuf, ping_mdh);
2152 rc = lnet_router_checker_start();
2156 rc = lnet_push_target_init();
2158 goto err_stop_router_checker;
2160 rc = lnet_peer_discovery_start();
2162 goto err_destroy_push_target;
2167 mutex_unlock(&the_lnet.ln_api_mutex);
2171 err_destroy_push_target:
2172 lnet_push_target_fini();
2173 err_stop_router_checker:
2174 lnet_router_checker_stop();
2176 lnet_ping_target_fini();
2178 the_lnet.ln_refcount = 0;
2179 lnet_acceptor_stop();
2181 if (!the_lnet.ln_nis_from_mod_params)
2182 lnet_destroy_routes();
2183 err_shutdown_lndnis:
2184 lnet_shutdown_lndnets();
2188 mutex_unlock(&the_lnet.ln_api_mutex);
2189 while (!list_empty(&net_head)) {
2190 struct lnet_net *net;
2192 net = list_entry(net_head.next, struct lnet_net, net_list);
2193 list_del_init(&net->net_list);
2198 EXPORT_SYMBOL(LNetNIInit);
2201 * Stop LNet interfaces, routing, and forwarding.
2203 * Users must call this function once for each successful call to LNetNIInit().
2204 * Once the LNetNIFini() operation has been started, the results of pending
2205 * API operations are undefined.
2207 * \return always 0 for current implementation.
2212 mutex_lock(&the_lnet.ln_api_mutex);
2214 LASSERT(the_lnet.ln_refcount > 0);
2216 if (the_lnet.ln_refcount != 1) {
2217 the_lnet.ln_refcount--;
2219 LASSERT(!the_lnet.ln_niinit_self);
2224 lnet_peer_discovery_stop();
2225 lnet_push_target_fini();
2226 lnet_router_checker_stop();
2227 lnet_ping_target_fini();
2229 /* Teardown fns that use my own API functions BEFORE here */
2230 the_lnet.ln_refcount = 0;
2232 lnet_acceptor_stop();
2233 lnet_destroy_routes();
2234 lnet_shutdown_lndnets();
2238 mutex_unlock(&the_lnet.ln_api_mutex);
2241 EXPORT_SYMBOL(LNetNIFini);
2244 * Grabs the ni data from the ni structure and fills the out
2247 * \param[in] ni network interface structure
2248 * \param[out] cfg_ni NI config information
2249 * \param[out] tun network and LND tunables
2252 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2253 struct lnet_ioctl_config_lnd_tunables *tun,
2254 struct lnet_ioctl_element_stats *stats,
2257 size_t min_size = 0;
2260 if (!ni || !cfg_ni || !tun)
2263 if (ni->ni_interfaces[0] != NULL) {
2264 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2265 if (ni->ni_interfaces[i] != NULL) {
2266 strncpy(cfg_ni->lic_ni_intf[i],
2267 ni->ni_interfaces[i],
2268 sizeof(cfg_ni->lic_ni_intf[i]));
2273 cfg_ni->lic_nid = ni->ni_nid;
2274 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2275 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2277 cfg_ni->lic_status = ni->ni_status->ns_status;
2278 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2279 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2281 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2284 stats->iel_send_count = atomic_read(&ni->ni_stats.send_count);
2285 stats->iel_recv_count = atomic_read(&ni->ni_stats.recv_count);
2289 * tun->lt_tun will always be present, but in order to be
2290 * backwards compatible we need to handle the case where
2291 * tun->lt_tun is smaller than what the kernel has because it
2292 * comes from an older version of a userspace program. In that
2293 * case we copy only as much information as there is space for.
2295 min_size = tun_size - sizeof(tun->lt_cmn);
2296 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2298 /* copy over the cpts */
2299 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2300 ni->ni_cpts == NULL) {
2301 for (i = 0; i < ni->ni_ncpts; i++)
2302 cfg_ni->lic_cpts[i] = i;
2305 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2306 i < LNET_MAX_SHOW_NUM_CPT;
2308 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2310 cfg_ni->lic_ncpts = ni->ni_ncpts;
2314 * NOTE: This is a legacy function left in the code to be backwards
2315 * compatible with older userspace programs. It should eventually be
2318 * Grabs the ni data from the ni structure and fills the out
2321 * \param[in] ni network interface structure
2322 * \param[out] config config information
2325 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2326 struct lnet_ioctl_config_data *config)
2328 struct lnet_ioctl_net_config *net_config;
2329 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2330 size_t min_size, tunable_size = 0;
2336 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2340 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2341 ARRAY_SIZE(net_config->ni_interfaces));
2343 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2344 if (!ni->ni_interfaces[i])
2347 strncpy(net_config->ni_interfaces[i],
2348 ni->ni_interfaces[i],
2349 sizeof(net_config->ni_interfaces[i]));
2352 config->cfg_nid = ni->ni_nid;
2353 config->cfg_config_u.cfg_net.net_peer_timeout =
2354 ni->ni_net->net_tunables.lct_peer_timeout;
2355 config->cfg_config_u.cfg_net.net_max_tx_credits =
2356 ni->ni_net->net_tunables.lct_max_tx_credits;
2357 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2358 ni->ni_net->net_tunables.lct_peer_tx_credits;
2359 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2360 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2362 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2363 net_config->ni_status = LNET_NI_STATUS_UP;
2365 net_config->ni_status = ni->ni_status->ns_status;
2368 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2370 for (i = 0; i < num_cpts; i++)
2371 net_config->ni_cpts[i] = ni->ni_cpts[i];
2373 config->cfg_ncpts = num_cpts;
2377 * See if user land tools sent in a newer and larger version
2378 * of struct lnet_tunables than what the kernel uses.
2380 min_size = sizeof(*config) + sizeof(*net_config);
2382 if (config->cfg_hdr.ioc_len > min_size)
2383 tunable_size = config->cfg_hdr.ioc_len - min_size;
2385 /* Don't copy too much data to user space */
2386 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2387 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2389 if (lnd_cfg && min_size) {
2390 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2391 config->cfg_config_u.cfg_net.net_interface_count = 1;
2393 /* Tell user land that kernel side has less data */
2394 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2395 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2396 config->cfg_hdr.ioc_len -= min_size;
2402 lnet_get_ni_idx_locked(int idx)
2405 struct lnet_net *net;
2407 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2408 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2418 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2421 struct lnet_net *net = mynet;
2425 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2427 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2433 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2434 /* if you reached the end of the ni list and the net is
2435 * specified, then there are no more nis in that net */
2439 /* we reached the end of this net ni list. move to the
2441 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2442 /* no more nets and no more NIs. */
		/* get the next net */
		net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
				 net_list);
		/* get the ni on it */
		ni = list_entry(net->net_ni_list.next, struct lnet_ni,
				ni_netlist);
		return ni;
	}

	/* there are more nis left */
	ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
	return ni;
}
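/*
 * A minimal sketch of how the iterator above is typically driven
 * (assuming a caller that already holds the net lock and simply wants
 * to visit every local NI):
 *
 *	struct lnet_ni *ni = NULL;
 *	int count = 0;
 *
 *	while ((ni = lnet_get_next_ni_locked(NULL, ni)) != NULL)
 *		count++;
 */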
2462 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2467 int idx = config->cfg_count;
2469 cpt = lnet_net_lock_current();
2471 ni = lnet_get_ni_idx_locked(idx);
2476 lnet_fill_ni_info_legacy(ni, config);
2480 lnet_net_unlock(cpt);
2485 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2486 struct lnet_ioctl_config_lnd_tunables *tun,
2487 struct lnet_ioctl_element_stats *stats,
2494 if (!cfg_ni || !tun || !stats)
2497 cpt = lnet_net_lock_current();
2499 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2504 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2508 lnet_net_unlock(cpt);
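/*
 * Common add-net path shared by lnet_dyn_add_ni(), the legacy ip2nets
 * handling and lnet_dyn_add_net(): it grows the ping target, applies
 * (or defaults) the tunables, starts the LND net and, when needed, the
 * acceptor thread, then publishes the new NIs via the ping buffer.
 */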
static int lnet_add_net_common(struct lnet_net *net,
			       struct lnet_ioctl_config_lnd_tunables *tun)
{
	__u32 net_id;
	struct lnet_ping_buffer *pbuf;
	struct lnet_handle_md ping_mdh;
	int rc;
	struct lnet_remotenet *rnet;
	int net_ni_count;
	int num_acceptor_nets;
2523 lnet_net_lock(LNET_LOCK_EX);
2524 rnet = lnet_find_rnet_locked(net->net_id);
2525 lnet_net_unlock(LNET_LOCK_EX);
	/*
	 * make sure that the net added doesn't invalidate the current
	 * configuration LNet is keeping
	 */
	if (rnet) {
		CERROR("Adding net %s will invalidate routing configuration\n",
		       libcfs_net2str(net->net_id));
	/*
	 * make sure to calculate the correct number of slots in the ping
	 * buffer. Since the ping info is a flattened list of all the NIs,
	 * we should allocate enough slots to accommodate the number of NIs
	 * which will be added.
	 *
	 * since ni hasn't been configured yet, use
	 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
	 */
2546 net_ni_count = lnet_get_net_ni_count_pre(net);
2548 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2549 net_ni_count + lnet_get_ni_count(),
	if (tun)
		memcpy(&net->net_tunables,
		       &tun->lt_cmn, sizeof(net->net_tunables));
	else
		memset(&net->net_tunables, -1, sizeof(net->net_tunables));
	/*
	 * before starting this network get a count of the current TCP
	 * networks which require the acceptor thread running. If that
	 * count is zero before we start up this network, then we'd want to
	 * start up the acceptor thread after starting up this network
	 */
2568 num_acceptor_nets = lnet_count_acceptor_nets();
2570 net_id = net->net_id;
2572 rc = lnet_startup_lndnet(net,
2573 (tun) ? &tun->lt_tun : NULL);
2577 lnet_net_lock(LNET_LOCK_EX);
2578 net = lnet_get_net_locked(net_id);
2579 lnet_net_unlock(LNET_LOCK_EX);
	/*
	 * Start the acceptor thread if this is the first network
	 * being added that requires the thread.
	 */
	if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
		rc = lnet_acceptor_start();
		if (rc < 0) {
			/* shutdown the net that we just started */
			CERROR("Failed to start up acceptor thread\n");
			lnet_shutdown_lndnet(net);
2597 lnet_net_lock(LNET_LOCK_EX);
2598 lnet_peer_net_added(net);
2599 lnet_net_unlock(LNET_LOCK_EX);
2601 lnet_ping_target_update(pbuf, ping_mdh);
2606 lnet_ping_md_unlink(pbuf, &ping_mdh);
2607 lnet_ping_buffer_decref(pbuf);
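/*
 * Translate a legacy DLC "ip2nets" string into net/NI structures and
 * feed each parsed net through lnet_add_net_common() above.
 */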
static int lnet_handle_legacy_ip2nets(char *ip2nets,
				      struct lnet_ioctl_config_lnd_tunables *tun)
{
	struct lnet_net *net;
	char *nets;
	int rc;
	struct list_head net_head;

	INIT_LIST_HEAD(&net_head);
2621 rc = lnet_parse_ip2nets(&nets, ip2nets);
2625 rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
2629 mutex_lock(&the_lnet.ln_api_mutex);
2630 while (!list_empty(&net_head)) {
2631 net = list_entry(net_head.next, struct lnet_net, net_list);
2632 list_del_init(&net->net_list);
2633 rc = lnet_add_net_common(net, tun);
2639 mutex_unlock(&the_lnet.ln_api_mutex);
2641 while (!list_empty(&net_head)) {
2642 net = list_entry(net_head.next, struct lnet_net, net_list);
2643 list_del_init(&net->net_list);
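/*
 * Dynamically add a single NI described by userspace (lnetctl/DLC).
 * Falls back to the legacy ip2nets path for older tools, otherwise
 * builds the net/NI with the requested CPT list and interface name.
 */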
2649 int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
2651 struct lnet_net *net;
2653 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2657 /* get the tunables if they are available */
	if (conf->lic_cfg_hdr.ioc_len >=
	    sizeof(*conf) + sizeof(*tun))
		tun = (struct lnet_ioctl_config_lnd_tunables *)
			conf->lic_bulk;
2663 /* handle legacy ip2nets from DLC */
	if (conf->lic_legacy_ip2nets[0] != '\0')
		return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
						  tun);
	net_id = LNET_NIDNET(conf->lic_nid);
	net = lnet_net_alloc(net_id, NULL);
	if (!net)
		return -ENOMEM;

	for (i = 0; i < conf->lic_ncpts; i++) {
		if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
			return -EINVAL;
	}

	ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
				       conf->lic_ni_intf[0]);
2684 mutex_lock(&the_lnet.ln_api_mutex);
2686 rc = lnet_add_net_common(net, tun);
2688 mutex_unlock(&the_lnet.ln_api_mutex);
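/*
 * Dynamically remove an NI. If the NID carries no address part the
 * whole net is torn down; either way the ping target is rebuilt first
 * so this node no longer advertises the departing NI(s).
 */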
int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	__u32 net_id = LNET_NIDNET(conf->lic_nid);
	struct lnet_ping_buffer *pbuf;
	struct lnet_handle_md ping_mdh;
	int rc = 0;
	int net_count;
	__u32 addr;

	/* don't allow userspace to shutdown the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;
2708 mutex_lock(&the_lnet.ln_api_mutex);
2712 net = lnet_get_net_locked(net_id);
	if (!net) {
		CERROR("net %s not found\n",
		       libcfs_net2str(net_id));
	addr = LNET_NIDADDR(conf->lic_nid);
	if (addr == 0) {
		/* remove the entire net */
		net_count = lnet_get_net_ni_count_locked(net);

		lnet_net_unlock(0);

		/* create and link a new ping info, before removing the old one */
		rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
					    lnet_get_ni_count() - net_count,
					    false);
		if (rc != 0)
			goto unlock_api_mutex;
		lnet_shutdown_lndnet(net);

		if (lnet_count_acceptor_nets() == 0)
			lnet_acceptor_stop();

		lnet_ping_target_update(pbuf, ping_mdh);

		goto unlock_api_mutex;
	}
	ni = lnet_nid2ni_locked(conf->lic_nid, 0);
	if (!ni) {
		CERROR("nid %s not found\n",
		       libcfs_nid2str(conf->lic_nid));
	net_count = lnet_get_net_ni_count_locked(net);

	lnet_net_unlock(0);

	/* create and link a new ping info, before removing the old one */
	rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
				    lnet_get_ni_count() - 1, false);
	if (rc != 0)
		goto unlock_api_mutex;
	lnet_shutdown_lndni(ni);

	if (lnet_count_acceptor_nets() == 0)
		lnet_acceptor_stop();

	lnet_ping_target_update(pbuf, ping_mdh);

	/* check if the net is empty and remove it if it is */
	if (net_count == 1)
		lnet_shutdown_lndnet(net);

	goto unlock_api_mutex;
2778 mutex_unlock(&the_lnet.ln_api_mutex);
/*
 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
 * They are only expected to be called for unique networks.
 * That can be as a result of older DLC library
 * calls. Multi-Rail DLC and beyond no longer uses these APIs.
 */
int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
	struct lnet_net *net;
	struct list_head net_head;
	int rc;
	struct lnet_ioctl_config_lnd_tunables tun;
	char *nets = conf->cfg_config_u.cfg_net.net_intf;

	INIT_LIST_HEAD(&net_head);

	/* Create net/ni structures for the network string */
	rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
	if (rc <= 0)
		return rc == 0 ? -EINVAL : rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	if (rc > 1) {
		rc = -EINVAL; /* only add one network per call */
		goto out_unlock_clean;
	}
2812 net = list_entry(net_head.next, struct lnet_net, net_list);
2813 list_del_init(&net->net_list);
2815 LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));
2817 memset(&tun, 0, sizeof(tun));
2819 tun.lt_cmn.lct_peer_timeout =
2820 conf->cfg_config_u.cfg_net.net_peer_timeout;
2821 tun.lt_cmn.lct_peer_tx_credits =
2822 conf->cfg_config_u.cfg_net.net_peer_tx_credits;
2823 tun.lt_cmn.lct_peer_rtr_credits =
2824 conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
2825 tun.lt_cmn.lct_max_tx_credits =
2826 conf->cfg_config_u.cfg_net.net_max_tx_credits;
	rc = lnet_add_net_common(net, &tun);

out_unlock_clean:
	mutex_unlock(&the_lnet.ln_api_mutex);
	while (!list_empty(&net_head)) {
2833 /* net_head list is empty in success case */
2834 net = list_entry(net_head.next, struct lnet_net, net_list);
2835 list_del_init(&net->net_list);
lnet_dyn_del_net(__u32 net_id)
{
	struct lnet_net *net;
	struct lnet_ping_buffer *pbuf;
	struct lnet_handle_md ping_mdh;
	int rc;
	int net_ni_count;

	/* don't allow userspace to shutdown the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;
2854 mutex_lock(&the_lnet.ln_api_mutex);
2858 net = lnet_get_net_locked(net_id);
2865 net_ni_count = lnet_get_net_ni_count_locked(net);
2869 /* create and link a new ping info, before removing the old one */
2870 rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
2871 lnet_get_ni_count() - net_ni_count, false);
2875 lnet_shutdown_lndnet(net);
2877 if (lnet_count_acceptor_nets() == 0)
2878 lnet_acceptor_stop();
2880 lnet_ping_target_update(pbuf, ping_mdh);
2883 mutex_unlock(&the_lnet.ln_api_mutex);
void lnet_incr_dlc_seq(void)
{
	atomic_inc(&lnet_dlc_seq_no);
}

__u32 lnet_get_dlc_seq_locked(void)
{
	return atomic_read(&lnet_dlc_seq_no);
}
/**
 * LNet ioctl handler.
 */
int
LNetCtl(unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	struct lnet_ioctl_config_data *config;
	struct lnet_process_id id = {0};
	struct lnet_ni *ni;
	int rc;

	BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
		     sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);

	switch (cmd) {
	case IOC_LIBCFS_GET_NI:
		rc = LNetGetId(data->ioc_count, &id);
		data->ioc_nid = id.nid;
		return rc;

	case IOC_LIBCFS_FAIL_NID:
		return lnet_fail_nid(data->ioc_nid, data->ioc_count);
	case IOC_LIBCFS_ADD_ROUTE:
		config = arg;
		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_route(config->cfg_net,
				    config->cfg_config_u.cfg_route.rtr_hop,
				    config->cfg_nid,
				    config->cfg_config_u.cfg_route.
					rtr_priority);
		if (rc == 0) {
			rc = lnet_check_routes();
			if (rc != 0)
				lnet_del_route(config->cfg_net,
					       config->cfg_nid);
		}
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
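	/*
	 * Note that IOC_LIBCFS_ADD_ROUTE above deliberately rolls back:
	 * if lnet_check_routes() rejects the table after the add, the
	 * just-added route is deleted again before returning the error.
	 */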
2944 case IOC_LIBCFS_DEL_ROUTE:
2947 if (config->cfg_hdr.ioc_len < sizeof(*config))
2950 mutex_lock(&the_lnet.ln_api_mutex);
2951 rc = lnet_del_route(config->cfg_net, config->cfg_nid);
2952 mutex_unlock(&the_lnet.ln_api_mutex);
2955 case IOC_LIBCFS_GET_ROUTE:
2958 if (config->cfg_hdr.ioc_len < sizeof(*config))
2961 mutex_lock(&the_lnet.ln_api_mutex);
2962 rc = lnet_get_route(config->cfg_count,
2964 &config->cfg_config_u.cfg_route.rtr_hop,
2966 &config->cfg_config_u.cfg_route.rtr_flags,
2967 &config->cfg_config_u.cfg_route.
2969 mutex_unlock(&the_lnet.ln_api_mutex);
2972 case IOC_LIBCFS_GET_LOCAL_NI: {
2973 struct lnet_ioctl_config_ni *cfg_ni;
2974 struct lnet_ioctl_config_lnd_tunables *tun = NULL;
2975 struct lnet_ioctl_element_stats *stats;
2979 /* get the tunables if they are available */
		if (cfg_ni->lic_cfg_hdr.ioc_len <
		    sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
			return -EINVAL;

		stats = (struct lnet_ioctl_element_stats *)
			cfg_ni->lic_bulk;
2986 tun = (struct lnet_ioctl_config_lnd_tunables *)
2987 (cfg_ni->lic_bulk + sizeof(*stats));
		tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
			   sizeof(*stats);
2992 mutex_lock(&the_lnet.ln_api_mutex);
2993 rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
2994 mutex_unlock(&the_lnet.ln_api_mutex);
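		/*
		 * lic_bulk is laid out as the element stats followed
		 * immediately by the LND tunables, which is why tun_size
		 * above subtracts both fixed-size structures from ioc_len.
		 */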
2998 case IOC_LIBCFS_GET_NET: {
2999 size_t total = sizeof(*config) +
3000 sizeof(struct lnet_ioctl_net_config);
3003 if (config->cfg_hdr.ioc_len < total)
3006 mutex_lock(&the_lnet.ln_api_mutex);
3007 rc = lnet_get_net_config(config);
3008 mutex_unlock(&the_lnet.ln_api_mutex);
3012 case IOC_LIBCFS_GET_LNET_STATS:
3014 struct lnet_ioctl_lnet_stats *lnet_stats = arg;
3016 if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
3019 mutex_lock(&the_lnet.ln_api_mutex);
3020 lnet_counters_get(&lnet_stats->st_cntrs);
3021 mutex_unlock(&the_lnet.ln_api_mutex);
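		/*
		 * lnet_counters_get() fills st_cntrs with a snapshot of the
		 * global LNet message/byte counters; the API mutex only
		 * serialises this snapshot against configuration changes.
		 */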
3025 case IOC_LIBCFS_CONFIG_RTR:
3028 if (config->cfg_hdr.ioc_len < sizeof(*config))
3031 mutex_lock(&the_lnet.ln_api_mutex);
		if (config->cfg_config_u.cfg_buffers.buf_enable) {
			rc = lnet_rtrpools_enable();
			mutex_unlock(&the_lnet.ln_api_mutex);
			return rc;
		}
		lnet_rtrpools_disable();
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
3041 case IOC_LIBCFS_ADD_BUF:
3044 if (config->cfg_hdr.ioc_len < sizeof(*config))
3047 mutex_lock(&the_lnet.ln_api_mutex);
3048 rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
3050 config->cfg_config_u.cfg_buffers.
3052 config->cfg_config_u.cfg_buffers.
3054 mutex_unlock(&the_lnet.ln_api_mutex);
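		/*
		 * The three values handed to lnet_rtrpools_adjust() above
		 * are the tiny/small/large router buffer counts carried in
		 * cfg_buffers.
		 */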
	case IOC_LIBCFS_SET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;

		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		lnet_net_lock(LNET_LOCK_EX);
		lnet_numa_range = numa->sv_value;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;
	}

	case IOC_LIBCFS_GET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;

		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		numa->sv_value = lnet_numa_range;
		return 0;
	}
3077 case IOC_LIBCFS_GET_BUF: {
3078 struct lnet_ioctl_pool_cfg *pool_cfg;
3079 size_t total = sizeof(*config) + sizeof(*pool_cfg);
3083 if (config->cfg_hdr.ioc_len < total)
3086 pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;
3088 mutex_lock(&the_lnet.ln_api_mutex);
3089 rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
3090 mutex_unlock(&the_lnet.ln_api_mutex);
3094 case IOC_LIBCFS_ADD_PEER_NI: {
3095 struct lnet_ioctl_peer_cfg *cfg = arg;
3097 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3100 mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
				      cfg->prcfg_cfg_nid,
				      cfg->prcfg_mr);
3108 case IOC_LIBCFS_DEL_PEER_NI: {
3109 struct lnet_ioctl_peer_cfg *cfg = arg;
3111 if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
3114 mutex_lock(&the_lnet.ln_api_mutex);
3115 rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
3116 cfg->prcfg_cfg_nid);
3117 mutex_unlock(&the_lnet.ln_api_mutex);
3121 case IOC_LIBCFS_GET_PEER_INFO: {
3122 struct lnet_ioctl_peer *peer_info = arg;
3124 if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
3127 mutex_lock(&the_lnet.ln_api_mutex);
3128 rc = lnet_get_peer_ni_info(
3129 peer_info->pr_count,
3131 peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
3132 &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
3133 &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
3134 &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
3135 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
3136 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
3137 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
3138 &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
3139 mutex_unlock(&the_lnet.ln_api_mutex);
3143 case IOC_LIBCFS_GET_PEER_NI: {
3144 struct lnet_ioctl_peer_cfg *cfg = arg;
3145 struct lnet_peer_ni_credit_info __user *lpni_cri;
3146 struct lnet_ioctl_element_stats __user *lpni_stats;
3147 size_t usr_size = sizeof(*lpni_cri) + sizeof(*lpni_stats);
3149 if ((cfg->prcfg_hdr.ioc_len != sizeof(*cfg)) ||
3150 (cfg->prcfg_size != usr_size))
3153 lpni_cri = cfg->prcfg_bulk;
3154 lpni_stats = cfg->prcfg_bulk + sizeof(*lpni_cri);
3156 mutex_lock(&the_lnet.ln_api_mutex);
3157 rc = lnet_get_peer_info(cfg->prcfg_count, &cfg->prcfg_prim_nid,
3158 &cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
3159 lpni_cri, lpni_stats);
3160 mutex_unlock(&the_lnet.ln_api_mutex);
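		/*
		 * prcfg_bulk must contain a peer-NI credit-info record
		 * followed directly by its element stats; prcfg_size is
		 * validated above to match exactly that layout.
		 */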
3164 case IOC_LIBCFS_NOTIFY_ROUTER: {
3165 unsigned long jiffies_passed;
3167 jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
3168 jiffies_passed = cfs_time_seconds(jiffies_passed);
3170 return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
3171 jiffies - jiffies_passed);
3174 case IOC_LIBCFS_LNET_DIST:
3175 rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
		if (rc < 0 && rc != -EHOSTUNREACH)
			return rc;
		data->ioc_u32[0] = rc;
		return 0;
3182 case IOC_LIBCFS_TESTPROTOCOMPAT:
3183 lnet_net_lock(LNET_LOCK_EX);
3184 the_lnet.ln_testprotocompat = data->ioc_flags;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;
3188 case IOC_LIBCFS_LNET_FAULT:
3189 return lnet_fault_ctl(data->ioc_flags, data);
3191 case IOC_LIBCFS_PING: {
3192 signed long timeout;
3194 id.nid = data->ioc_nid;
3195 id.pid = data->ioc_u32[0];
3197 /* Don't block longer than 2 minutes */
3198 if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
3201 /* If timestamp is negative then disable timeout */
3202 if ((s32)data->ioc_u32[1] < 0)
3203 timeout = MAX_SCHEDULE_TIMEOUT;
3205 timeout = msecs_to_jiffies(data->ioc_u32[1]);
3207 rc = lnet_ping(id, timeout, data->ioc_pbuf1,
3208 data->ioc_plen1 / sizeof(struct lnet_process_id));
3211 data->ioc_count = rc;
3216 ni = lnet_net2ni_addref(data->ioc_net);
		if (ni->ni_net->net_lnd->lnd_ctl == NULL)
			rc = -EINVAL;
		else
			rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);
3230 EXPORT_SYMBOL(LNetCtl);
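/*
 * Rough picture of how a request reaches LNetCtl() (simplified): the
 * lnetctl/DLC library fills one of the lnet_ioctl_* structures, sets
 * its hdr.ioc_len, and issues an ioctl on the libcfs/LNet device; libcfs
 * decodes the buffer and hands the command and payload to LNetCtl().
 */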
void LNetDebugPeer(struct lnet_process_id id)
{
	lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
 * Determine if the specified peer \a nid is on the local node.
 *
 * \param nid	peer nid to check
 *
 * \retval true		If peer NID is on the local node.
 * \retval false	If peer NID is not on the local node.
 */
bool LNetIsPeerLocal(lnet_nid_t nid)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	int cpt;

	cpt = lnet_net_lock_current();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (ni->ni_nid == nid) {
				lnet_net_unlock(cpt);
				return true;
			}
		}
	}
	lnet_net_unlock(cpt);
	return false;
}
EXPORT_SYMBOL(LNetIsPeerLocal);
/**
 * Retrieve the struct lnet_process_id ID of LNet interface at \a index.
 * Note that all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index	Index of the interface to look up.
 * \param id	On successful return, this location will hold the
 *		struct lnet_process_id ID of the interface.
 *
 * \retval 0	   If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
3279 LNetGetId(unsigned int index, struct lnet_process_id *id)
3282 struct lnet_net *net;
3286 LASSERT(the_lnet.ln_refcount > 0);
3288 cpt = lnet_net_lock_current();
3290 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
3291 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
3295 id->nid = ni->ni_nid;
3296 id->pid = the_lnet.ln_pid;
3302 lnet_net_unlock(cpt);
3305 EXPORT_SYMBOL(LNetGetId);
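/*
 * Example usage (a sketch): enumerate every local interface the way the
 * ping code does, stopping at the first -ENOENT.
 *
 *	struct lnet_process_id id;
 *	int i;
 *
 *	for (i = 0; LNetGetId(i, &id) != -ENOENT; i++)
 *		CDEBUG(D_NET, "local NI: %s\n", libcfs_id2str(id));
 */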
static int lnet_ping(struct lnet_process_id id, signed long timeout,
		     struct lnet_process_id __user *ids, int n_ids)
{
	struct lnet_handle_eq eqh;
	struct lnet_handle_md mdh;
	struct lnet_event event;
	struct lnet_md md = { NULL };
	int which;
	int unlinked = 0;
	int replied = 0;
	const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
	struct lnet_ping_buffer *pbuf;
	struct lnet_process_id tmpid;
	int i;
	int nob;
	int rc;
	int rc2;
	sigset_t blocked;
	/* n_ids limit is arbitrary */
	if (n_ids <= 0 || n_ids > lnet_interfaces_max || id.nid == LNET_NID_ANY)
		return -EINVAL;

	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
	if (!pbuf)
		return -ENOMEM;

	/* NB 2 events max (including any unlink event) */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ: %d\n", rc);
		goto fail_ping_buffer_decref;
	}
	/* initialize md content */
	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(n_ids);
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = NULL;
	md.eq_handle = eqh;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc != 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto fail_free_eq;
	}
	rc = LNetGet(LNET_NID_ANY, mdh, id,
		     LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0);
	if (rc != 0) {
		/* Don't CERROR; this could be deliberate! */
		rc2 = LNetMDUnlink(mdh);
		LASSERT(rc2 == 0);

		/* NB must wait for the UNLINK event below... */
		unlinked = 1;
		timeout = a_long_time;
	}

	do {
		/* MUST block for unlink to complete */
		if (unlinked)
			blocked = cfs_block_allsigs();

		rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);

		if (unlinked)
			cfs_restore_sigs(blocked);
		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
		       (rc2 <= 0) ? -1 : event.type,
		       (rc2 <= 0) ? -1 : event.status,
		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

		LASSERT(rc2 != -EOVERFLOW);	/* can't miss anything */

		if (rc2 <= 0 || event.status != 0) {
			/* timeout or error */
			if (!replied && rc == 0)
				rc = (rc2 < 0) ? rc2 :
				     (rc2 == 0) ? -ETIMEDOUT :
				     event.status;

			if (!unlinked) {
				/* Ensure completion in finite time... */
				LNetMDUnlink(mdh);
				/* No assertion (racing with network) */
				unlinked = 1;
				timeout = a_long_time;
			} else if (rc2 == 0) {
				/* timed out waiting for unlink */
				CWARN("ping %s: late network completion\n",
				      libcfs_id2str(id));
			}
		} else if (event.type == LNET_EVENT_REPLY) {
			replied = 1;
			rc = event.mlength;
		}
	} while (rc2 <= 0 || !event.unlinked);
3418 CWARN("%s: Unexpected rc >= 0 but no reply!\n",
	nob = rc;
	LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));

	rc = -EPROTO;		/* if I can't parse... */
	if (nob < 8) {
		/* can't check magic/version */
		CERROR("%s: ping info too short %d\n",
		       libcfs_id2str(id), nob);
	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(pbuf);
	} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_magic);
		goto fail_free_eq;
	}

	if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_features);
		goto fail_free_eq;
	}

	if (nob < LNET_PING_INFO_SIZE(0)) {
		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(0));
		goto fail_free_eq;
	}

	if (pbuf->pb_info.pi_nnis < n_ids)
		n_ids = pbuf->pb_info.pi_nnis;

	if (nob < LNET_PING_INFO_SIZE(n_ids)) {
		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(n_ids));
		goto fail_free_eq;
	}
	rc = -EFAULT;		/* if I SEGV... */

	memset(&tmpid, 0, sizeof(tmpid));
	for (i = 0; i < n_ids; i++) {
		tmpid.pid = pbuf->pb_info.pi_pid;
		tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
			goto fail_free_eq;
	}

	rc = pbuf->pb_info.pi_nnis;
 fail_free_eq:
	rc2 = LNetEQFree(eqh);
	if (rc2 != 0)
		CERROR("rc2 %d\n", rc2);

 fail_ping_buffer_decref:
	lnet_ping_buffer_decref(pbuf);
	return rc;
}