4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LNET
34 #include <linux/log2.h>
35 #include <linux/ktime.h>
37 #include <lnet/lib-lnet.h>
39 #define D_LNI D_CONSOLE
41 lnet_t the_lnet; /* THE state of the network */
42 EXPORT_SYMBOL(the_lnet);
44 static char *ip2nets = "";
45 module_param(ip2nets, charp, 0444);
46 MODULE_PARM_DESC(ip2nets, "LNET network <- IP table");
48 static char *networks = "";
49 module_param(networks, charp, 0444);
50 MODULE_PARM_DESC(networks, "local networks");
52 static char *routes = "";
53 module_param(routes, charp, 0444);
54 MODULE_PARM_DESC(routes, "routes to non-local networks");
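/*
 * Illustrative example (not from this file): these strings are normally
 * supplied as module parameters at load time, e.g.
 *   modprobe lnet networks="tcp0(eth0)" routes="o2ib0 192.168.1.1@tcp0"
 * Note that "ip2nets" is an alternative to "networks"; the two are
 * mutually exclusive (see lnet_get_networks() below).
 */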
56 static int rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
57 module_param(rnet_htable_size, int, 0444);
58 MODULE_PARM_DESC(rnet_htable_size, "size of remote network hash table");
60 static int use_tcp_bonding = false;
61 module_param(use_tcp_bonding, int, 0444);
62 MODULE_PARM_DESC(use_tcp_bonding,
63 "Set to 1 to use socklnd bonding. 0 to use Multi-Rail");
65 unsigned int lnet_numa_range = 0;
66 module_param(lnet_numa_range, uint, 0444);
67 MODULE_PARM_DESC(lnet_numa_range,
68 "NUMA range to consider during Multi-Rail selection");
70 static int lnet_max_interfaces = LNET_MAX_INTERFACES_DEFAULT;
71 module_param(lnet_max_interfaces, int, 0444);
72 MODULE_PARM_DESC(lnet_max_interfaces,
73 "Maximum number of interfaces in a node.");
76 * This sequence number keeps track of how many times DLC was used to
77 * update the local NIs. It is incremented when a NI is added or
78 * removed and checked when sending a message to determine if there is
79 * a need to re-run the selection algorithm. See lnet_select_pathway()
80 * for more details on its usage.
82 static atomic_t lnet_dlc_seq_no = ATOMIC_INIT(0);
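/*
 * Usage sketch (inferred from the comment above, not a definitive
 * description of lnet_select_pathway()): a sender would snapshot the
 * counter with atomic_read(&lnet_dlc_seq_no) before selecting a local NI
 * and compare it again afterwards; a changed value means DLC added or
 * removed an NI in the meantime and the selection should be re-run.
 */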
84 static int lnet_ping(lnet_process_id_t id, signed long timeout,
85 lnet_process_id_t __user *ids, int n_ids);
94 lnet_get_networks(void)
99 if (*networks != 0 && *ip2nets != 0) {
100 LCONSOLE_ERROR_MSG(0x101, "Please specify EITHER 'networks' or "
101 "'ip2nets' but not both at once\n");
106 rc = lnet_parse_ip2nets(&nets, ip2nets);
107 return (rc == 0) ? nets : NULL;
117 lnet_init_locks(void)
119 spin_lock_init(&the_lnet.ln_eq_wait_lock);
120 init_waitqueue_head(&the_lnet.ln_eq_waitq);
121 init_waitqueue_head(&the_lnet.ln_rc_waitq);
122 mutex_init(&the_lnet.ln_lnd_mutex);
123 mutex_init(&the_lnet.ln_api_mutex);
127 lnet_fini_locks(void)
131 struct kmem_cache *lnet_mes_cachep; /* MEs kmem_cache */
132 struct kmem_cache *lnet_small_mds_cachep; /* <= LNET_SMALL_MD_SIZE bytes
136 lnet_descriptor_setup(void)
138 /* create specific kmem_cache for MEs and small MDs (i.e., originally
139 * allocated in <size-xxx> kmem_cache).
141 lnet_mes_cachep = kmem_cache_create("lnet_MEs", sizeof(lnet_me_t),
143 if (!lnet_mes_cachep)
146 lnet_small_mds_cachep = kmem_cache_create("lnet_small_MDs",
147 LNET_SMALL_MD_SIZE, 0, 0,
149 if (!lnet_small_mds_cachep)
156 lnet_descriptor_cleanup(void)
159 if (lnet_small_mds_cachep) {
160 kmem_cache_destroy(lnet_small_mds_cachep);
161 lnet_small_mds_cachep = NULL;
164 if (lnet_mes_cachep) {
165 kmem_cache_destroy(lnet_mes_cachep);
166 lnet_mes_cachep = NULL;
171 lnet_create_remote_nets_table(void)
174 struct list_head *hash;
176 LASSERT(the_lnet.ln_remote_nets_hash == NULL);
177 LASSERT(the_lnet.ln_remote_nets_hbits > 0);
178 LIBCFS_ALLOC(hash, LNET_REMOTE_NETS_HASH_SIZE * sizeof(*hash));
180 CERROR("Failed to create remote nets hash table\n");
184 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
185 INIT_LIST_HEAD(&hash[i]);
186 the_lnet.ln_remote_nets_hash = hash;
191 lnet_destroy_remote_nets_table(void)
195 if (the_lnet.ln_remote_nets_hash == NULL)
198 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++)
199 LASSERT(list_empty(&the_lnet.ln_remote_nets_hash[i]));
201 LIBCFS_FREE(the_lnet.ln_remote_nets_hash,
202 LNET_REMOTE_NETS_HASH_SIZE *
203 sizeof(the_lnet.ln_remote_nets_hash[0]));
204 the_lnet.ln_remote_nets_hash = NULL;
208 lnet_destroy_locks(void)
210 if (the_lnet.ln_res_lock != NULL) {
211 cfs_percpt_lock_free(the_lnet.ln_res_lock);
212 the_lnet.ln_res_lock = NULL;
215 if (the_lnet.ln_net_lock != NULL) {
216 cfs_percpt_lock_free(the_lnet.ln_net_lock);
217 the_lnet.ln_net_lock = NULL;
224 lnet_create_locks(void)
228 the_lnet.ln_res_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
229 if (the_lnet.ln_res_lock == NULL)
232 the_lnet.ln_net_lock = cfs_percpt_lock_alloc(lnet_cpt_table());
233 if (the_lnet.ln_net_lock == NULL)
239 lnet_destroy_locks();
243 static void lnet_assert_wire_constants(void)
246 * Wire protocol assertions generated by 'wirecheck'
247 * running on Linux lustre-build 3.10.0-327.el7_lustre.centos.x86_64
248 * #1 SMP Fri Jul 8 13:32:15 EDT 2016 x86_64 x86_64 x86_64 GNU/Linux
249 * with gcc version 4.8.5 20150623 (Red Hat 4.8.5-4) (GCC)
254 CLASSERT(LNET_PROTO_TCP_MAGIC == 0xeebc0ded);
255 CLASSERT(LNET_PROTO_TCP_VERSION_MAJOR == 1);
256 CLASSERT(LNET_PROTO_TCP_VERSION_MINOR == 0);
257 CLASSERT(LNET_MSG_ACK == 0);
258 CLASSERT(LNET_MSG_PUT == 1);
259 CLASSERT(LNET_MSG_GET == 2);
260 CLASSERT(LNET_MSG_REPLY == 3);
261 CLASSERT(LNET_MSG_HELLO == 4);
263 /* Checks for struct lnet_handle_wire */
264 CLASSERT((int)sizeof(struct lnet_handle_wire) == 16);
265 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_interface_cookie) == 0);
266 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_interface_cookie) == 8);
267 CLASSERT((int)offsetof(struct lnet_handle_wire, wh_object_cookie) == 8);
268 CLASSERT((int)sizeof(((struct lnet_handle_wire *)0)->wh_object_cookie) == 8);
270 /* Checks for struct lnet_magicversion */
271 CLASSERT((int)sizeof(struct lnet_magicversion) == 8);
272 CLASSERT((int)offsetof(struct lnet_magicversion, magic) == 0);
273 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->magic) == 4);
274 CLASSERT((int)offsetof(struct lnet_magicversion, version_major) == 4);
275 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_major) == 2);
276 CLASSERT((int)offsetof(struct lnet_magicversion, version_minor) == 6);
277 CLASSERT((int)sizeof(((struct lnet_magicversion *)0)->version_minor) == 2);
279 /* Checks for struct lnet_hdr */
280 CLASSERT((int)sizeof(struct lnet_hdr) == 72);
281 CLASSERT((int)offsetof(struct lnet_hdr, dest_nid) == 0);
282 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_nid) == 8);
283 CLASSERT((int)offsetof(struct lnet_hdr, src_nid) == 8);
284 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_nid) == 8);
285 CLASSERT((int)offsetof(struct lnet_hdr, dest_pid) == 16);
286 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->dest_pid) == 4);
287 CLASSERT((int)offsetof(struct lnet_hdr, src_pid) == 20);
288 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->src_pid) == 4);
289 CLASSERT((int)offsetof(struct lnet_hdr, type) == 24);
290 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->type) == 4);
291 CLASSERT((int)offsetof(struct lnet_hdr, payload_length) == 28);
292 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->payload_length) == 4);
293 CLASSERT((int)offsetof(struct lnet_hdr, msg) == 32);
294 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg) == 40);
297 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.dst_wmd) == 32);
298 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.dst_wmd) == 16);
299 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.match_bits) == 48);
300 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.match_bits) == 8);
301 CLASSERT((int)offsetof(struct lnet_hdr, msg.ack.mlength) == 56);
302 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.ack.mlength) == 4);
305 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ack_wmd) == 32);
306 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ack_wmd) == 16);
307 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.match_bits) == 48);
308 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.match_bits) == 8);
309 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.hdr_data) == 56);
310 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.hdr_data) == 8);
311 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.ptl_index) == 64);
312 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.ptl_index) == 4);
313 CLASSERT((int)offsetof(struct lnet_hdr, msg.put.offset) == 68);
314 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.put.offset) == 4);
317 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.return_wmd) == 32);
318 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.return_wmd) == 16);
319 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.match_bits) == 48);
320 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.match_bits) == 8);
321 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.ptl_index) == 56);
322 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.ptl_index) == 4);
323 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.src_offset) == 60);
324 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.src_offset) == 4);
325 CLASSERT((int)offsetof(struct lnet_hdr, msg.get.sink_length) == 64);
326 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.get.sink_length) == 4);
329 CLASSERT((int)offsetof(struct lnet_hdr, msg.reply.dst_wmd) == 32);
330 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.reply.dst_wmd) == 16);
333 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.incarnation) == 32);
334 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.incarnation) == 8);
335 CLASSERT((int)offsetof(struct lnet_hdr, msg.hello.type) == 40);
336 CLASSERT((int)sizeof(((struct lnet_hdr *)0)->msg.hello.type) == 4);
338 /* Checks for struct lnet_ni_status and related constants */
339 CLASSERT(LNET_NI_STATUS_INVALID == 0x00000000);
340 CLASSERT(LNET_NI_STATUS_UP == 0x15aac0de);
341 CLASSERT(LNET_NI_STATUS_DOWN == 0xdeadface);
343 /* Checks for struct lnet_ni_status */
344 CLASSERT((int)sizeof(struct lnet_ni_status) == 16);
345 CLASSERT((int)offsetof(struct lnet_ni_status, ns_nid) == 0);
346 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_nid) == 8);
347 CLASSERT((int)offsetof(struct lnet_ni_status, ns_status) == 8);
348 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_status) == 4);
349 CLASSERT((int)offsetof(struct lnet_ni_status, ns_unused) == 12);
350 CLASSERT((int)sizeof(((struct lnet_ni_status *)0)->ns_unused) == 4);
352 /* Checks for struct lnet_ping_info and related constants */
353 CLASSERT(LNET_PROTO_PING_MAGIC == 0x70696E67);
354 CLASSERT(LNET_PING_FEAT_INVAL == 0);
355 CLASSERT(LNET_PING_FEAT_BASE == 1);
356 CLASSERT(LNET_PING_FEAT_NI_STATUS == 2);
357 CLASSERT(LNET_PING_FEAT_RTE_DISABLED == 4);
358 CLASSERT(LNET_PING_FEAT_MULTI_RAIL == 8);
359 CLASSERT(LNET_PING_FEAT_BITS == 15);
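/* i.e. the OR of the four non-INVAL feature bits above: 1|2|4|8 == 15 */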
361 /* Checks for struct lnet_ping_info */
362 CLASSERT((int)sizeof(struct lnet_ping_info) == 16);
363 CLASSERT((int)offsetof(struct lnet_ping_info, pi_magic) == 0);
364 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_magic) == 4);
365 CLASSERT((int)offsetof(struct lnet_ping_info, pi_features) == 4);
366 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_features) == 4);
367 CLASSERT((int)offsetof(struct lnet_ping_info, pi_pid) == 8);
368 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_pid) == 4);
369 CLASSERT((int)offsetof(struct lnet_ping_info, pi_nnis) == 12);
370 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_nnis) == 4);
371 CLASSERT((int)offsetof(struct lnet_ping_info, pi_ni) == 16);
372 CLASSERT((int)sizeof(((struct lnet_ping_info *)0)->pi_ni) == 0);
375 static lnd_t *lnet_find_lnd_by_type(__u32 type)
378 struct list_head *tmp;
380 /* holding lnd mutex */
381 list_for_each(tmp, &the_lnet.ln_lnds) {
382 lnd = list_entry(tmp, lnd_t, lnd_list);
384 if (lnd->lnd_type == type)
391 lnet_register_lnd (lnd_t *lnd)
393 mutex_lock(&the_lnet.ln_lnd_mutex);
395 LASSERT(libcfs_isknown_lnd(lnd->lnd_type));
396 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == NULL);
398 list_add_tail(&lnd->lnd_list, &the_lnet.ln_lnds);
399 lnd->lnd_refcount = 0;
401 CDEBUG(D_NET, "%s LND registered\n", libcfs_lnd2str(lnd->lnd_type));
403 mutex_unlock(&the_lnet.ln_lnd_mutex);
405 EXPORT_SYMBOL(lnet_register_lnd);
408 lnet_unregister_lnd (lnd_t *lnd)
410 mutex_lock(&the_lnet.ln_lnd_mutex);
412 LASSERT(lnet_find_lnd_by_type(lnd->lnd_type) == lnd);
413 LASSERT(lnd->lnd_refcount == 0);
415 list_del(&lnd->lnd_list);
416 CDEBUG(D_NET, "%s LND unregistered\n", libcfs_lnd2str(lnd->lnd_type));
418 mutex_unlock(&the_lnet.ln_lnd_mutex);
420 EXPORT_SYMBOL(lnet_unregister_lnd);
423 lnet_counters_get(lnet_counters_t *counters)
425 lnet_counters_t *ctr;
428 memset(counters, 0, sizeof(*counters));
430 lnet_net_lock(LNET_LOCK_EX);
432 cfs_percpt_for_each(ctr, i, the_lnet.ln_counters) {
433 counters->msgs_max += ctr->msgs_max;
434 counters->msgs_alloc += ctr->msgs_alloc;
435 counters->errors += ctr->errors;
436 counters->send_count += ctr->send_count;
437 counters->recv_count += ctr->recv_count;
438 counters->route_count += ctr->route_count;
439 counters->drop_count += ctr->drop_count;
440 counters->send_length += ctr->send_length;
441 counters->recv_length += ctr->recv_length;
442 counters->route_length += ctr->route_length;
443 counters->drop_length += ctr->drop_length;
446 lnet_net_unlock(LNET_LOCK_EX);
448 EXPORT_SYMBOL(lnet_counters_get);
451 lnet_counters_reset(void)
453 lnet_counters_t *counters;
456 lnet_net_lock(LNET_LOCK_EX);
458 cfs_percpt_for_each(counters, i, the_lnet.ln_counters)
459 memset(counters, 0, sizeof(lnet_counters_t));
461 lnet_net_unlock(LNET_LOCK_EX);
465 lnet_res_type2str(int type)
470 case LNET_COOKIE_TYPE_MD:
472 case LNET_COOKIE_TYPE_ME:
474 case LNET_COOKIE_TYPE_EQ:
480 lnet_res_container_cleanup(struct lnet_res_container *rec)
484 if (rec->rec_type == 0) /* not set yet, it's uninitialized */
487 while (!list_empty(&rec->rec_active)) {
488 struct list_head *e = rec->rec_active.next;
491 if (rec->rec_type == LNET_COOKIE_TYPE_EQ) {
492 lnet_eq_free(list_entry(e, lnet_eq_t, eq_list));
494 } else if (rec->rec_type == LNET_COOKIE_TYPE_MD) {
495 lnet_md_free(list_entry(e, lnet_libmd_t, md_list));
497 } else { /* NB: Active MEs should be attached on portals */
504 /* Found a live MD/ME/EQ; the user really should unlink/free
505 * all of them before finalizing LNet, but if someone didn't,
506 * we have to recycle the garbage for them */
507 CERROR("%d active elements on exit of %s container\n",
508 count, lnet_res_type2str(rec->rec_type));
511 if (rec->rec_lh_hash != NULL) {
512 LIBCFS_FREE(rec->rec_lh_hash,
513 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
514 rec->rec_lh_hash = NULL;
517 rec->rec_type = 0; /* mark it as finalized */
521 lnet_res_container_setup(struct lnet_res_container *rec, int cpt, int type)
526 LASSERT(rec->rec_type == 0);
528 rec->rec_type = type;
529 INIT_LIST_HEAD(&rec->rec_active);
531 rec->rec_lh_cookie = (cpt << LNET_COOKIE_TYPE_BITS) | type;
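/* Cookie layout (sketch, inferred from lnet_res_lh_lookup() and
 * lnet_res_lh_initialize() below): the resource type sits in the low
 * LNET_COOKIE_TYPE_BITS, the CPT number in the next LNET_CPT_BITS, and
 * the remaining high bits form a per-container counter advanced by
 * lnet_res_lh_initialize(). */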
533 /* Arbitrary choice of hash table size */
534 LIBCFS_CPT_ALLOC(rec->rec_lh_hash, lnet_cpt_table(), cpt,
535 LNET_LH_HASH_SIZE * sizeof(rec->rec_lh_hash[0]));
536 if (rec->rec_lh_hash == NULL) {
541 for (i = 0; i < LNET_LH_HASH_SIZE; i++)
542 INIT_LIST_HEAD(&rec->rec_lh_hash[i]);
547 CERROR("Failed to setup %s resource container\n",
548 lnet_res_type2str(type));
549 lnet_res_container_cleanup(rec);
554 lnet_res_containers_destroy(struct lnet_res_container **recs)
556 struct lnet_res_container *rec;
559 cfs_percpt_for_each(rec, i, recs)
560 lnet_res_container_cleanup(rec);
562 cfs_percpt_free(recs);
565 static struct lnet_res_container **
566 lnet_res_containers_create(int type)
568 struct lnet_res_container **recs;
569 struct lnet_res_container *rec;
573 recs = cfs_percpt_alloc(lnet_cpt_table(), sizeof(*rec));
575 CERROR("Failed to allocate %s resource containers\n",
576 lnet_res_type2str(type));
580 cfs_percpt_for_each(rec, i, recs) {
581 rc = lnet_res_container_setup(rec, i, type);
583 lnet_res_containers_destroy(recs);
592 lnet_res_lh_lookup(struct lnet_res_container *rec, __u64 cookie)
594 /* ALWAYS called with lnet_res_lock held */
595 struct list_head *head;
596 lnet_libhandle_t *lh;
599 if ((cookie & LNET_COOKIE_MASK) != rec->rec_type)
602 hash = cookie >> (LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS);
603 head = &rec->rec_lh_hash[hash & LNET_LH_HASH_MASK];
605 list_for_each_entry(lh, head, lh_hash_chain) {
606 if (lh->lh_cookie == cookie)
614 lnet_res_lh_initialize(struct lnet_res_container *rec, lnet_libhandle_t *lh)
616 /* ALWAYS called with lnet_res_lock held */
617 unsigned int ibits = LNET_COOKIE_TYPE_BITS + LNET_CPT_BITS;
620 lh->lh_cookie = rec->rec_lh_cookie;
621 rec->rec_lh_cookie += 1 << ibits;
623 hash = (lh->lh_cookie >> ibits) & LNET_LH_HASH_MASK;
625 list_add(&lh->lh_hash_chain, &rec->rec_lh_hash[hash]);
628 static int lnet_unprepare(void);
631 lnet_prepare(lnet_pid_t requested_pid)
633 /* Prepare to bring up the network */
634 struct lnet_res_container **recs;
637 if (requested_pid == LNET_PID_ANY) {
638 /* Don't instantiate LNET just for me */
642 LASSERT(the_lnet.ln_refcount == 0);
644 the_lnet.ln_routing = 0;
646 LASSERT((requested_pid & LNET_PID_USERFLAG) == 0);
647 the_lnet.ln_pid = requested_pid;
649 INIT_LIST_HEAD(&the_lnet.ln_test_peers);
650 INIT_LIST_HEAD(&the_lnet.ln_remote_peer_ni_list);
651 INIT_LIST_HEAD(&the_lnet.ln_nets);
652 INIT_LIST_HEAD(&the_lnet.ln_routers);
653 INIT_LIST_HEAD(&the_lnet.ln_drop_rules);
654 INIT_LIST_HEAD(&the_lnet.ln_delay_rules);
656 rc = lnet_descriptor_setup();
660 rc = lnet_create_remote_nets_table();
665 * NB the interface cookie in wire handles guards against delayed
666 * replies and ACKs appearing valid after reboot.
668 the_lnet.ln_interface_cookie = ktime_get_real_ns();
670 the_lnet.ln_counters = cfs_percpt_alloc(lnet_cpt_table(),
671 sizeof(lnet_counters_t));
672 if (the_lnet.ln_counters == NULL) {
673 CERROR("Failed to allocate counters for LNet\n");
678 rc = lnet_peer_tables_create();
682 rc = lnet_msg_containers_create();
686 rc = lnet_res_container_setup(&the_lnet.ln_eq_container, 0,
687 LNET_COOKIE_TYPE_EQ);
691 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_ME);
697 the_lnet.ln_me_containers = recs;
699 recs = lnet_res_containers_create(LNET_COOKIE_TYPE_MD);
705 the_lnet.ln_md_containers = recs;
707 rc = lnet_portals_create();
709 CERROR("Failed to create portals for LNet: %d\n", rc);
721 lnet_unprepare (void)
723 /* NB no LNET_LOCK since this is the last reference. All LND instances
724 * have shut down already, so it is safe to unlink and free all
725 * descriptors, even those that appear committed to a network op (e.g. MD
726 * with non-zero pending count) */
728 lnet_fail_nid(LNET_NID_ANY, 0);
730 LASSERT(the_lnet.ln_refcount == 0);
731 LASSERT(list_empty(&the_lnet.ln_test_peers));
732 LASSERT(list_empty(&the_lnet.ln_nets));
734 lnet_portals_destroy();
736 if (the_lnet.ln_md_containers != NULL) {
737 lnet_res_containers_destroy(the_lnet.ln_md_containers);
738 the_lnet.ln_md_containers = NULL;
741 if (the_lnet.ln_me_containers != NULL) {
742 lnet_res_containers_destroy(the_lnet.ln_me_containers);
743 the_lnet.ln_me_containers = NULL;
746 lnet_res_container_cleanup(&the_lnet.ln_eq_container);
748 lnet_msg_containers_destroy();
750 lnet_rtrpools_free(0);
752 if (the_lnet.ln_counters != NULL) {
753 cfs_percpt_free(the_lnet.ln_counters);
754 the_lnet.ln_counters = NULL;
756 lnet_destroy_remote_nets_table();
757 lnet_descriptor_cleanup();
763 lnet_net2ni_locked(__u32 net_id, int cpt)
766 struct lnet_net *net;
768 LASSERT(cpt != LNET_LOCK_EX);
770 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
771 if (net->net_id == net_id) {
772 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
782 lnet_net2ni_addref(__u32 net)
787 ni = lnet_net2ni_locked(net, 0);
789 lnet_ni_addref_locked(ni, 0);
794 EXPORT_SYMBOL(lnet_net2ni_addref);
797 lnet_get_net_locked(__u32 net_id)
799 struct lnet_net *net;
801 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
802 if (net->net_id == net_id)
810 lnet_nid_cpt_hash(lnet_nid_t nid, unsigned int number)
815 LASSERT(number >= 1 && number <= LNET_CPT_NUMBER);
820 val = hash_long(key, LNET_CPT_BITS);
821 /* NB: LNET_CPT_NUMBER doesn't have to be a power of 2 */
825 return (unsigned int)(key + val + (val >> 1)) % number;
829 lnet_cpt_of_nid_locked(lnet_nid_t nid, struct lnet_ni *ni)
831 struct lnet_net *net;
833 /* must be called with lnet_net_lock held */
834 if (LNET_CPT_NUMBER == 1)
835 return 0; /* the only one */
838 * If NI is provided then use the CPT identified in the NI cpt
839 * list if one exists. If one doesn't exist, then that NI is
840 * associated with all CPTs and it follows that the net it belongs
841 * to is implicitly associated with all CPTs, so just hash the nid
845 if (ni->ni_cpts != NULL)
846 return ni->ni_cpts[lnet_nid_cpt_hash(nid,
849 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
852 /* no NI provided so look at the net */
853 net = lnet_get_net_locked(LNET_NIDNET(nid));
855 if (net != NULL && net->net_cpts != NULL) {
856 return net->net_cpts[lnet_nid_cpt_hash(nid, net->net_ncpts)];
859 return lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
863 lnet_cpt_of_nid(lnet_nid_t nid, struct lnet_ni *ni)
868 if (LNET_CPT_NUMBER == 1)
869 return 0; /* the only one */
871 cpt = lnet_net_lock_current();
873 cpt2 = lnet_cpt_of_nid_locked(nid, ni);
875 lnet_net_unlock(cpt);
879 EXPORT_SYMBOL(lnet_cpt_of_nid);
882 lnet_islocalnet(__u32 net_id)
884 struct lnet_net *net;
888 cpt = lnet_net_lock_current();
890 net = lnet_get_net_locked(net_id);
894 lnet_net_unlock(cpt);
900 lnet_is_ni_healthy_locked(struct lnet_ni *ni)
902 if (ni->ni_state == LNET_NI_STATE_ACTIVE ||
903 ni->ni_state == LNET_NI_STATE_DEGRADED)
910 lnet_nid2ni_locked(lnet_nid_t nid, int cpt)
912 struct lnet_net *net;
915 LASSERT(cpt != LNET_LOCK_EX);
917 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
918 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
919 if (ni->ni_nid == nid)
928 lnet_nid2ni_addref(lnet_nid_t nid)
933 ni = lnet_nid2ni_locked(nid, 0);
935 lnet_ni_addref_locked(ni, 0);
940 EXPORT_SYMBOL(lnet_nid2ni_addref);
943 lnet_islocalnid(lnet_nid_t nid)
948 cpt = lnet_net_lock_current();
949 ni = lnet_nid2ni_locked(nid, cpt);
950 lnet_net_unlock(cpt);
956 lnet_count_acceptor_nets(void)
958 /* Return the # of NIs that need the acceptor. */
960 struct lnet_net *net;
963 cpt = lnet_net_lock_current();
964 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
965 /* all socklnd type networks should have the acceptor
967 if (net->net_lnd->lnd_accept != NULL)
971 lnet_net_unlock(cpt);
976 struct lnet_ping_buffer *
977 lnet_ping_buffer_alloc(int nnis, gfp_t gfp)
979 struct lnet_ping_buffer *pbuf;
981 LIBCFS_ALLOC_GFP(pbuf, LNET_PING_BUFFER_SIZE(nnis), gfp);
983 pbuf->pb_nnis = nnis;
984 atomic_set(&pbuf->pb_refcnt, 1);
991 lnet_ping_buffer_free(struct lnet_ping_buffer *pbuf)
993 LASSERT(lnet_ping_buffer_numref(pbuf) == 0);
994 LIBCFS_FREE(pbuf, LNET_PING_BUFFER_SIZE(pbuf->pb_nnis));
997 static struct lnet_ping_buffer *
998 lnet_ping_target_create(int nnis)
1000 struct lnet_ping_buffer *pbuf;
1002 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1004 CERROR("Can't allocate ping source [%d]\n", nnis);
1008 pbuf->pb_info.pi_nnis = nnis;
1009 pbuf->pb_info.pi_pid = the_lnet.ln_pid;
1010 pbuf->pb_info.pi_magic = LNET_PROTO_PING_MAGIC;
1011 pbuf->pb_info.pi_features =
1012 LNET_PING_FEAT_NI_STATUS | LNET_PING_FEAT_MULTI_RAIL;
1018 lnet_get_net_ni_count_locked(struct lnet_net *net)
1023 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1030 lnet_get_net_ni_count_pre(struct lnet_net *net)
1035 list_for_each_entry(ni, &net->net_ni_added, ni_netlist)
1042 lnet_get_ni_count(void)
1045 struct lnet_net *net;
1050 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1051 list_for_each_entry(ni, &net->net_ni_list, ni_netlist)
1061 lnet_ping_info_validate(struct lnet_ping_info *pinfo)
1065 if (pinfo->pi_magic != LNET_PROTO_PING_MAGIC)
1067 if (!(pinfo->pi_features & LNET_PING_FEAT_NI_STATUS))
1069 /* Loopback is guaranteed to be present */
1070 if (pinfo->pi_nnis < 1 || pinfo->pi_nnis > lnet_max_interfaces)
1072 if (LNET_NETTYP(LNET_NIDNET(LNET_PING_INFO_LONI(pinfo))) != LOLND)
1078 lnet_ping_target_destroy(void)
1080 struct lnet_net *net;
1083 lnet_net_lock(LNET_LOCK_EX);
1085 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1086 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1088 ni->ni_status = NULL;
1093 lnet_ping_buffer_decref(the_lnet.ln_ping_target);
1094 the_lnet.ln_ping_target = NULL;
1096 lnet_net_unlock(LNET_LOCK_EX);
1100 lnet_ping_target_event_handler(lnet_event_t *event)
1102 struct lnet_ping_buffer *pbuf = event->md.user_ptr;
1104 if (event->unlinked)
1105 lnet_ping_buffer_decref(pbuf);
1109 lnet_ping_target_setup(struct lnet_ping_buffer **ppbuf,
1110 lnet_handle_md_t *ping_mdh, int ni_count, bool set_eq)
1112 lnet_handle_me_t me_handle;
1113 lnet_process_id_t id = {LNET_NID_ANY, LNET_PID_ANY};
1114 lnet_md_t md = {NULL};
1118 rc = LNetEQAlloc(0, lnet_ping_target_event_handler,
1119 &the_lnet.ln_ping_target_eq);
1121 CERROR("Can't allocate ping buffer EQ: %d\n", rc);
1126 *ppbuf = lnet_ping_target_create(ni_count);
1127 if (*ppbuf == NULL) {
1132 /* Ping target ME/MD */
1133 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1134 LNET_PROTO_PING_MATCHBITS, 0,
1135 LNET_UNLINK, LNET_INS_AFTER,
1138 CERROR("Can't create ping target ME: %d\n", rc);
1139 goto fail_decref_ping_buffer;
1142 /* initialize md content */
1143 md.start = &(*ppbuf)->pb_info;
1144 md.length = LNET_PING_INFO_SIZE((*ppbuf)->pb_nnis);
1145 md.threshold = LNET_MD_THRESH_INF;
1147 md.options = LNET_MD_OP_GET | LNET_MD_TRUNCATE |
1148 LNET_MD_MANAGE_REMOTE;
1149 md.eq_handle = the_lnet.ln_ping_target_eq;
1150 md.user_ptr = *ppbuf;
1152 rc = LNetMDAttach(me_handle, md, LNET_RETAIN, ping_mdh);
1154 CERROR("Can't attach ping target MD: %d\n", rc);
1155 goto fail_unlink_ping_me;
1157 lnet_ping_buffer_addref(*ppbuf);
1161 fail_unlink_ping_me:
1162 rc2 = LNetMEUnlink(me_handle);
1164 fail_decref_ping_buffer:
1165 LASSERT(lnet_ping_buffer_numref(*ppbuf) == 1);
1166 lnet_ping_buffer_decref(*ppbuf);
1170 rc2 = LNetEQFree(the_lnet.ln_ping_target_eq);
1177 lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf, lnet_handle_md_t *ping_mdh)
1179 sigset_t blocked = cfs_block_allsigs();
1181 LNetMDUnlink(*ping_mdh);
1182 LNetInvalidateHandle(ping_mdh);
1184 /* NB the MD could be busy; this just starts the unlink */
1185 while (lnet_ping_buffer_numref(pbuf) > 1) {
1186 CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
1187 set_current_state(TASK_UNINTERRUPTIBLE);
1188 schedule_timeout(cfs_time_seconds(1));
1191 cfs_restore_sigs(blocked);
1195 lnet_ping_target_install_locked(struct lnet_ping_buffer *pbuf)
1198 struct lnet_net *net;
1199 struct lnet_ni_status *ns;
1204 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
1205 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
1206 LASSERT(i < pbuf->pb_nnis);
1208 ns = &pbuf->pb_info.pi_ni[i];
1210 ns->ns_nid = ni->ni_nid;
1213 ns->ns_status = (ni->ni_status != NULL) ?
1214 ni->ni_status->ns_status :
1223 * We (ab)use the ns_status of the loopback interface to
1224 * transmit the sequence number. The first interface listed
1225 * must be the loopback interface.
1227 rc = lnet_ping_info_validate(&pbuf->pb_info);
1229 LCONSOLE_EMERG("Invalid ping target: %d\n", rc);
1232 LNET_PING_BUFFER_SEQNO(pbuf) =
1233 atomic_inc_return(&the_lnet.ln_ping_target_seqno);
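/* Per the comment above, LNET_PING_BUFFER_SEQNO() is assumed here to map
 * onto the loopback interface's ns_status slot (pi_ni[0]). */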
1237 lnet_ping_target_update(struct lnet_ping_buffer *pbuf,
1238 lnet_handle_md_t ping_mdh)
1240 struct lnet_ping_buffer *old_pbuf = NULL;
1241 lnet_handle_md_t old_ping_md;
1243 /* switch the NIs to point to the newly created ping info */
1244 lnet_net_lock(LNET_LOCK_EX);
1246 if (!the_lnet.ln_routing)
1247 pbuf->pb_info.pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1249 /* Ensure only known feature bits have been set. */
1250 LASSERT(pbuf->pb_info.pi_features & LNET_PING_FEAT_BITS);
1251 LASSERT(!(pbuf->pb_info.pi_features & ~LNET_PING_FEAT_BITS));
1253 lnet_ping_target_install_locked(pbuf);
1255 if (the_lnet.ln_ping_target) {
1256 old_pbuf = the_lnet.ln_ping_target;
1257 old_ping_md = the_lnet.ln_ping_target_md;
1259 the_lnet.ln_ping_target_md = ping_mdh;
1260 the_lnet.ln_ping_target = pbuf;
1262 lnet_net_unlock(LNET_LOCK_EX);
1265 /* unlink and free the old ping info */
1266 lnet_ping_md_unlink(old_pbuf, &old_ping_md);
1267 lnet_ping_buffer_decref(old_pbuf);
1272 lnet_ping_target_fini(void)
1276 lnet_ping_md_unlink(the_lnet.ln_ping_target,
1277 &the_lnet.ln_ping_target_md);
1279 rc = LNetEQFree(the_lnet.ln_ping_target_eq);
1282 lnet_ping_target_destroy();
1285 /* Resize the push target. */
1286 int lnet_push_target_resize(void)
1288 lnet_process_id_t id = { LNET_NID_ANY, LNET_PID_ANY };
1289 lnet_md_t md = { NULL };
1290 lnet_handle_me_t meh;
1291 lnet_handle_md_t mdh;
1292 lnet_handle_md_t old_mdh;
1293 struct lnet_ping_buffer *pbuf;
1294 struct lnet_ping_buffer *old_pbuf;
1295 int nnis = the_lnet.ln_push_target_nnis;
1303 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
1309 rc = LNetMEAttach(LNET_RESERVED_PORTAL, id,
1310 LNET_PROTO_PING_MATCHBITS, 0,
1311 LNET_UNLINK, LNET_INS_AFTER,
1314 CERROR("Can't create push target ME: %d\n", rc);
1315 goto fail_decref_pbuf;
1318 /* initialize md content */
1319 md.start = &pbuf->pb_info;
1320 md.length = LNET_PING_INFO_SIZE(nnis);
1321 md.threshold = LNET_MD_THRESH_INF;
1323 md.options = LNET_MD_OP_PUT | LNET_MD_TRUNCATE |
1324 LNET_MD_MANAGE_REMOTE;
1326 md.eq_handle = the_lnet.ln_push_target_eq;
1328 rc = LNetMDAttach(meh, md, LNET_RETAIN, &mdh);
1330 CERROR("Can't attach push MD: %d\n", rc);
1331 goto fail_unlink_meh;
1333 lnet_ping_buffer_addref(pbuf);
1335 lnet_net_lock(LNET_LOCK_EX);
1336 old_pbuf = the_lnet.ln_push_target;
1337 old_mdh = the_lnet.ln_push_target_md;
1338 the_lnet.ln_push_target = pbuf;
1339 the_lnet.ln_push_target_md = mdh;
1340 lnet_net_unlock(LNET_LOCK_EX);
1343 LNetMDUnlink(old_mdh);
1344 lnet_ping_buffer_decref(old_pbuf);
1347 if (nnis < the_lnet.ln_push_target_nnis)
1350 CDEBUG(D_NET, "nnis %d success\n", nnis);
1357 lnet_ping_buffer_decref(pbuf);
1359 CDEBUG(D_NET, "nnis %d error %d\n", nnis, rc);
1363 static void lnet_push_target_event_handler(struct lnet_event *ev)
1365 struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1367 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
1368 lnet_swap_pinginfo(pbuf);
1369 lnet_peer_push_event(ev);
1371 lnet_ping_buffer_decref(pbuf);
1374 /* Initialize the push target. */
1375 static int lnet_push_target_init(void)
1379 if (the_lnet.ln_push_target)
1382 rc = LNetEQAlloc(0, lnet_push_target_event_handler,
1383 &the_lnet.ln_push_target_eq);
1385 CERROR("Can't allocated push target EQ: %d\n", rc);
1389 /* Start at the required minimum; we'll enlarge if needed. */
1390 the_lnet.ln_push_target_nnis = LNET_MIN_INTERFACES;
1392 rc = lnet_push_target_resize();
1395 LNetEQFree(the_lnet.ln_push_target_eq);
1396 LNetInvalidateHandle(&the_lnet.ln_push_target_eq);
1402 /* Clean up the push target. */
1403 static void lnet_push_target_fini(void)
1405 if (!the_lnet.ln_push_target)
1408 /* Unlink and invalidate to prevent new references. */
1409 LNetMDUnlink(the_lnet.ln_push_target_md);
1410 LNetInvalidateHandle(&the_lnet.ln_push_target_md);
1412 /* Wait for the unlink to complete. */
1413 while (lnet_ping_buffer_numref(the_lnet.ln_push_target) > 1) {
1414 CDEBUG(D_NET, "Still waiting for push target MD to unlink\n");
1415 set_current_state(TASK_UNINTERRUPTIBLE);
1416 schedule_timeout(cfs_time_seconds(1));
1419 lnet_ping_buffer_decref(the_lnet.ln_push_target);
1420 the_lnet.ln_push_target = NULL;
1421 the_lnet.ln_push_target_nnis = 0;
1423 LNetEQFree(the_lnet.ln_push_target_eq);
1424 LNetInvalidateHandle(&the_lnet.ln_push_target_eq);
1428 lnet_ni_tq_credits(lnet_ni_t *ni)
1432 LASSERT(ni->ni_ncpts >= 1);
1434 if (ni->ni_ncpts == 1)
1435 return ni->ni_net->net_tunables.lct_max_tx_credits;
1437 credits = ni->ni_net->net_tunables.lct_max_tx_credits / ni->ni_ncpts;
1438 credits = max(credits, 8 * ni->ni_net->net_tunables.lct_peer_tx_credits);
1439 credits = min(credits, ni->ni_net->net_tunables.lct_max_tx_credits);
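/* Worked example (assumed tunable values, for illustration only): with
 * lct_max_tx_credits = 256, lct_peer_tx_credits = 8 and ni_ncpts = 4,
 * each TX queue gets max(256 / 4, 8 * 8) = 64 credits, capped at 256. */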
1445 lnet_ni_unlink_locked(lnet_ni_t *ni)
1447 if (!list_empty(&ni->ni_cptlist)) {
1448 list_del_init(&ni->ni_cptlist);
1449 lnet_ni_decref_locked(ni, 0);
1452 /* move it to the zombie list so nobody can find it anymore */
1453 LASSERT(!list_empty(&ni->ni_netlist));
1454 list_move(&ni->ni_netlist, &ni->ni_net->net_ni_zombie);
1455 lnet_ni_decref_locked(ni, 0);
1459 lnet_clear_zombies_nis_locked(struct lnet_net *net)
1464 struct list_head *zombie_list = &net->net_ni_zombie;
1467 * Now wait for the NIs I just nuked to show up on the zombie
1468 * list and shut them down in guaranteed thread context
1471 while (!list_empty(zombie_list)) {
1475 ni = list_entry(zombie_list->next,
1476 lnet_ni_t, ni_netlist);
1477 list_del_init(&ni->ni_netlist);
1478 /* the ni should be in deleting state. If it's not it's
1480 LASSERT(ni->ni_state == LNET_NI_STATE_DELETING);
1481 cfs_percpt_for_each(ref, j, ni->ni_refs) {
1484 /* still busy, add it back to zombie list */
1485 list_add(&ni->ni_netlist, zombie_list);
1489 if (!list_empty(&ni->ni_netlist)) {
1490 lnet_net_unlock(LNET_LOCK_EX);
1492 if ((i & (-i)) == i) {
1494 "Waiting for zombie LNI %s\n",
1495 libcfs_nid2str(ni->ni_nid));
1497 set_current_state(TASK_UNINTERRUPTIBLE);
1498 schedule_timeout(cfs_time_seconds(1));
1499 lnet_net_lock(LNET_LOCK_EX);
1503 lnet_net_unlock(LNET_LOCK_EX);
1505 islo = ni->ni_net->net_lnd->lnd_type == LOLND;
1507 LASSERT(!in_interrupt());
1508 (net->net_lnd->lnd_shutdown)(ni);
1511 CDEBUG(D_LNI, "Removed LNI %s\n",
1512 libcfs_nid2str(ni->ni_nid));
1516 lnet_net_lock(LNET_LOCK_EX);
1520 /* shut down the NI and release its refcount */
1522 lnet_shutdown_lndni(struct lnet_ni *ni)
1525 struct lnet_net *net = ni->ni_net;
1527 lnet_net_lock(LNET_LOCK_EX);
1528 ni->ni_state = LNET_NI_STATE_DELETING;
1529 lnet_ni_unlink_locked(ni);
1530 lnet_incr_dlc_seq();
1531 lnet_net_unlock(LNET_LOCK_EX);
1533 /* clear messages for this NI on the lazy portal */
1534 for (i = 0; i < the_lnet.ln_nportals; i++)
1535 lnet_clear_lazy_portal(ni, i, "Shutting down NI");
1537 lnet_net_lock(LNET_LOCK_EX);
1538 lnet_clear_zombies_nis_locked(net);
1539 lnet_net_unlock(LNET_LOCK_EX);
1543 lnet_shutdown_lndnet(struct lnet_net *net)
1547 lnet_net_lock(LNET_LOCK_EX);
1549 net->net_state = LNET_NET_STATE_DELETING;
1551 list_del_init(&net->net_list);
1553 while (!list_empty(&net->net_ni_list)) {
1554 ni = list_entry(net->net_ni_list.next,
1555 lnet_ni_t, ni_netlist);
1556 lnet_net_unlock(LNET_LOCK_EX);
1557 lnet_shutdown_lndni(ni);
1558 lnet_net_lock(LNET_LOCK_EX);
1561 lnet_net_unlock(LNET_LOCK_EX);
1563 /* Do peer table cleanup for this net */
1564 lnet_peer_tables_cleanup(net);
1566 lnet_net_lock(LNET_LOCK_EX);
1568 * decrement ref count on lnd only when the entire network goes
1571 net->net_lnd->lnd_refcount--;
1573 lnet_net_unlock(LNET_LOCK_EX);
1579 lnet_shutdown_lndnets(void)
1581 struct lnet_net *net;
1583 /* NB called holding the global mutex */
1585 /* All quiet on the API front */
1586 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
1587 LASSERT(the_lnet.ln_refcount == 0);
1589 lnet_net_lock(LNET_LOCK_EX);
1590 the_lnet.ln_state = LNET_STATE_STOPPING;
1592 while (!list_empty(&the_lnet.ln_nets)) {
1594 * move the nets to the zombie list to avoid them being
1595 * picked up for new work. LONET is also included in the
1596 * Nets that will be moved to the zombie list
1598 net = list_entry(the_lnet.ln_nets.next,
1599 struct lnet_net, net_list);
1600 list_move(&net->net_list, &the_lnet.ln_net_zombie);
1603 /* Drop the cached loopback Net. */
1604 if (the_lnet.ln_loni != NULL) {
1605 lnet_ni_decref_locked(the_lnet.ln_loni, 0);
1606 the_lnet.ln_loni = NULL;
1608 lnet_net_unlock(LNET_LOCK_EX);
1610 /* iterate through the net zombie list and delete each net */
1611 while (!list_empty(&the_lnet.ln_net_zombie)) {
1612 net = list_entry(the_lnet.ln_net_zombie.next,
1613 struct lnet_net, net_list);
1614 lnet_shutdown_lndnet(net);
1617 lnet_net_lock(LNET_LOCK_EX);
1618 the_lnet.ln_state = LNET_STATE_SHUTDOWN;
1619 lnet_net_unlock(LNET_LOCK_EX);
1623 lnet_startup_lndni(struct lnet_ni *ni, struct lnet_lnd_tunables *tun)
1626 struct lnet_tx_queue *tq;
1628 struct lnet_net *net = ni->ni_net;
1630 mutex_lock(&the_lnet.ln_lnd_mutex);
1633 memcpy(&ni->ni_lnd_tunables, tun, sizeof(*tun));
1634 ni->ni_lnd_tunables_set = true;
1637 rc = (net->net_lnd->lnd_startup)(ni);
1639 mutex_unlock(&the_lnet.ln_lnd_mutex);
1642 LCONSOLE_ERROR_MSG(0x105, "Error %d starting up LNI %s\n",
1643 rc, libcfs_lnd2str(net->net_lnd->lnd_type));
1644 lnet_net_lock(LNET_LOCK_EX);
1645 net->net_lnd->lnd_refcount--;
1646 lnet_net_unlock(LNET_LOCK_EX);
1650 ni->ni_state = LNET_NI_STATE_ACTIVE;
1652 /* We keep a reference on the loopback net through the loopback NI */
1653 if (net->net_lnd->lnd_type == LOLND) {
1655 LASSERT(the_lnet.ln_loni == NULL);
1656 the_lnet.ln_loni = ni;
1657 ni->ni_net->net_tunables.lct_peer_tx_credits = 0;
1658 ni->ni_net->net_tunables.lct_peer_rtr_credits = 0;
1659 ni->ni_net->net_tunables.lct_max_tx_credits = 0;
1660 ni->ni_net->net_tunables.lct_peer_timeout = 0;
1664 if (ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ||
1665 ni->ni_net->net_tunables.lct_max_tx_credits == 0) {
1666 LCONSOLE_ERROR_MSG(0x107, "LNI %s has no %scredits\n",
1667 libcfs_lnd2str(net->net_lnd->lnd_type),
1668 ni->ni_net->net_tunables.lct_peer_tx_credits == 0 ?
1670 /* shut down the NI since if we get here then it must've already
1673 lnet_shutdown_lndni(ni);
1677 cfs_percpt_for_each(tq, i, ni->ni_tx_queues) {
1678 tq->tq_credits_min =
1679 tq->tq_credits_max =
1680 tq->tq_credits = lnet_ni_tq_credits(ni);
1683 atomic_set(&ni->ni_tx_credits,
1684 lnet_ni_tq_credits(ni) * ni->ni_ncpts);
1686 CDEBUG(D_LNI, "Added LNI %s [%d/%d/%d/%d]\n",
1687 libcfs_nid2str(ni->ni_nid),
1688 ni->ni_net->net_tunables.lct_peer_tx_credits,
1689 lnet_ni_tq_credits(ni) * LNET_CPT_NUMBER,
1690 ni->ni_net->net_tunables.lct_peer_rtr_credits,
1691 ni->ni_net->net_tunables.lct_peer_timeout);
1700 lnet_startup_lndnet(struct lnet_net *net, struct lnet_lnd_tunables *tun)
1703 struct lnet_net *net_l = NULL;
1704 struct list_head local_ni_list;
1710 net->net_tunables.lct_peer_timeout;
1712 net->net_tunables.lct_max_tx_credits;
1713 int peerrtrcredits =
1714 net->net_tunables.lct_peer_rtr_credits;
1716 INIT_LIST_HEAD(&local_ni_list);
1719 * make sure that this net is unique. If it isn't then
1720 * we are adding interfaces to an already existing network, and
1721 * 'net' is just a convenient way to pass in the list.
1722 * If it is unique we need to find the LND and load it if
1725 if (lnet_net_unique(net->net_id, &the_lnet.ln_nets, &net_l)) {
1726 lnd_type = LNET_NETTYP(net->net_id);
1728 LASSERT(libcfs_isknown_lnd(lnd_type));
1730 if (lnd_type == CIBLND || lnd_type == OPENIBLND ||
1731 lnd_type == IIBLND || lnd_type == VIBLND) {
1732 CERROR("LND %s obsoleted\n", libcfs_lnd2str(lnd_type));
1737 mutex_lock(&the_lnet.ln_lnd_mutex);
1738 lnd = lnet_find_lnd_by_type(lnd_type);
1741 mutex_unlock(&the_lnet.ln_lnd_mutex);
1742 rc = request_module("%s", libcfs_lnd2modname(lnd_type));
1743 mutex_lock(&the_lnet.ln_lnd_mutex);
1745 lnd = lnet_find_lnd_by_type(lnd_type);
1747 mutex_unlock(&the_lnet.ln_lnd_mutex);
1748 CERROR("Can't load LND %s, module %s, rc=%d\n",
1749 libcfs_lnd2str(lnd_type),
1750 libcfs_lnd2modname(lnd_type), rc);
1751 #ifndef HAVE_MODULE_LOADING_SUPPORT
1752 LCONSOLE_ERROR_MSG(0x104, "Your kernel must be "
1753 "compiled with kernel module "
1754 "loading support.");
1761 lnet_net_lock(LNET_LOCK_EX);
1762 lnd->lnd_refcount++;
1763 lnet_net_unlock(LNET_LOCK_EX);
1767 mutex_unlock(&the_lnet.ln_lnd_mutex);
1773 * net_l: if the network being added is unique then net_l
1774 * will point to that network
1775 * if the network being added is not unique then
1776 * net_l points to the existing network.
1778 * When we enter the loop below, we'll pick NIs off the
1779 * network being added and start them up, then add them to
1780 * a local ni list. Once we've successfully started all
1781 * the NIs then we join the local NI list (of started up
1782 * networks) with the net_l->net_ni_list, which should
1783 * point to the correct network to add the new ni list to
1785 * If any of the new NIs fail to start up, then we want to
1786 * iterate through the local ni list, which should include
1787 * any NIs which were successfully started up, and shut
1790 * After that we want to delete the network being added,
1791 * to avoid a memory leak.
1795 * When a network uses TCP bonding then all its interfaces
1796 * must be specified when the network is first defined: the
1797 * TCP bonding code doesn't allow for interfaces to be added
1800 if (net_l != net && net_l != NULL && use_tcp_bonding &&
1801 LNET_NETTYP(net_l->net_id) == SOCKLND) {
1806 while (!list_empty(&net->net_ni_added)) {
1807 ni = list_entry(net->net_ni_added.next, struct lnet_ni,
1809 list_del_init(&ni->ni_netlist);
1811 /* make sure that the NI we're about to start
1812 * up is actually unique. If it's not, fail. */
1813 if (!lnet_ni_unique_net(&net_l->net_ni_list,
1814 ni->ni_interfaces[0])) {
1819 /* adjust the pointer to the parent network, just in case
1820 * the net is a duplicate */
1823 rc = lnet_startup_lndni(ni, tun);
1825 LASSERT(ni->ni_net->net_tunables.lct_peer_timeout <= 0 ||
1826 ni->ni_net->net_lnd->lnd_query != NULL);
1832 list_add_tail(&ni->ni_netlist, &local_ni_list);
1837 lnet_net_lock(LNET_LOCK_EX);
1838 list_splice_tail(&local_ni_list, &net_l->net_ni_list);
1839 lnet_incr_dlc_seq();
1840 lnet_net_unlock(LNET_LOCK_EX);
1842 /* if the network is not unique then we don't want to keep
1843 * it around after we're done. Free it. Otherwise add that
1844 * net to the global the_lnet.ln_nets */
1845 if (net_l != net && net_l != NULL) {
1847 * TODO - note. currently the tunables can not be updated
1852 net->net_state = LNET_NET_STATE_ACTIVE;
1854 * restore tunables after they have been overwritten by the
1857 if (peer_timeout != -1)
1858 net->net_tunables.lct_peer_timeout = peer_timeout;
1859 if (maxtxcredits != -1)
1860 net->net_tunables.lct_max_tx_credits = maxtxcredits;
1861 if (peerrtrcredits != -1)
1862 net->net_tunables.lct_peer_rtr_credits = peerrtrcredits;
1864 lnet_net_lock(LNET_LOCK_EX);
1865 list_add_tail(&net->net_list, &the_lnet.ln_nets);
1866 lnet_net_unlock(LNET_LOCK_EX);
1873 * shut down the new NIs that are being started up and
1874 * free the NET being started
1876 while (!list_empty(&local_ni_list)) {
1877 ni = list_entry(local_ni_list.next, struct lnet_ni,
1880 lnet_shutdown_lndni(ni);
1890 lnet_startup_lndnets(struct list_head *netlist)
1892 struct lnet_net *net;
1897 * Change to running state before bringing up the LNDs. This
1898 * allows lnet_shutdown_lndnets() to assert that we've passed
1901 lnet_net_lock(LNET_LOCK_EX);
1902 the_lnet.ln_state = LNET_STATE_RUNNING;
1903 lnet_net_unlock(LNET_LOCK_EX);
1905 while (!list_empty(netlist)) {
1906 net = list_entry(netlist->next, struct lnet_net, net_list);
1907 list_del_init(&net->net_list);
1909 rc = lnet_startup_lndnet(net, NULL);
1919 lnet_shutdown_lndnets();
1925 * Initialize LNet library.
1927 * Automatically called at module loading time. Caller has to call
1928 * lnet_lib_exit() after a call to lnet_lib_init(), if and only if the
1929 * latter returned 0. It must be called exactly once.
1931 * \retval 0 on success
1932 * \retval -ve on failures.
1934 int lnet_lib_init(void)
1938 lnet_assert_wire_constants();
1940 if (lnet_max_interfaces < LNET_MIN_INTERFACES)
1941 lnet_max_interfaces = LNET_MIN_INTERFACES;
1943 memset(&the_lnet, 0, sizeof(the_lnet));
1945 /* refer to global cfs_cpt_table for now */
1946 the_lnet.ln_cpt_table = cfs_cpt_table;
1947 the_lnet.ln_cpt_number = cfs_cpt_number(cfs_cpt_table);
1949 LASSERT(the_lnet.ln_cpt_number > 0);
1950 if (the_lnet.ln_cpt_number > LNET_CPT_MAX) {
1951 /* we are at risk of consuming all lh_cookies */
1952 CERROR("Can't have %d CPTs for LNet (max allowed is %d), "
1953 "please change setting of CPT-table and retry\n",
1954 the_lnet.ln_cpt_number, LNET_CPT_MAX);
1958 while ((1 << the_lnet.ln_cpt_bits) < the_lnet.ln_cpt_number)
1959 the_lnet.ln_cpt_bits++;
1961 rc = lnet_create_locks();
1963 CERROR("Can't create LNet global locks: %d\n", rc);
1967 the_lnet.ln_refcount = 0;
1968 LNetInvalidateHandle(&the_lnet.ln_rc_eqh);
1969 INIT_LIST_HEAD(&the_lnet.ln_lnds);
1970 INIT_LIST_HEAD(&the_lnet.ln_net_zombie);
1971 INIT_LIST_HEAD(&the_lnet.ln_rcd_zombie);
1972 INIT_LIST_HEAD(&the_lnet.ln_rcd_deathrow);
1974 /* The hash table size is the number of bits it takes to express the set
1975 * ln_num_routes, minus 1 (better to underestimate than overestimate so
1976 * we don't waste memory). */
1977 if (rnet_htable_size <= 0)
1978 rnet_htable_size = LNET_REMOTE_NETS_HASH_DEFAULT;
1979 else if (rnet_htable_size > LNET_REMOTE_NETS_HASH_MAX)
1980 rnet_htable_size = LNET_REMOTE_NETS_HASH_MAX;
1981 the_lnet.ln_remote_nets_hbits = max_t(int, 1,
1982 order_base_2(rnet_htable_size) - 1);
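/* Example (for illustration): rnet_htable_size = 128 gives
 * order_base_2(128) = 7, so ln_remote_nets_hbits = 6, i.e. a 64-bucket
 * remote-nets hash table (assuming LNET_REMOTE_NETS_HASH_SIZE is
 * 1 << hbits), deliberately one bit under the requested size as per the
 * comment above. */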
1984 /* All LNDs apart from the LOLND are in separate modules. They
1985 * register themselves when their module loads, and unregister
1986 * themselves when their module is unloaded. */
1987 lnet_register_lnd(&the_lolnd);
1992 * Finalize LNet library.
1994 * \pre lnet_lib_init() called with success.
1995 * \pre All LNet users called LNetNIFini() for matching LNetNIInit() calls.
1997 void lnet_lib_exit(void)
1999 LASSERT(the_lnet.ln_refcount == 0);
2001 while (!list_empty(&the_lnet.ln_lnds))
2002 lnet_unregister_lnd(list_entry(the_lnet.ln_lnds.next,
2004 lnet_destroy_locks();
2008 * Set LNet PID and start LNet interfaces, routing, and forwarding.
2010 * Users must call this function at least once before any other functions.
2011 * For each successful call there must be a corresponding call to
2012 * LNetNIFini(). For subsequent calls to LNetNIInit(), \a requested_pid is
2015 * The PID used by LNet may be different from the one requested.
2018 * \param requested_pid PID requested by the caller.
2020 * \return >= 0 on success, and < 0 error code on failures.
2023 LNetNIInit(lnet_pid_t requested_pid)
2025 int im_a_router = 0;
2028 struct lnet_ping_buffer *pbuf;
2029 lnet_handle_md_t ping_mdh;
2030 struct list_head net_head;
2031 struct lnet_net *net;
2033 INIT_LIST_HEAD(&net_head);
2035 mutex_lock(&the_lnet.ln_api_mutex);
2037 CDEBUG(D_OTHER, "refs %d\n", the_lnet.ln_refcount);
2039 if (the_lnet.ln_refcount > 0) {
2040 rc = the_lnet.ln_refcount++;
2041 mutex_unlock(&the_lnet.ln_api_mutex);
2045 rc = lnet_prepare(requested_pid);
2047 mutex_unlock(&the_lnet.ln_api_mutex);
2051 /* create a net for the loopback network */
2052 net = lnet_net_alloc(LNET_MKNET(LOLND, 0), &net_head);
2055 goto err_empty_list;
2058 /* Add in the loopback NI */
2059 if (lnet_ni_alloc(net, NULL, NULL) == NULL) {
2061 goto err_empty_list;
2064 /* If LNet is being initialized via DLC it is possible
2065 * that the user requests not to load module parameters (ones which
2066 * are supported by DLC) on initialization. Therefore, make sure not
2067 * to load networks, routes and forwarding from module parameters
2068 * in this case. On cleanup in case of failure, only clean up
2069 * routes if they have been loaded */
2070 if (!the_lnet.ln_nis_from_mod_params) {
2071 rc = lnet_parse_networks(&net_head, lnet_get_networks(),
2074 goto err_empty_list;
2077 ni_count = lnet_startup_lndnets(&net_head);
2080 goto err_empty_list;
2083 if (!the_lnet.ln_nis_from_mod_params) {
2084 rc = lnet_parse_routes(lnet_get_routes(), &im_a_router);
2086 goto err_shutdown_lndnis;
2088 rc = lnet_check_routes();
2090 goto err_destroy_routes;
2092 rc = lnet_rtrpools_alloc(im_a_router);
2094 goto err_destroy_routes;
2097 rc = lnet_acceptor_start();
2099 goto err_destroy_routes;
2101 the_lnet.ln_refcount = 1;
2102 /* Now I may use my own API functions... */
2104 rc = lnet_ping_target_setup(&pbuf, &ping_mdh, ni_count, true);
2106 goto err_acceptor_stop;
2108 lnet_ping_target_update(pbuf, ping_mdh);
2110 rc = lnet_router_checker_start();
2114 rc = lnet_push_target_init();
2116 goto err_stop_router_checker;
2118 rc = lnet_peer_discovery_start();
2120 goto err_destroy_push_target;
2125 mutex_unlock(&the_lnet.ln_api_mutex);
2129 err_destroy_push_target:
2130 lnet_push_target_fini();
2131 err_stop_router_checker:
2132 lnet_router_checker_stop();
2134 lnet_ping_target_fini();
2136 the_lnet.ln_refcount = 0;
2137 lnet_acceptor_stop();
2139 if (!the_lnet.ln_nis_from_mod_params)
2140 lnet_destroy_routes();
2141 err_shutdown_lndnis:
2142 lnet_shutdown_lndnets();
2146 mutex_unlock(&the_lnet.ln_api_mutex);
2147 while (!list_empty(&net_head)) {
2148 struct lnet_net *net;
2150 net = list_entry(net_head.next, struct lnet_net, net_list);
2151 list_del_init(&net->net_list);
2156 EXPORT_SYMBOL(LNetNIInit);
2159 * Stop LNet interfaces, routing, and forwarding.
2161 * Users must call this function once for each successful call to LNetNIInit().
2162 * Once the LNetNIFini() operation has been started, the results of pending
2163 * API operations are undefined.
2165 * \return always 0 for current implementation.
2170 mutex_lock(&the_lnet.ln_api_mutex);
2172 LASSERT(the_lnet.ln_refcount > 0);
2174 if (the_lnet.ln_refcount != 1) {
2175 the_lnet.ln_refcount--;
2177 LASSERT(!the_lnet.ln_niinit_self);
2182 lnet_peer_discovery_stop();
2183 lnet_push_target_fini();
2184 lnet_router_checker_stop();
2185 lnet_ping_target_fini();
2187 /* Teardown fns that use my own API functions BEFORE here */
2188 the_lnet.ln_refcount = 0;
2190 lnet_acceptor_stop();
2191 lnet_destroy_routes();
2192 lnet_shutdown_lndnets();
2196 mutex_unlock(&the_lnet.ln_api_mutex);
2199 EXPORT_SYMBOL(LNetNIFini);
2202 * Grabs the ni data from the ni structure and fills the out
2205 * \param[in] ni network interface structure
2206 * \param[out] cfg_ni NI config information
2207 * \param[out] tun network and LND tunables
2210 lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_ni *cfg_ni,
2211 struct lnet_ioctl_config_lnd_tunables *tun,
2212 struct lnet_ioctl_element_stats *stats,
2215 size_t min_size = 0;
2218 if (!ni || !cfg_ni || !tun)
2221 if (ni->ni_interfaces[0] != NULL) {
2222 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2223 if (ni->ni_interfaces[i] != NULL) {
2224 strncpy(cfg_ni->lic_ni_intf[i],
2225 ni->ni_interfaces[i],
2226 sizeof(cfg_ni->lic_ni_intf[i]));
2231 cfg_ni->lic_nid = ni->ni_nid;
2232 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2233 cfg_ni->lic_status = LNET_NI_STATUS_UP;
2235 cfg_ni->lic_status = ni->ni_status->ns_status;
2236 cfg_ni->lic_tcp_bonding = use_tcp_bonding;
2237 cfg_ni->lic_dev_cpt = ni->ni_dev_cpt;
2239 memcpy(&tun->lt_cmn, &ni->ni_net->net_tunables, sizeof(tun->lt_cmn));
2242 stats->send_count = atomic_read(&ni->ni_stats.send_count);
2243 stats->recv_count = atomic_read(&ni->ni_stats.recv_count);
2247 * tun->lt_tun will always be present, but in order to be
2248 * backwards compatible, we need to handle the case where
2249 * tun->lt_tun is smaller than what the kernel has because it
2250 * comes from an older version of a userspace program. In that
2251 * case we copy only as much information as the available space allows.
2253 min_size = tun_size - sizeof(tun->lt_cmn);
2254 memcpy(&tun->lt_tun, &ni->ni_lnd_tunables, min_size);
2256 /* copy over the cpts */
2257 if (ni->ni_ncpts == LNET_CPT_NUMBER &&
2258 ni->ni_cpts == NULL) {
2259 for (i = 0; i < ni->ni_ncpts; i++)
2260 cfg_ni->lic_cpts[i] = i;
2263 ni->ni_cpts != NULL && i < ni->ni_ncpts &&
2264 i < LNET_MAX_SHOW_NUM_CPT;
2266 cfg_ni->lic_cpts[i] = ni->ni_cpts[i];
2268 cfg_ni->lic_ncpts = ni->ni_ncpts;
2272 * NOTE: This is a legacy function left in the code to be backwards
2273 * compatible with older userspace programs. It should eventually be
2276 * Grabs the ni data from the ni structure and fills the out
2279 * \param[in] ni network interface structure
2280 * \param[out] config config information
2283 lnet_fill_ni_info_legacy(struct lnet_ni *ni,
2284 struct lnet_ioctl_config_data *config)
2286 struct lnet_ioctl_net_config *net_config;
2287 struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
2288 size_t min_size, tunable_size = 0;
2294 net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
2298 BUILD_BUG_ON(ARRAY_SIZE(ni->ni_interfaces) !=
2299 ARRAY_SIZE(net_config->ni_interfaces));
2301 for (i = 0; i < ARRAY_SIZE(ni->ni_interfaces); i++) {
2302 if (!ni->ni_interfaces[i])
2305 strncpy(net_config->ni_interfaces[i],
2306 ni->ni_interfaces[i],
2307 sizeof(net_config->ni_interfaces[i]));
2310 config->cfg_nid = ni->ni_nid;
2311 config->cfg_config_u.cfg_net.net_peer_timeout =
2312 ni->ni_net->net_tunables.lct_peer_timeout;
2313 config->cfg_config_u.cfg_net.net_max_tx_credits =
2314 ni->ni_net->net_tunables.lct_max_tx_credits;
2315 config->cfg_config_u.cfg_net.net_peer_tx_credits =
2316 ni->ni_net->net_tunables.lct_peer_tx_credits;
2317 config->cfg_config_u.cfg_net.net_peer_rtr_credits =
2318 ni->ni_net->net_tunables.lct_peer_rtr_credits;
2320 if (LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND)
2321 net_config->ni_status = LNET_NI_STATUS_UP;
2323 net_config->ni_status = ni->ni_status->ns_status;
2326 int num_cpts = min(ni->ni_ncpts, LNET_MAX_SHOW_NUM_CPT);
2328 for (i = 0; i < num_cpts; i++)
2329 net_config->ni_cpts[i] = ni->ni_cpts[i];
2331 config->cfg_ncpts = num_cpts;
2335 * See if user land tools sent in a newer and larger version
2336 * of struct lnet_tunables than what the kernel uses.
2338 min_size = sizeof(*config) + sizeof(*net_config);
2340 if (config->cfg_hdr.ioc_len > min_size)
2341 tunable_size = config->cfg_hdr.ioc_len - min_size;
2343 /* Don't copy too much data to user space */
2344 min_size = min(tunable_size, sizeof(ni->ni_lnd_tunables));
2345 lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
2347 if (lnd_cfg && min_size) {
2348 memcpy(&lnd_cfg->lt_tun, &ni->ni_lnd_tunables, min_size);
2349 config->cfg_config_u.cfg_net.net_interface_count = 1;
2351 /* Tell user land that kernel side has less data */
2352 if (tunable_size > sizeof(ni->ni_lnd_tunables)) {
2353 min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
2354 config->cfg_hdr.ioc_len -= min_size;
2360 lnet_get_ni_idx_locked(int idx)
2363 struct lnet_net *net;
2365 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
2366 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
2376 lnet_get_next_ni_locked(struct lnet_net *mynet, struct lnet_ni *prev)
2379 struct lnet_net *net = mynet;
2383 net = list_entry(the_lnet.ln_nets.next, struct lnet_net,
2385 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2391 if (prev->ni_netlist.next == &prev->ni_net->net_ni_list) {
2392 /* if you reached the end of the ni list and the net is
2393 * specified, then there are no more nis in that net */
2397 /* we reached the end of this net ni list. move to the
2399 if (prev->ni_net->net_list.next == &the_lnet.ln_nets)
2400 /* no more nets and no more NIs. */
2403 /* get the next net */
2404 net = list_entry(prev->ni_net->net_list.next, struct lnet_net,
2406 /* get the ni on it */
2407 ni = list_entry(net->net_ni_list.next, struct lnet_ni,
2413 /* there are more nis left */
2414 ni = list_entry(prev->ni_netlist.next, struct lnet_ni, ni_netlist);
2420 lnet_get_net_config(struct lnet_ioctl_config_data *config)
2425 int idx = config->cfg_count;
2427 cpt = lnet_net_lock_current();
2429 ni = lnet_get_ni_idx_locked(idx);
2434 lnet_fill_ni_info_legacy(ni, config);
2438 lnet_net_unlock(cpt);
2443 lnet_get_ni_config(struct lnet_ioctl_config_ni *cfg_ni,
2444 struct lnet_ioctl_config_lnd_tunables *tun,
2445 struct lnet_ioctl_element_stats *stats,
2452 if (!cfg_ni || !tun || !stats)
2455 cpt = lnet_net_lock_current();
2457 ni = lnet_get_ni_idx_locked(cfg_ni->lic_idx);
2462 lnet_fill_ni_info(ni, cfg_ni, tun, stats, tun_size);
2466 lnet_net_unlock(cpt);
static int lnet_add_net_common(struct lnet_net *net,
			       struct lnet_ioctl_config_lnd_tunables *tun)
{
	__u32 net_id;
	struct lnet_ping_buffer *pbuf;
	lnet_handle_md_t ping_mdh;
	int rc;
	lnet_remotenet_t *rnet;
	int net_ni_count;
	int num_acceptor_nets;

	lnet_net_lock(LNET_LOCK_EX);
	rnet = lnet_find_rnet_locked(net->net_id);
	lnet_net_unlock(LNET_LOCK_EX);
	/*
	 * make sure that the net added doesn't invalidate the current
	 * configuration LNet is keeping
	 */
	if (rnet) {
		CERROR("Adding net %s will invalidate routing configuration\n",
		       libcfs_net2str(net->net_id));
		lnet_net_free(net);
		return -EUSERS;
	}

	/*
	 * make sure we calculate the correct number of slots in the ping
	 * buffer. Since the ping info is a flattened list of all the NIs,
	 * we should allocate enough slots to accommodate the number of NIs
	 * which will be added.
	 *
	 * since the NI hasn't been configured yet, use
	 * lnet_get_net_ni_count_pre() which checks the net_ni_added list
	 */
	net_ni_count = lnet_get_net_ni_count_pre(net);

	rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
				    net_ni_count + lnet_get_ni_count(),
				    false);
	if (rc < 0) {
		lnet_net_free(net);
		return rc;
	}

	if (tun)
		memcpy(&net->net_tunables,
		       &tun->lt_cmn, sizeof(net->net_tunables));
	else
		memset(&net->net_tunables, -1, sizeof(net->net_tunables));

	/*
	 * before starting this network get a count of the current TCP
	 * networks which require the acceptor thread running. If that
	 * count is 0 before we start up this network, then we need to
	 * start up the acceptor thread after starting up this network
	 */
	num_acceptor_nets = lnet_count_acceptor_nets();

	net_id = net->net_id;

	rc = lnet_startup_lndnet(net,
				 (tun) ? &tun->lt_tun : NULL);
	if (rc < 0)
		goto failed;

	lnet_net_lock(LNET_LOCK_EX);
	net = lnet_get_net_locked(net_id);
	lnet_net_unlock(LNET_LOCK_EX);

	LASSERT(net);

	/*
	 * Start the acceptor thread if this is the first network
	 * being added that requires the thread.
	 */
	if (net->net_lnd->lnd_accept && num_acceptor_nets == 0) {
		rc = lnet_acceptor_start();
		if (rc < 0) {
			/* shutdown the net that we just started */
			CERROR("Failed to start up acceptor thread\n");
			lnet_shutdown_lndnet(net);
			goto failed;
		}
	}

	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_net_added(net);
	lnet_net_unlock(LNET_LOCK_EX);

	lnet_ping_target_update(pbuf, ping_mdh);

	return 0;

failed:
	lnet_ping_md_unlink(pbuf, &ping_mdh);
	lnet_ping_buffer_decref(pbuf);
	return rc;
}
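/*
 * Worked example (illustrative, not from the original source): if the node
 * already has 3 NIs configured and the net being added carries 2 NIs on its
 * net_ni_added list, the ping target above is rebuilt with 3 + 2 = 5 slots,
 * since the ping info is a flat list of every local NI.
 */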
static int lnet_handle_legacy_ip2nets(char *ip2nets,
				      struct lnet_ioctl_config_lnd_tunables *tun)
{
	struct lnet_net *net;
	char *nets;
	int rc;
	struct list_head net_head;

	INIT_LIST_HEAD(&net_head);

	rc = lnet_parse_ip2nets(&nets, ip2nets);
	if (rc < 0)
		return rc;

	rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);
	while (!list_empty(&net_head)) {
		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		rc = lnet_add_net_common(net, tun);
		if (rc < 0)
			goto out;
	}

out:
	mutex_unlock(&the_lnet.ln_api_mutex);

	/* free any nets left over on failure */
	while (!list_empty(&net_head)) {
		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		lnet_net_free(net);
	}
	return rc;
}
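/*
 * Illustrative example (assumed syntax, not from the original source): a
 * legacy ip2nets string handed in through DLC might look like
 *
 *     "tcp(eth0) 192.168.0.*; o2ib 10.10.*.*"
 *
 * lnet_parse_ip2nets() matches the patterns against the local IP addresses
 * and produces the plain "networks"-style string that lnet_parse_networks()
 * then turns into lnet_net/lnet_ni structures.
 */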
int lnet_dyn_add_ni(struct lnet_ioctl_config_ni *conf)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	struct lnet_ioctl_config_lnd_tunables *tun = NULL;
	__u32 net_id;
	int rc, i;

	/* get the tunables if they are available */
	if (conf->lic_cfg_hdr.ioc_len >=
	    sizeof(*conf) + sizeof(*tun))
		tun = (struct lnet_ioctl_config_lnd_tunables *)
			conf->lic_bulk;

	/* handle legacy ip2nets from DLC */
	if (conf->lic_legacy_ip2nets[0] != '\0')
		return lnet_handle_legacy_ip2nets(conf->lic_legacy_ip2nets,
						  tun);

	net_id = LNET_NIDNET(conf->lic_nid);

	net = lnet_net_alloc(net_id, NULL);
	if (!net)
		return -ENOMEM;

	for (i = 0; i < conf->lic_ncpts; i++) {
		if (conf->lic_cpts[i] >= LNET_CPT_NUMBER)
			return -EINVAL;
	}

	ni = lnet_ni_alloc_w_cpt_array(net, conf->lic_cpts, conf->lic_ncpts,
				       conf->lic_ni_intf[0]);
	if (!ni)
		return -ENOMEM;

	mutex_lock(&the_lnet.ln_api_mutex);

	rc = lnet_add_net_common(net, tun);

	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}
int lnet_dyn_del_ni(struct lnet_ioctl_config_ni *conf)
{
	struct lnet_net *net;
	struct lnet_ni *ni;
	__u32 net_id = LNET_NIDNET(conf->lic_nid);
	struct lnet_ping_buffer *pbuf;
	lnet_handle_md_t ping_mdh;
	int rc;
	int net_count;
	__u32 addr;

	/* don't allow userspace to shutdown the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;

	mutex_lock(&the_lnet.ln_api_mutex);
	lnet_net_lock(0);

	net = lnet_get_net_locked(net_id);
	if (!net) {
		CERROR("net %s not found\n",
		       libcfs_net2str(net_id));
		rc = -ENOENT;
		goto unlock_net;
	}

	addr = LNET_NIDADDR(conf->lic_nid);
	if (addr == 0) {
		/* remove the entire net */
		net_count = lnet_get_net_ni_count_locked(net);

		lnet_net_unlock(0);

		/* create and link a new ping info, before removing the old
		 * one */
		rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
					    lnet_get_ni_count() - net_count,
					    false);
		if (rc != 0)
			goto unlock_api_mutex;

		lnet_shutdown_lndnet(net);

		if (lnet_count_acceptor_nets() == 0)
			lnet_acceptor_stop();

		lnet_ping_target_update(pbuf, ping_mdh);

		goto unlock_api_mutex;
	}

	ni = lnet_nid2ni_locked(conf->lic_nid, 0);
	if (!ni) {
		CERROR("nid %s not found\n",
		       libcfs_nid2str(conf->lic_nid));
		rc = -ENOENT;
		goto unlock_net;
	}

	net_count = lnet_get_net_ni_count_locked(net);

	lnet_net_unlock(0);

	/* create and link a new ping info, before removing the old one */
	rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
				    lnet_get_ni_count() - 1, false);
	if (rc != 0)
		goto unlock_api_mutex;

	lnet_shutdown_lndni(ni);

	if (lnet_count_acceptor_nets() == 0)
		lnet_acceptor_stop();

	lnet_ping_target_update(pbuf, ping_mdh);

	/* check if the net is empty and remove it if it is */
	if (net_count == 1)
		lnet_shutdown_lndnet(net);

	goto unlock_api_mutex;

unlock_net:
	lnet_net_unlock(0);
unlock_api_mutex:
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}
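/*
 * Illustrative note (not from the original source): conf->lic_nid selects
 * the scope of the teardown above.  A NID whose address part is zero names
 * the whole net and removes every NI on it, while a fully specified NID
 * removes only that NI; the net itself is shut down once its last NI is
 * gone.
 */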
/*
 * lnet_dyn_add_net and lnet_dyn_del_net are now deprecated.
 * They are only expected to be called for unique networks, typically as a
 * result of older DLC library calls. Multi-Rail DLC and beyond no longer
 * uses these APIs.
 */
static int
lnet_dyn_add_net(struct lnet_ioctl_config_data *conf)
{
	struct lnet_net *net;
	struct list_head net_head;
	int rc;
	struct lnet_ioctl_config_lnd_tunables tun;
	char *nets = conf->cfg_config_u.cfg_net.net_intf;

	INIT_LIST_HEAD(&net_head);

	/* Create net/ni structures for the network string */
	rc = lnet_parse_networks(&net_head, nets, use_tcp_bonding);
	if (rc <= 0)
		return rc == 0 ? -EINVAL : rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	if (rc > 1) {
		rc = -EINVAL; /* only add one network per call */
		goto out_unlock_clean;
	}

	net = list_entry(net_head.next, struct lnet_net, net_list);
	list_del_init(&net->net_list);

	LASSERT(lnet_net_unique(net->net_id, &the_lnet.ln_nets, NULL));

	memset(&tun, 0, sizeof(tun));

	tun.lt_cmn.lct_peer_timeout =
		conf->cfg_config_u.cfg_net.net_peer_timeout;
	tun.lt_cmn.lct_peer_tx_credits =
		conf->cfg_config_u.cfg_net.net_peer_tx_credits;
	tun.lt_cmn.lct_peer_rtr_credits =
		conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
	tun.lt_cmn.lct_max_tx_credits =
		conf->cfg_config_u.cfg_net.net_max_tx_credits;

	rc = lnet_add_net_common(net, &tun);

out_unlock_clean:
	mutex_unlock(&the_lnet.ln_api_mutex);
	/* net_head list is empty in the success case */
	while (!list_empty(&net_head)) {
		net = list_entry(net_head.next, struct lnet_net, net_list);
		list_del_init(&net->net_list);
		lnet_net_free(net);
	}
	return rc;
}
static int
lnet_dyn_del_net(__u32 net_id)
{
	struct lnet_net *net;
	struct lnet_ping_buffer *pbuf;
	lnet_handle_md_t ping_mdh;
	int rc;
	int net_ni_count;

	/* don't allow userspace to shutdown the LOLND */
	if (LNET_NETTYP(net_id) == LOLND)
		return -EINVAL;

	mutex_lock(&the_lnet.ln_api_mutex);
	lnet_net_lock(0);

	net = lnet_get_net_locked(net_id);
	if (net == NULL) {
		lnet_net_unlock(0);
		rc = -EINVAL;
		goto out;
	}

	net_ni_count = lnet_get_net_ni_count_locked(net);
	lnet_net_unlock(0);

	/* create and link a new ping info, before removing the old one */
	rc = lnet_ping_target_setup(&pbuf, &ping_mdh,
				    lnet_get_ni_count() - net_ni_count, false);
	if (rc != 0)
		goto out;

	lnet_shutdown_lndnet(net);

	if (lnet_count_acceptor_nets() == 0)
		lnet_acceptor_stop();

	lnet_ping_target_update(pbuf, ping_mdh);

out:
	mutex_unlock(&the_lnet.ln_api_mutex);
	return rc;
}
void lnet_incr_dlc_seq(void)
{
	atomic_inc(&lnet_dlc_seq_no);
}

__u32 lnet_get_dlc_seq_locked(void)
{
	return atomic_read(&lnet_dlc_seq_no);
}
/**
 * LNet ioctl handler.
 */
int
LNetCtl(unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	struct lnet_ioctl_config_data *config;
	lnet_process_id_t id = {0};
	struct lnet_ni *ni;
	int rc;

	BUILD_BUG_ON(sizeof(struct lnet_ioctl_net_config) +
		     sizeof(struct lnet_ioctl_config_data) > LIBCFS_IOC_DATA_MAX);

	switch (cmd) {
	case IOC_LIBCFS_GET_NI:
		rc = LNetGetId(data->ioc_count, &id);
		data->ioc_nid = id.nid;
		return rc;

	case IOC_LIBCFS_FAIL_NID:
		return lnet_fail_nid(data->ioc_nid, data->ioc_count);

	case IOC_LIBCFS_ADD_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_route(config->cfg_net,
				    config->cfg_config_u.cfg_route.rtr_hop,
				    config->cfg_nid,
				    config->cfg_config_u.cfg_route.
					rtr_priority);
		if (rc == 0) {
			rc = lnet_check_routes();
			if (rc != 0)
				lnet_del_route(config->cfg_net,
					       config->cfg_nid);
		}
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_DEL_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_del_route(config->cfg_net, config->cfg_nid);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;

	case IOC_LIBCFS_GET_ROUTE:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_route(config->cfg_count,
				    &config->cfg_net,
				    &config->cfg_config_u.cfg_route.rtr_hop,
				    &config->cfg_nid,
				    &config->cfg_config_u.cfg_route.rtr_flags,
				    &config->cfg_config_u.cfg_route.
					rtr_priority);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	case IOC_LIBCFS_GET_LOCAL_NI: {
		struct lnet_ioctl_config_ni *cfg_ni;
		struct lnet_ioctl_config_lnd_tunables *tun = NULL;
		struct lnet_ioctl_element_stats *stats;
		__u32 tun_size;

		cfg_ni = arg;
		/* get the tunables if they are available */
		if (cfg_ni->lic_cfg_hdr.ioc_len <
		    sizeof(*cfg_ni) + sizeof(*stats) + sizeof(*tun))
			return -EINVAL;

		stats = (struct lnet_ioctl_element_stats *)
			cfg_ni->lic_bulk;
		tun = (struct lnet_ioctl_config_lnd_tunables *)
			(cfg_ni->lic_bulk + sizeof(*stats));

		tun_size = cfg_ni->lic_cfg_hdr.ioc_len - sizeof(*cfg_ni) -
			   sizeof(*stats);

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_ni_config(cfg_ni, tun, stats, tun_size);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_GET_NET: {
		size_t total = sizeof(*config) +
			       sizeof(struct lnet_ioctl_net_config);
		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_net_config(config);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_GET_LNET_STATS:
	{
		struct lnet_ioctl_lnet_stats *lnet_stats = arg;

		if (lnet_stats->st_hdr.ioc_len < sizeof(*lnet_stats))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		lnet_counters_get(&lnet_stats->st_cntrs);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	case IOC_LIBCFS_CONFIG_RTR:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		if (config->cfg_config_u.cfg_buffers.buf_enable) {
			rc = lnet_rtrpools_enable();
			mutex_unlock(&the_lnet.ln_api_mutex);
			return rc;
		}
		lnet_rtrpools_disable();
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;

	case IOC_LIBCFS_ADD_BUF:
		config = arg;

		if (config->cfg_hdr.ioc_len < sizeof(*config))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_rtrpools_adjust(config->cfg_config_u.cfg_buffers.
						buf_tiny,
					  config->cfg_config_u.cfg_buffers.
						buf_small,
					  config->cfg_config_u.cfg_buffers.
						buf_large);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	case IOC_LIBCFS_SET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;
		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		lnet_net_lock(LNET_LOCK_EX);
		lnet_numa_range = numa->sv_value;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;
	}

	case IOC_LIBCFS_GET_NUMA_RANGE: {
		struct lnet_ioctl_set_value *numa;
		numa = arg;
		if (numa->sv_hdr.ioc_len != sizeof(*numa))
			return -EINVAL;
		numa->sv_value = lnet_numa_range;
		return 0;
	}

	case IOC_LIBCFS_SET_MAX_INTF: {
		struct lnet_ioctl_set_value *max_intf;
		max_intf = arg;
		if (max_intf->sv_hdr.ioc_len != sizeof(*max_intf) ||
		    max_intf->sv_value < LNET_MIN_INTERFACES)
			return -EINVAL;
		mutex_lock(&the_lnet.ln_api_mutex);
		lnet_max_interfaces = max_intf->sv_value;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	case IOC_LIBCFS_GET_MAX_INTF: {
		struct lnet_ioctl_set_value *max_intf;
		max_intf = arg;
		if (max_intf->sv_hdr.ioc_len != sizeof(*max_intf))
			return -EINVAL;
		max_intf->sv_value = lnet_max_interfaces;
		return 0;
	}

	case IOC_LIBCFS_SET_DISCOVERY: {
		struct lnet_ioctl_set_value *discovery;
		discovery = arg;
		if (discovery->sv_hdr.ioc_len != sizeof(*discovery) ||
		    discovery->sv_value > 1)
			return -EINVAL;
		mutex_lock(&the_lnet.ln_api_mutex);
		lnet_peer_discovery_enabled = discovery->sv_value;
		mutex_unlock(&the_lnet.ln_api_mutex);
		return 0;
	}

	case IOC_LIBCFS_GET_DISCOVERY: {
		struct lnet_ioctl_set_value *discovery;
		discovery = arg;
		if (discovery->sv_hdr.ioc_len != sizeof(*discovery))
			return -EINVAL;
		discovery->sv_value = lnet_peer_discovery_enabled;
		return 0;
	}
	case IOC_LIBCFS_GET_BUF: {
		struct lnet_ioctl_pool_cfg *pool_cfg;
		size_t total = sizeof(*config) + sizeof(*pool_cfg);

		config = arg;

		if (config->cfg_hdr.ioc_len < total)
			return -EINVAL;

		pool_cfg = (struct lnet_ioctl_pool_cfg *)config->cfg_bulk;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_rtr_pool_cfg(config->cfg_count, pool_cfg);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_ADD_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;

		if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_add_peer_ni(cfg->prcfg_prim_nid,
				      cfg->prcfg_cfg_nid,
				      cfg->prcfg_mr);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_DEL_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;

		if (cfg->prcfg_hdr.ioc_len < sizeof(*cfg))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_del_peer_ni(cfg->prcfg_prim_nid,
				      cfg->prcfg_cfg_nid);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_GET_PEER_INFO: {
		struct lnet_ioctl_peer *peer_info = arg;

		if (peer_info->pr_hdr.ioc_len < sizeof(*peer_info))
			return -EINVAL;

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_peer_ni_info(
		   peer_info->pr_count,
		   &peer_info->pr_nid,
		   peer_info->pr_lnd_u.pr_peer_credits.cr_aliveness,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ncpt,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_refcount,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_ni_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_rtr_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_min_tx_credits,
		   &peer_info->pr_lnd_u.pr_peer_credits.cr_peer_tx_qnob);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}

	case IOC_LIBCFS_GET_PEER_NI: {
		struct lnet_ioctl_peer_cfg *cfg = arg;
		struct lnet_peer_ni_credit_info *lpni_cri;
		struct lnet_ioctl_element_stats *lpni_stats;
		size_t total = sizeof(*cfg) + sizeof(*lpni_cri) +
			       sizeof(*lpni_stats);

		if (cfg->prcfg_hdr.ioc_len < total)
			return -EINVAL;

		lpni_cri = (struct lnet_peer_ni_credit_info *)cfg->prcfg_bulk;
		lpni_stats = (struct lnet_ioctl_element_stats *)
			     (cfg->prcfg_bulk + sizeof(*lpni_cri));

		mutex_lock(&the_lnet.ln_api_mutex);
		rc = lnet_get_peer_info(cfg->prcfg_idx, &cfg->prcfg_prim_nid,
					&cfg->prcfg_cfg_nid, &cfg->prcfg_mr,
					lpni_cri, lpni_stats);
		mutex_unlock(&the_lnet.ln_api_mutex);
		return rc;
	}
	case IOC_LIBCFS_NOTIFY_ROUTER: {
		unsigned long jiffies_passed;

		jiffies_passed = ktime_get_real_seconds() - data->ioc_u64[0];
		jiffies_passed = cfs_time_seconds(jiffies_passed);

		return lnet_notify(NULL, data->ioc_nid, data->ioc_flags,
				   jiffies - jiffies_passed);
	}

	case IOC_LIBCFS_LNET_DIST:
		rc = LNetDist(data->ioc_nid, &data->ioc_nid, &data->ioc_u32[1]);
		if (rc < 0 && rc != -EHOSTUNREACH)
			return rc;

		data->ioc_u32[0] = rc;
		return 0;

	case IOC_LIBCFS_TESTPROTOCOMPAT:
		lnet_net_lock(LNET_LOCK_EX);
		the_lnet.ln_testprotocompat = data->ioc_flags;
		lnet_net_unlock(LNET_LOCK_EX);
		return 0;

	case IOC_LIBCFS_LNET_FAULT:
		return lnet_fault_ctl(data->ioc_flags, data);

	case IOC_LIBCFS_PING: {
		signed long timeout;

		id.nid = data->ioc_nid;
		id.pid = data->ioc_u32[0];

		/* Don't block longer than 2 minutes */
		if (data->ioc_u32[1] > 120 * MSEC_PER_SEC)
			return -EINVAL;

		/* If the timeout is negative then disable it */
		if ((s32)data->ioc_u32[1] < 0)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(data->ioc_u32[1]);

		rc = lnet_ping(id, timeout, data->ioc_pbuf1,
			       data->ioc_plen1 / sizeof(lnet_process_id_t));
		if (rc < 0)
			return rc;
		data->ioc_count = rc;
		return 0;
	}

	default:
		ni = lnet_net2ni_addref(data->ioc_net);
		if (ni == NULL)
			return -EINVAL;

		if (ni->ni_net->net_lnd->lnd_ctl == NULL)
			rc = -EINVAL;
		else
			rc = ni->ni_net->net_lnd->lnd_ctl(ni, cmd, arg);

		lnet_ni_decref(ni);
		return rc;
	}
	/* not reached */
}
EXPORT_SYMBOL(LNetCtl);
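/*
 * Usage sketch (illustrative only, not part of the original source):
 * querying the first local interface through the ioctl entry point, the
 * same handler userspace management tools ultimately reach.
 */
#if 0	/* example only, never compiled */
	struct libcfs_ioctl_data data = { 0 };
	int rc;

	data.ioc_count = 0;	/* index of the NI to query */
	rc = LNetCtl(IOC_LIBCFS_GET_NI, &data);
	if (rc == 0)
		CDEBUG(D_NET, "NI[0] = %s\n", libcfs_nid2str(data.ioc_nid));
#endif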
void LNetDebugPeer(lnet_process_id_t id)
{
	lnet_debug_peer(id.nid);
}
EXPORT_SYMBOL(LNetDebugPeer);
/**
 * Retrieve the lnet_process_id_t ID of LNet interface at \a index. Note that
 * all interfaces share the same PID, as requested by LNetNIInit().
 *
 * \param index Index of the interface to look up.
 * \param id On successful return, this location will hold the
 * lnet_process_id_t ID of the interface.
 *
 * \retval 0 If an interface exists at \a index.
 * \retval -ENOENT If no interface has been found.
 */
int
LNetGetId(unsigned int index, lnet_process_id_t *id)
{
	struct lnet_ni *ni;
	struct lnet_net *net;
	int cpt;
	int rc = -ENOENT;

	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
			if (index-- != 0)
				continue;

			id->nid = ni->ni_nid;
			id->pid = the_lnet.ln_pid;
			rc = 0;
			break;
		}
	}

	lnet_net_unlock(cpt);
	return rc;
}
EXPORT_SYMBOL(LNetGetId);
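/*
 * Usage sketch (illustrative only, not part of the original source):
 * enumerating the local interfaces until LNetGetId() runs out of them.
 */
#if 0	/* example only, never compiled */
	lnet_process_id_t id;
	unsigned int i;

	for (i = 0; LNetGetId(i, &id) == 0; i++)
		CDEBUG(D_NET, "interface %u: %s\n", i, libcfs_id2str(id));
#endif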
/**
 * Print a string representation of handle \a h into buffer \a str of
 * \a len bytes.
 */
void
LNetSnprintHandle(char *str, int len, lnet_handle_any_t h)
{
	snprintf(str, len, "%#llx", h.cookie);
}
EXPORT_SYMBOL(LNetSnprintHandle);
static int lnet_ping(lnet_process_id_t id, signed long timeout,
		     lnet_process_id_t __user *ids, int n_ids)
{
	lnet_handle_eq_t eqh;
	lnet_handle_md_t mdh;
	lnet_event_t event;
	lnet_md_t md = { NULL };
	int which;
	int unlinked = 0;
	int replied = 0;
	const signed long a_long_time = msecs_to_jiffies(60 * MSEC_PER_SEC);
	struct lnet_ping_buffer *pbuf;
	lnet_process_id_t tmpid;
	int i;
	int nob;
	int rc;
	int rc2;
	sigset_t blocked;

	/* n_ids limit is arbitrary */
	if (n_ids <= 0 || n_ids > lnet_max_interfaces || id.nid == LNET_NID_ANY)
		return -EINVAL;

	if (id.pid == LNET_PID_ANY)
		id.pid = LNET_PID_LUSTRE;

	pbuf = lnet_ping_buffer_alloc(n_ids, GFP_NOFS);
	if (!pbuf)
		return -ENOMEM;

	/* NB 2 events max (including any unlink event) */
	rc = LNetEQAlloc(2, LNET_EQ_HANDLER_NONE, &eqh);
	if (rc != 0) {
		CERROR("Can't allocate EQ: %d\n", rc);
		goto fail_ping_buffer_decref;
	}

	/* initialize md content */
	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(n_ids);
	md.threshold = 2; /* GET/REPLY */
	md.max_size  = 0;
	md.options   = LNET_MD_TRUNCATE;
	md.user_ptr  = NULL;
	md.eq_handle = eqh;

	rc = LNetMDBind(md, LNET_UNLINK, &mdh);
	if (rc != 0) {
		CERROR("Can't bind MD: %d\n", rc);
		goto fail_free_eq;
	}

	rc = LNetGet(LNET_NID_ANY, mdh, id,
		     LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0);
	if (rc != 0) {
		/* Don't CERROR; this could be deliberate! */
		rc2 = LNetMDUnlink(mdh);
		LASSERT(rc2 == 0);

		/* NB must wait for the UNLINK event below... */
		unlinked = 1;
		timeout = a_long_time;
	}

	do {
		/* MUST block for unlink to complete */
		if (unlinked)
			blocked = cfs_block_allsigs();

		rc2 = LNetEQPoll(&eqh, 1, timeout, &event, &which);

		if (unlinked)
			cfs_restore_sigs(blocked);

		CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
		       (rc2 <= 0) ? -1 : event.type,
		       (rc2 <= 0) ? -1 : event.status,
		       (rc2 > 0 && event.unlinked) ? " unlinked" : "");

		LASSERT(rc2 != -EOVERFLOW);	/* can't miss anything */

		if (rc2 <= 0 || event.status != 0) {
			/* timeout or error */
			if (!replied && rc == 0)
				rc = (rc2 < 0) ? rc2 :
				     (rc2 == 0) ? -ETIMEDOUT :
				     event.status;

			if (!unlinked) {
				/* Ensure completion in finite time... */
				LNetMDUnlink(mdh);
				/* No assertion (racing with network) */
				unlinked = 1;
				timeout = a_long_time;
			} else if (rc2 == 0) {
				/* timed out waiting for unlink */
				CWARN("ping %s: late network completion\n",
				      libcfs_id2str(id));
			}
		} else if (event.type == LNET_EVENT_REPLY) {
			replied = 1;
			rc = event.mlength;
		}
	} while (rc2 <= 0 || !event.unlinked);

	if (!replied) {
		if (rc == 0)
			CWARN("%s: Unexpected rc >= 0 but no reply!\n",
			      libcfs_id2str(id));
		rc = -EIO;
		goto fail_free_eq;
	}

	nob = rc;
	LASSERT(nob >= 0 && nob <= LNET_PING_INFO_SIZE(n_ids));

	rc = -EPROTO;		/* if I can't parse... */

	if (nob < 8) {
		/* can't check magic/version */
		CERROR("%s: ping info too short %d\n",
		       libcfs_id2str(id), nob);
		goto fail_free_eq;
	}

	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC)) {
		lnet_swap_pinginfo(pbuf);
	} else if (pbuf->pb_info.pi_magic != LNET_PROTO_PING_MAGIC) {
		CERROR("%s: Unexpected magic %08x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_magic);
		goto fail_free_eq;
	}

	if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_NI_STATUS) == 0) {
		CERROR("%s: ping w/o NI status: 0x%x\n",
		       libcfs_id2str(id), pbuf->pb_info.pi_features);
		goto fail_free_eq;
	}

	if (nob < LNET_PING_INFO_SIZE(0)) {
		CERROR("%s: Short reply %d(%d min)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(0));
		goto fail_free_eq;
	}

	if (pbuf->pb_info.pi_nnis < n_ids)
		n_ids = pbuf->pb_info.pi_nnis;

	if (nob < LNET_PING_INFO_SIZE(n_ids)) {
		CERROR("%s: Short reply %d(%d expected)\n", libcfs_id2str(id),
		       nob, (int)LNET_PING_INFO_SIZE(n_ids));
		goto fail_free_eq;
	}

	rc = -EFAULT;		/* If I SEGV... */

	memset(&tmpid, 0, sizeof(tmpid));
	for (i = 0; i < n_ids; i++) {
		tmpid.pid = pbuf->pb_info.pi_pid;
		tmpid.nid = pbuf->pb_info.pi_ni[i].ns_nid;
		if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
			goto fail_free_eq;
	}
	rc = pbuf->pb_info.pi_nnis;

fail_free_eq:
	rc2 = LNetEQFree(eqh);
	if (rc2 != 0)
		CERROR("rc2 %d\n", rc2);
	LASSERT(rc2 == 0);

fail_ping_buffer_decref:
	lnet_ping_buffer_decref(pbuf);

	return rc;
}