4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2014, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
35 #define DEBUG_SUBSYSTEM S_LNET
37 #include <lnet/lib-lnet.h>
38 #include <lnet/lib-dlc.h>
/*
 * Remove @lpni from the global remote peer_ni list (if it is queued
 * there) and drop the reference that list held on it.
 * NOTE(review): this chunk appears truncated — the original file's own
 * line numbers are embedded in the text and some lines (braces, returns)
 * are missing; code lines are kept byte-identical.
 */
41 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
43 if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
44 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
/* drop the remote list's ref; may free lpni if this was the last one */
45 lnet_peer_ni_decref_locked(lpni);
/*
 * Free all per-CPT peer tables and their hash buckets.
 * Every hash chain must already be empty (asserted per bucket below);
 * callers are expected to have cleaned up all peer NIs first.
 */
50 lnet_peer_tables_destroy(void)
52 struct lnet_peer_table *ptable;
53 struct list_head *hash;
57 if (!the_lnet.ln_peer_tables)
60 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
61 hash = ptable->pt_hash;
62 if (!hash) /* not initialized */
65 ptable->pt_hash = NULL;
66 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
67 LASSERT(list_empty(&hash[j]));
69 LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
72 cfs_percpt_free(the_lnet.ln_peer_tables);
73 the_lnet.ln_peer_tables = NULL;
/*
 * Allocate the per-CPT array of peer tables, then a hash-bucket array
 * for each table. On any allocation failure the partially-built tables
 * are torn down via lnet_peer_tables_destroy().
 */
77 lnet_peer_tables_create(void)
79 struct lnet_peer_table *ptable;
80 struct list_head *hash;
84 the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
86 if (the_lnet.ln_peer_tables == NULL) {
87 CERROR("Failed to allocate cpu-partition peer tables\n");
91 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
92 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
93 LNET_PEER_HASH_SIZE * sizeof(*hash));
95 CERROR("Failed to create peer hash table\n");
96 lnet_peer_tables_destroy();
100 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
101 INIT_LIST_HEAD(&hash[j]);
/* non-NULL pt_hash is what tables_destroy() uses to detect init */
102 ptable->pt_hash = hash; /* sign of initialization */
/*
 * Final peer-module teardown: detach every peer NI from the remote
 * peer list and from its hash chain (accounting each as a zombie),
 * then destroy the peer tables themselves.
 * NOTE(review): chunk appears truncated; some lines are missing.
 */
108 void lnet_peer_uninit()
111 struct lnet_peer_ni *lpni, *tmp;
112 struct lnet_peer_table *ptable = NULL;
114 /* remove all peer_nis from the remote peer and the hash list */
115 list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
116 lpni_on_remote_peer_ni_list) {
117 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
118 lnet_peer_ni_decref_locked(lpni);
120 cpt = lnet_cpt_of_nid_locked(lpni->lpni_nid, NULL);
121 ptable = the_lnet.ln_peer_tables[cpt];
/* count as zombie; pt_zombies is decremented when the NI is destroyed */
122 ptable->pt_zombies++;
124 list_del_init(&lpni->lpni_hashlist);
125 lnet_peer_ni_decref_locked(lpni);
128 lnet_peer_tables_destroy();
/*
 * Unhash every peer NI in @ptable (or, if @ni is non-NULL, only those
 * on @ni's net), marking each as a zombie and dropping the hash
 * table's reference. Caller holds the net lock.
 */
132 lnet_peer_table_cleanup_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable)
135 struct lnet_peer_ni *lp;
136 struct lnet_peer_ni *tmp;
138 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
139 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
/* NULL ni means "clean everything"; otherwise skip other nets */
141 if (ni != NULL && ni->ni_net != lp->lpni_net)
143 list_del_init(&lp->lpni_hashlist);
144 /* Lose hash table's ref */
145 ptable->pt_zombies++;
146 lnet_peer_ni_decref_locked(lp);
/*
 * Busy-wait (sleeping half a second per iteration, with the net lock
 * dropped) until all zombie peer NIs in @ptable have been destroyed.
 * Called and returns with lnet_net_lock(cpt_locked) held.
 */
152 lnet_peer_table_finalize_wait_locked(struct lnet_peer_table *ptable,
/* i starts at 3 — presumably so the debug message below is rate-limited
 * to every power-of-two iteration; TODO confirm against full source */
157 for (i = 3; ptable->pt_zombies != 0; i++) {
158 lnet_net_unlock(cpt_locked);
162 "Waiting for %d zombies on peer table\n",
165 set_current_state(TASK_UNINTERRUPTIBLE);
166 schedule_timeout(cfs_time_seconds(1) >> 1);
167 lnet_net_lock(cpt_locked);
/*
 * For every peer NI in @ptable that is on @ni's net and acts as a
 * router gateway (lpni_rtr_refcount != 0), delete all routes through
 * it. The net lock is dropped around lnet_del_route(), which takes
 * its own locks.
 */
172 lnet_peer_table_del_rtrs_locked(lnet_ni_t *ni, struct lnet_peer_table *ptable,
175 struct lnet_peer_ni *lp;
176 struct lnet_peer_ni *tmp;
180 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
181 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
183 if (ni->ni_net != lp->lpni_net)
186 if (lp->lpni_rtr_refcount == 0)
/* snapshot the nid before dropping the lock; lp may go away */
189 lpni_nid = lp->lpni_nid;
191 lnet_net_unlock(cpt_locked);
192 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid);
193 lnet_net_lock(cpt_locked);
/*
 * Clean up peer state across all CPT tables in three passes, each
 * under the exclusive net lock: (1) delete routes gatewayed by peers
 * on @ni's net, (2) unhash the matching peer NIs (making them
 * zombies), (3) wait for all zombies to be destroyed.
 * @ni may be NULL only during full LNet shutdown (asserted).
 */
199 lnet_peer_tables_cleanup(lnet_ni_t *ni)
202 struct lnet_peer_table *ptable;
204 LASSERT(the_lnet.ln_shutdown || ni != NULL);
205 /* If just deleting the peers for a NI, get rid of any routes these
206 * peers are gateways for. */
207 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
208 lnet_net_lock(LNET_LOCK_EX);
209 lnet_peer_table_del_rtrs_locked(ni, ptable, i);
210 lnet_net_unlock(LNET_LOCK_EX);
213 /* Start the cleanup process */
214 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
215 lnet_net_lock(LNET_LOCK_EX);
216 lnet_peer_table_cleanup_locked(ni, ptable);
217 lnet_net_unlock(LNET_LOCK_EX);
220 /* Wait until all peers have been destroyed. */
221 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
222 lnet_net_lock(LNET_LOCK_EX);
223 lnet_peer_table_finalize_wait_locked(ptable, i);
224 lnet_net_unlock(LNET_LOCK_EX);
/*
 * Look @nid up in @ptable's hash. On a match, take a reference and
 * return the peer NI; otherwise (per the missing tail of this
 * function) presumably return NULL. Caller holds the net lock.
 */
228 static struct lnet_peer_ni *
229 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
231 struct list_head *peers;
232 struct lnet_peer_ni *lp;
234 LASSERT(!the_lnet.ln_shutdown);
236 peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
237 list_for_each_entry(lp, peers, lpni_hashlist) {
238 if (lp->lpni_nid == nid) {
/* ref for the caller; caller must decref when done */
239 lnet_peer_ni_addref_locked(lp);
/*
 * Find a peer NI by NID: hash the NID to its CPT, then search that
 * CPT's peer table. Returns a referenced peer NI or NULL.
 */
247 struct lnet_peer_ni *
248 lnet_find_peer_ni_locked(lnet_nid_t nid)
250 struct lnet_peer_ni *lpni;
251 struct lnet_peer_table *ptable;
254 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
256 ptable = the_lnet.ln_peer_tables[cpt];
257 lpni = lnet_get_peer_ni_locked(ptable, nid);
/*
 * Resolve @dst_nid to its owning struct lnet_peer, creating the
 * peer NI (and hierarchy) via lnet_nid2peerni_locked() if it does
 * not exist yet. The temporary peer-NI reference is dropped before
 * returning; *peer is set to the owning peer.
 */
263 lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt, struct lnet_peer **peer)
265 struct lnet_peer_ni *lpni;
267 lpni = lnet_find_peer_ni_locked(dst_nid);
270 rc = lnet_nid2peerni_locked(&lpni, dst_nid, cpt);
275 *peer = lpni->lpni_peer_net->lpn_peer;
276 lnet_peer_ni_decref_locked(lpni);
/*
 * Walk the global peer -> peer_net -> peer_ni hierarchy and return the
 * @idx'th peer NI overall, also reporting its enclosing net (*lpn) and
 * peer (*lp) through the out parameters. The idx-decrement/return
 * logic falls in lines missing from this chunk.
 */
281 struct lnet_peer_ni *
282 lnet_get_peer_ni_idx_locked(int idx, struct lnet_peer_net **lpn,
283 struct lnet_peer **lp)
285 struct lnet_peer_ni *lpni;
287 list_for_each_entry((*lp), &the_lnet.ln_peers, lp_on_lnet_peer_list) {
288 list_for_each_entry((*lpn), &((*lp)->lp_peer_nets), lpn_on_peer_list) {
289 list_for_each_entry(lpni, &((*lpn)->lpn_peer_nis),
290 lpni_on_peer_net_list)
/*
 * Iterator over a peer's NIs. Given the previously returned peer NI
 * (@prev, NULL to start), return the next one, advancing to the
 * peer's next net when the current net's list is exhausted. If
 * @peer_net is non-NULL the walk is confined to that net. Returns
 * NULL (in lines missing here) when the iteration is complete.
 */
299 struct lnet_peer_ni *
300 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
301 struct lnet_peer_net *peer_net,
302 struct lnet_peer_ni *prev)
304 struct lnet_peer_ni *lpni;
305 struct lnet_peer_net *net = peer_net;
/* prev == NULL: start from the first NI of the peer's first net */
309 net = list_entry(peer->lp_peer_nets.next,
310 struct lnet_peer_net,
312 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
313 lpni_on_peer_net_list);
/* prev was the last NI on its net */
318 if (prev->lpni_on_peer_net_list.next ==
319 &prev->lpni_peer_net->lpn_peer_nis) {
321 * if you reached the end of the peer ni list and the peer
322 * net is specified then there are no more peer nis in that
329 * we reached the end of this net ni list. move to the
332 if (prev->lpni_peer_net->lpn_on_peer_list.next ==
334 /* no more nets and no more NIs. */
337 /* get the next net */
338 net = list_entry(prev->lpni_peer_net->lpn_on_peer_list.next,
339 struct lnet_peer_net,
341 /* get the ni on it */
342 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
343 lpni_on_peer_net_list);
348 /* there are more nis left */
349 lpni = list_entry(prev->lpni_on_peer_net_list.next,
350 struct lnet_peer_ni, lpni_on_peer_net_list);
/*
 * Return whether @ni's NID appears in @lpni's preferred-NID array
 * (lpni_pref_nids[0 .. lpni_pref_nnids-1]).
 */
356 lnet_peer_is_ni_pref_locked(struct lnet_peer_ni *lpni, struct lnet_ni *ni)
360 for (i = 0; i < lpni->lpni_pref_nnids; i++) {
361 if (lpni->lpni_pref_nids[i] == ni->ni_nid)
/*
 * Detach @lpni from its peer_net; free the peer_net if it becomes
 * empty, and free the owning peer if it in turn has no nets left.
 * Bails out early if the hierarchy links are already gone.
 */
368 lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni)
370 struct lnet_peer_net *peer_net;
371 struct lnet_peer *peer;
373 /* TODO: could the below situation happen? accessing an already
375 if (lpni->lpni_peer_net == NULL ||
376 lpni->lpni_peer_net->lpn_peer == NULL)
379 peer_net = lpni->lpni_peer_net;
380 peer = lpni->lpni_peer_net->lpn_peer;
382 list_del_init(&lpni->lpni_on_peer_net_list);
383 lpni->lpni_peer_net = NULL;
385 /* if peer_net is empty, then remove it from the peer */
386 if (list_empty(&peer_net->lpn_peer_nis)) {
387 list_del_init(&peer_net->lpn_on_peer_list);
388 peer_net->lpn_peer = NULL;
389 LIBCFS_FREE(peer_net, sizeof(*peer_net));
391 /* if the peer is empty then remove it from the
392 * the_lnet.ln_peers */
393 if (list_empty(&peer->lp_peer_nets)) {
394 list_del_init(&peer->lp_on_lnet_peer_list);
395 LIBCFS_FREE(peer, sizeof(*peer));
/*
 * Build a fresh peer -> peer_net -> peer_ni hierarchy around @lpni:
 * allocate a peer and a peer_net, link them together with @lpni as
 * the (primary) NI, and add the peer to the_lnet.ln_peers. Frees the
 * peer and fails (in lines missing here) if either allocation fails.
 */
401 lnet_build_peer_hierarchy(struct lnet_peer_ni *lpni)
403 struct lnet_peer *peer;
404 struct lnet_peer_net *peer_net;
405 __u32 lpni_net = LNET_NIDNET(lpni->lpni_nid);
410 LIBCFS_ALLOC(peer, sizeof(*peer));
414 LIBCFS_ALLOC(peer_net, sizeof(*peer_net));
415 if (peer_net == NULL) {
416 LIBCFS_FREE(peer, sizeof(*peer));
420 INIT_LIST_HEAD(&peer->lp_on_lnet_peer_list);
421 INIT_LIST_HEAD(&peer->lp_peer_nets);
422 INIT_LIST_HEAD(&peer_net->lpn_on_peer_list);
423 INIT_LIST_HEAD(&peer_net->lpn_peer_nis);
425 /* build the hierarchy */
426 peer_net->lpn_net_id = lpni_net;
427 peer_net->lpn_peer = peer;
428 lpni->lpni_peer_net = peer_net;
/* first NI of a new peer is its primary NID */
429 peer->lp_primary_nid = lpni->lpni_nid;
430 list_add_tail(&peer_net->lpn_on_peer_list, &peer->lp_peer_nets);
431 list_add_tail(&lpni->lpni_on_peer_net_list, &peer_net->lpn_peer_nis);
432 list_add_tail(&peer->lp_on_lnet_peer_list, &the_lnet.ln_peers);
/*
 * Return @peer's peer_net with the given @net_id, or (per the missing
 * tail) presumably NULL when the peer has no net with that id.
 */
437 struct lnet_peer_net *
438 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
440 struct lnet_peer_net *peer_net;
441 list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) {
442 if (peer_net->lpn_net_id == net_id)
/*
 * given the key nid find the peer to add the new peer NID to. If the key
 * nid is NULL, then create a new peer, but first make sure that the NID
 * NOTE(review): this chunk is truncated; several lines (error returns,
 * braces) are missing. Overall flow visible here: validate @nid is not
 * already a peer NI, resolve @key_nid to an existing peer (or create
 * one via lnet_nid2peerni_locked when key_nid == LNET_NID_ANY),
 * allocate and initialize a new peer NI, then under LNET_LOCK_EX link
 * it to the hash table, remote list (if not on a local net), and the
 * peer's matching peer_net — allocating a new peer_net if needed.
 */
454 lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid)
456 struct lnet_peer_ni *lpni, *lpni2;
457 struct lnet_peer *peer;
458 struct lnet_peer_net *peer_net, *pn;
460 struct lnet_peer_table *ptable = NULL;
461 __u32 net_id = LNET_NIDNET(nid);
463 if (nid == LNET_NID_ANY)
466 /* check that nid is unique */
467 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
469 lpni = lnet_find_peer_ni_locked(nid);
471 lnet_peer_ni_decref_locked(lpni);
472 lnet_net_unlock(cpt);
475 lnet_net_unlock(cpt);
477 if (key_nid != LNET_NID_ANY) {
478 cpt2 = lnet_nid_cpt_hash(key_nid, LNET_CPT_NUMBER);
480 lpni = lnet_find_peer_ni_locked(key_nid);
482 lnet_net_unlock(cpt2);
483 /* key_nid refers to a non-existent peer_ni.*/
486 peer = lpni->lpni_peer_net->lpn_peer;
/* adding a second NID makes the peer multi-rail */
487 peer->lp_multi_rail = true;
488 lnet_peer_ni_decref_locked(lpni);
489 lnet_net_unlock(cpt2);
491 lnet_net_lock(LNET_LOCK_EX);
492 rc = lnet_nid2peerni_locked(&lpni, nid, LNET_LOCK_EX);
494 lpni->lpni_peer_net->lpn_peer->lp_multi_rail = true;
495 lnet_peer_ni_decref_locked(lpni);
497 lnet_net_unlock(LNET_LOCK_EX);
503 LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
507 INIT_LIST_HEAD(&lpni->lpni_txq);
508 INIT_LIST_HEAD(&lpni->lpni_rtrq);
509 INIT_LIST_HEAD(&lpni->lpni_routes);
510 INIT_LIST_HEAD(&lpni->lpni_hashlist);
511 INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list);
512 INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
514 lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
515 lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
516 lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
517 lpni->lpni_nid = nid;
518 lpni->lpni_cpt = cpt;
519 lnet_set_peer_ni_health_locked(lpni, true);
521 /* allocate here in case we need to add a new peer_net */
523 LIBCFS_ALLOC(peer_net, sizeof(*peer_net));
524 if (peer_net == NULL) {
527 LIBCFS_FREE(lpni, sizeof(*lpni));
531 lnet_net_lock(LNET_LOCK_EX);
533 ptable = the_lnet.ln_peer_tables[cpt];
/* re-check under EX lock: another thread may have raced us */
536 lpni2 = lnet_find_peer_ni_locked(nid);
538 lnet_peer_ni_decref_locked(lpni2);
539 /* sanity check that lpni2's peer is what we expect */
540 if (lpni2->lpni_peer_net->lpn_peer != peer)
546 /* another thread has already added it */
547 lnet_net_unlock(LNET_LOCK_EX);
548 LIBCFS_FREE(peer_net, sizeof(*peer_net));
552 lpni->lpni_net = lnet_get_net_locked(LNET_NIDNET(lpni->lpni_nid));
553 if (lpni->lpni_net != NULL) {
554 lpni->lpni_txcredits =
555 lpni->lpni_mintxcredits = lpni->lpni_net->net_peertxcredits;
556 lpni->lpni_rtrcredits =
557 lpni->lpni_minrtrcredits = lnet_peer_buffer_credits(lpni->lpni_net);
560 * if you're adding a peer which is not on a local network
561 * then we can't assign any of the credits. It won't be
562 * picked for sending anyway. Eventually a network can be
563 * added, in this case we need to revisit this peer and
564 * update its credits.
567 /* increment refcount for remote peer list */
568 atomic_inc(&lpni->lpni_refcount);
569 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
570 &the_lnet.ln_remote_peer_ni_list);
573 /* increment refcount for peer on hash list */
574 atomic_inc(&lpni->lpni_refcount);
576 list_add_tail(&lpni->lpni_hashlist,
577 &ptable->pt_hash[lnet_nid2peerhash(nid)]);
578 ptable->pt_version++;
580 /* add the lpni to a net */
581 list_for_each_entry(pn, &peer->lp_peer_nets, lpn_on_peer_list) {
582 if (pn->lpn_net_id == net_id) {
583 list_add_tail(&lpni->lpni_on_peer_net_list,
585 lpni->lpni_peer_net = pn;
586 lnet_net_unlock(LNET_LOCK_EX);
/* reused an existing peer_net; the preallocated one is unneeded */
587 LIBCFS_FREE(peer_net, sizeof(*peer_net));
592 INIT_LIST_HEAD(&peer_net->lpn_on_peer_list);
593 INIT_LIST_HEAD(&peer_net->lpn_peer_nis);
595 /* build the hierarchy */
596 peer_net->lpn_net_id = net_id;
597 peer_net->lpn_peer = peer;
598 lpni->lpni_peer_net = peer_net;
599 list_add_tail(&lpni->lpni_on_peer_net_list, &peer_net->lpn_peer_nis);
600 list_add_tail(&peer_net->lpn_on_peer_list, &peer->lp_peer_nets);
602 lnet_net_unlock(LNET_LOCK_EX);
/*
 * Delete the peer NI @nid from the peer identified by @key_nid
 * (which is required). If @nid is LNET_NID_ANY, or names the peer's
 * primary NID, the entire peer — all of its NIs — is deleted.
 * NOTE(review): chunk truncated; error-return lines are missing.
 */
607 lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid)
610 lnet_nid_t local_nid;
611 struct lnet_peer *peer;
612 struct lnet_peer_ni *lpni, *lpni2;
613 struct lnet_peer_table *ptable = NULL;
615 if (key_nid == LNET_NID_ANY)
618 local_nid = (nid != LNET_NID_ANY) ? nid : key_nid;
619 cpt = lnet_nid_cpt_hash(local_nid, LNET_CPT_NUMBER);
620 lnet_net_lock(LNET_LOCK_EX);
622 lpni = lnet_find_peer_ni_locked(local_nid);
624 lnet_net_unlock(cpt);
627 lnet_peer_ni_decref_locked(lpni);
629 peer = lpni->lpni_peer_net->lpn_peer;
630 LASSERT(peer != NULL);
632 if (peer->lp_primary_nid == lpni->lpni_nid) {
634 * deleting the primary ni is equivalent to deleting the
/* walk every NI of the peer, unhashing each and marking it zombie;
 * lpni2 is fetched first because lpni is invalidated by the decref */
638 lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
639 while (lpni != NULL) {
640 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
641 cpt = lnet_nid_cpt_hash(lpni->lpni_nid,
643 lnet_peer_remove_from_remote_list(lpni);
644 ptable = the_lnet.ln_peer_tables[cpt];
645 ptable->pt_zombies++;
646 list_del_init(&lpni->lpni_hashlist);
647 lnet_peer_ni_decref_locked(lpni);
650 lnet_net_unlock(LNET_LOCK_EX);
/* non-primary NID: delete just this one peer NI */
655 lnet_peer_remove_from_remote_list(lpni);
656 cpt = lnet_nid_cpt_hash(lpni->lpni_nid, LNET_CPT_NUMBER);
657 ptable = the_lnet.ln_peer_tables[cpt];
658 ptable->pt_zombies++;
659 list_del_init(&lpni->lpni_hashlist);
660 lnet_peer_ni_decref_locked(lpni);
661 lnet_net_unlock(LNET_LOCK_EX);
/*
 * Final destruction of a peer NI whose refcount has reached zero:
 * verify it is fully detached (no router refs, no queued tx, not
 * hashed), tear down its peer hierarchy, free it, and retire the
 * zombie count on its CPT's table.
 */
667 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
669 struct lnet_peer_table *ptable;
671 LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
672 LASSERT(lpni->lpni_rtr_refcount == 0);
673 LASSERT(list_empty(&lpni->lpni_txq));
674 LASSERT(list_empty(&lpni->lpni_hashlist));
675 LASSERT(lpni->lpni_txqnob == 0);
676 LASSERT(lpni->lpni_peer_net != NULL);
677 LASSERT(lpni->lpni_peer_net->lpn_peer != NULL);
679 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
680 LASSERT(ptable->pt_number > 0);
683 lpni->lpni_net = NULL;
685 lnet_try_destroy_peer_hierarchy_locked(lpni);
687 LIBCFS_FREE(lpni, sizeof(*lpni));
689 LASSERT(ptable->pt_zombies > 0);
690 ptable->pt_zombies--;
/*
 * Find the peer NI for @nid, creating it if necessary, and return it
 * in *lpnip with a reference held for the caller. @cpt is the lock
 * the caller currently holds (LNET_LOCK_EX or a per-CPT lock); the
 * function upgrades to the exclusive lock around table mutation and
 * restores the caller's lock on exit.
 * NOTE(review): chunk truncated; error paths and some braces missing.
 */
694 lnet_nid2peerni_locked(struct lnet_peer_ni **lpnip, lnet_nid_t nid, int cpt)
696 struct lnet_peer_table *ptable;
697 struct lnet_peer_ni *lpni = NULL;
698 struct lnet_peer_ni *lpni2;
703 if (the_lnet.ln_shutdown) /* it's shutting down */
707 * calculate cpt2 with the standard hash function
708 * This cpt2 becomes the slot where we'll find or create the peer.
710 cpt2 = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
713 * Any changes to the peer tables happen under exclusive write
714 * lock. Any reads to the peer tables can be done via a standard
717 if (cpt != LNET_LOCK_EX) {
718 lnet_net_unlock(cpt);
719 lnet_net_lock(LNET_LOCK_EX);
722 ptable = the_lnet.ln_peer_tables[cpt2];
723 lpni = lnet_get_peer_ni_locked(ptable, nid);
/* found: restore the caller's lock and return the existing NI */
726 if (cpt != LNET_LOCK_EX) {
727 lnet_net_unlock(LNET_LOCK_EX);
734 * take extra refcount in case another thread has shutdown LNet
735 * and destroyed locks and peer-table before I finish the allocation
/* drop all locks for the (possibly sleeping) allocation */
738 lnet_net_unlock(LNET_LOCK_EX);
740 LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt2, sizeof(*lpni));
748 INIT_LIST_HEAD(&lpni->lpni_txq);
749 INIT_LIST_HEAD(&lpni->lpni_rtrq);
750 INIT_LIST_HEAD(&lpni->lpni_routes);
751 INIT_LIST_HEAD(&lpni->lpni_hashlist);
752 INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list);
753 INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
755 lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
756 lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
757 lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
758 lpni->lpni_nid = nid;
759 lpni->lpni_cpt = cpt2;
760 atomic_set(&lpni->lpni_refcount, 2); /* 1 for caller; 1 for hash */
762 rc = lnet_build_peer_hierarchy(lpni);
766 lnet_net_lock(LNET_LOCK_EX);
768 if (the_lnet.ln_shutdown) {
/* re-check for a racing creator while the lock was dropped */
773 lpni2 = lnet_get_peer_ni_locked(ptable, nid);
779 lpni->lpni_net = lnet_get_net_locked(LNET_NIDNET(lpni->lpni_nid));
780 if (lpni->lpni_net) {
781 lpni->lpni_txcredits =
782 lpni->lpni_mintxcredits =
783 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
784 lpni->lpni_rtrcredits =
785 lpni->lpni_minrtrcredits =
786 lnet_peer_buffer_credits(lpni->lpni_net);
789 * if you're adding a peer which is not on a local network
790 * then we can't assign any of the credits. It won't be
791 * picked for sending anyway. Eventually a network can be
792 * added, in this case we need to revisit this peer and
793 * update its credits.
796 CDEBUG(D_NET, "peer_ni %s is not directly connected\n",
797 libcfs_nid2str(nid));
798 /* increment refcount for remote peer list */
799 atomic_inc(&lpni->lpni_refcount);
800 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
801 &the_lnet.ln_remote_peer_ni_list);
804 lnet_set_peer_ni_health_locked(lpni, true);
806 list_add_tail(&lpni->lpni_hashlist,
807 &ptable->pt_hash[lnet_nid2peerhash(nid)]);
808 ptable->pt_version++;
811 if (cpt != LNET_LOCK_EX) {
812 lnet_net_unlock(LNET_LOCK_EX);
/* error path: undo the hierarchy built above and free the NI */
819 lnet_try_destroy_peer_hierarchy_locked(lpni);
820 LIBCFS_FREE(lpni, sizeof(*lpni));
823 if (cpt != LNET_LOCK_EX) {
824 lnet_net_unlock(LNET_LOCK_EX);
/*
 * Debug helper: look up (or create) the peer NI for @nid and emit a
 * D_WARNING CDEBUG line with its aliveness and credit counters, then
 * drop the reference taken by lnet_nid2peerni_locked().
 */
831 lnet_debug_peer(lnet_nid_t nid)
833 char *aliveness = "NA";
834 struct lnet_peer_ni *lp;
838 cpt = lnet_cpt_of_nid(nid, NULL);
841 rc = lnet_nid2peerni_locked(&lp, nid, cpt);
843 lnet_net_unlock(cpt);
844 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
/* aliveness only meaningful for routers or when peer aliveness
 * checking is enabled; otherwise report "NA" */
848 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
849 aliveness = lp->lpni_alive ? "up" : "down";
851 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
852 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
853 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
854 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
855 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
857 lnet_peer_ni_decref_locked(lp);
859 lnet_net_unlock(cpt);
/*
 * ioctl-style iterator: report state (nid, aliveness string, refcount
 * and credit counters) of the @peer_index'th peer NI within the CPT
 * table selected by *cpt_iter. Returns 0 when a peer was found,
 * -ENOENT otherwise; *cpt_iter advancement happens in lines missing
 * from this chunk.
 */
862 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
863 char aliveness[LNET_MAX_STR_LEN],
864 __u32 *cpt_iter, __u32 *refcount,
865 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
866 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
869 struct lnet_peer_table *peer_table;
870 struct lnet_peer_ni *lp;
875 /* get the number of CPTs */
876 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
878 /* if the cpt number to be examined is >= the number of cpts in
879 * the system then indicate that there are no more cpts to examine
881 if (*cpt_iter >= lncpt)
884 /* get the current table */
885 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
886 /* if the ptable is NULL then there are no more cpts to examine */
887 if (peer_table == NULL)
890 lnet_net_lock(*cpt_iter);
892 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
893 struct list_head *peers = &peer_table->pt_hash[j];
895 list_for_each_entry(lp, peers, lpni_hashlist) {
/* skip peer NIs until the requested index is reached */
896 if (peer_index-- > 0)
899 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
900 if (lnet_isrouter(lp) ||
901 lnet_peer_aliveness_enabled(lp))
902 snprintf(aliveness, LNET_MAX_STR_LEN,
903 lp->lpni_alive ? "up" : "down");
906 *refcount = atomic_read(&lp->lpni_refcount);
907 *ni_peer_tx_credits =
908 lp->lpni_net->net_tunables.lct_peer_tx_credits;
909 *peer_tx_credits = lp->lpni_txcredits;
910 *peer_rtr_credits = lp->lpni_rtrcredits;
/* NOTE(review): min_rtr_credits is filled from lpni_mintxcredits —
 * looks like a copy-paste slip (expected lpni_minrtrcredits); kept
 * as-is since this chunk is truncated. TODO confirm upstream. */
911 *peer_min_rtr_credits = lp->lpni_mintxcredits;
912 *peer_tx_qnob = lp->lpni_txqnob;
918 lnet_net_unlock(*cpt_iter);
922 return found ? 0 : -ENOENT;
/*
 * Fill @peer_ni_info with the credit/aliveness state of the @idx'th
 * peer NI (as enumerated by lnet_get_peer_ni_idx_locked()), along
 * with its NID and its peer's primary NID. The function's tail
 * (return value) lies beyond the visible end of this chunk.
 */
925 int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid,
926 struct lnet_peer_ni_credit_info *peer_ni_info)
928 struct lnet_peer_ni *lpni = NULL;
929 struct lnet_peer_net *lpn = NULL;
930 struct lnet_peer *lp = NULL;
932 lpni = lnet_get_peer_ni_idx_locked(idx, &lpn, &lp);
937 *primary_nid = lp->lp_primary_nid;
938 *nid = lpni->lpni_nid;
939 snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
940 if (lnet_isrouter(lpni) ||
941 lnet_peer_aliveness_enabled(lpni))
942 snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN,
943 lpni->lpni_alive ? "up" : "down");
945 peer_ni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
/* remote peers (no local net) report zero tx credits */
946 peer_ni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
947 lpni->lpni_net->net_peertxcredits : 0;
948 peer_ni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
949 peer_ni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
/* NOTE(review): min_rtr_credits filled from lpni_mintxcredits —
 * presumably should be lpni_minrtrcredits; TODO confirm upstream. */
950 peer_ni_info->cr_peer_min_rtr_credits = lpni->lpni_mintxcredits;
951 peer_ni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;