/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#include <lnet/lib-dlc.h>
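/*
 * Drop a peer_ni from the global list of peer_nis that are not on a
 * local network, releasing the reference that list membership holds.
 */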
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
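/*
 * A local net was added. Walk the remote peer_ni list and adopt any
 * peer_nis that now sit on a local network, initializing their credits
 * from the net tunables.
 */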
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			lpni->lpni_txcredits =
			lpni->lpni_mintxcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_rtrcredits =
			lpni->lpni_minrtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
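/*
 * Free the per-CPT peer hash tables. All hash chains and zombie lists
 * must already be empty by the time this is called.
 */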
void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			continue;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
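/*
 * Allocate one peer table per CPT, each with its own hash of peer_ni
 * chains and a zombie list for peer_nis awaiting their last reference.
 */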
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);

		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
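/*
 * Allocate and initialize a peer_ni on the CPT its NID hashes to. If
 * the NID is not on a local network the credits cannot be set yet, so
 * the peer_ni is parked on the remote peer_ni list instead.
 */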
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_rtrq);
	INIT_LIST_HEAD(&lpni->lpni_routes);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_on_peer_net_list);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);

	spin_lock_init(&lpni->lpni_lock);

	lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
	lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	lnet_set_peer_ni_health_locked(lpni, true);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		atomic_inc(&lpni->lpni_refcount);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	/* TODO: update flags */

	return lpni;
}
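/* Allocate an empty peer_net container for the given network ID. */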
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_on_peer_list);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;

	return lpn;
}
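/* Allocate an empty peer whose primary NID is the given NID. */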
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_on_lnet_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	lp->lp_primary_nid = nid;

	/* TODO: update flags */

	return lp;
}
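/*
 * Unlink a peer_ni from its peer_net and, if that leaves the peer_net
 * or the peer empty, free those containers as well.
 */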
static void
lnet_try_destroy_peer_hierarchy_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_net *peer_net;
	struct lnet_peer *peer;

	/* TODO: could the below situation happen? accessing an already
	 * destroyed peer? */
	if (lpni->lpni_peer_net == NULL ||
	    lpni->lpni_peer_net->lpn_peer == NULL)
		return;

	peer_net = lpni->lpni_peer_net;
	peer = lpni->lpni_peer_net->lpn_peer;

	list_del_init(&lpni->lpni_on_peer_net_list);
	lpni->lpni_peer_net = NULL;

	/* if peer_net is empty, then remove it from the peer */
	if (list_empty(&peer_net->lpn_peer_nis)) {
		list_del_init(&peer_net->lpn_on_peer_list);
		peer_net->lpn_peer = NULL;
		LIBCFS_FREE(peer_net, sizeof(*peer_net));

		/* if the peer is empty then remove it from the
		 * the_lnet.ln_peers list */
		if (list_empty(&peer->lp_peer_nets)) {
			list_del_init(&peer->lp_on_lnet_peer_list);
			LIBCFS_FREE(peer, sizeof(*peer));
		}
	}
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static void
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable = NULL;

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	LASSERT(atomic_read(&ptable->pt_number) > 0);
	atomic_dec(&ptable->pt_number);

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of this peer_table
	 * is protected by its own spinlock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer on the hierarchy anymore */
	lnet_try_destroy_peer_hierarchy_locked(lpni);

	/* decrement reference on peer */
	lnet_peer_ni_decref_locked(lpni);
}
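/* Final teardown of the peer module state at LNet shutdown. */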
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
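/*
 * Delete every peer_ni of a peer; the peer itself is freed along with
 * its last peer_ni.
 */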
static void
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		lnet_peer_ni_del_locked(lpni);
		lpni = lpni2;
	}
}
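/*
 * Remove from one peer table every peer_ni on the given net (or on any
 * net when net is NULL), deleting the whole peer when its primary NID
 * is the one being removed.
 */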
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *lpni;
	struct lnet_peer_ni *tmp;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			/*
			 * check if by removing this peer ni we should be
			 * removing the entire peer.
			 */
			peer = lpni->lpni_peer_net->lpn_peer;

			if (peer->lp_primary_nid == lpni->lpni_nid)
				lnet_peer_del_locked(peer);
			else
				lnet_peer_ni_del_locked(lpni);
		}
	}
}
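/*
 * Wait for the zombie peer_nis of a peer table to drop their last
 * reference before the table is torn down.
 */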
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	spin_lock(&ptable->pt_zombie_lock);
	while (ptable->pt_zombies) {
		spin_unlock(&ptable->pt_zombie_lock);

		CDEBUG(D_WARNING,
		       "Waiting for %d zombies on peer table\n",
		       ptable->pt_zombies);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) >> 1);
		spin_lock(&ptable->pt_zombie_lock);
	}
	spin_unlock(&ptable->pt_zombie_lock);
}
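/*
 * Delete the routes that go through peer_nis on the given net, dropping
 * and retaking the exclusive net lock around each route removal.
 */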
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t lpni_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (lp->lpni_rtr_refcount == 0)
				continue;

			lpni_nid = lp->lpni_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
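/*
 * Remove all peers on the given net (or all peers at shutdown when net
 * is NULL): first delete the routes they act as gateways for, then
 * delete the peer_nis themselves and wait for the zombies to drain.
 */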
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_shutdown || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
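/*
 * Hash-chain lookup of a peer_ni by NID within one peer table; takes a
 * reference on the entry it returns.
 */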
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	LASSERT(!the_lnet.ln_shutdown);

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
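/*
 * Find the peer_ni for nid in the peer table of the CPT that the NID
 * hashes to; a reference is taken on success.
 */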
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
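/*
 * Return the peer that owns dst_nid, creating the peer_ni (and with it
 * the peer hierarchy) if it does not exist yet.
 */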
struct lnet_peer *
lnet_find_or_create_peer_locked(lnet_nid_t dst_nid, int cpt)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp;

	lpni = lnet_find_peer_ni_locked(dst_nid);
	if (!lpni) {
		lpni = lnet_nid2peerni_locked(dst_nid, cpt);
		if (IS_ERR(lpni))
			return ERR_CAST(lpni);
	}

	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_ni_decref_locked(lpni);

	return lp;
}
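/*
 * Return the idx'th peer_ni in the global peer list, along with its
 * peer_net and peer, or NULL when idx is past the end.
 */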
struct lnet_peer_ni *
lnet_get_peer_ni_idx_locked(int idx, struct lnet_peer_net **lpn,
			    struct lnet_peer **lp)
{
	struct lnet_peer_ni *lpni;

	list_for_each_entry((*lp), &the_lnet.ln_peers, lp_on_lnet_peer_list) {
		list_for_each_entry((*lpn), &((*lp)->lp_peer_nets), lpn_on_peer_list) {
			list_for_each_entry(lpni, &((*lpn)->lpn_peer_nis),
					    lpni_on_peer_net_list)
				if (idx-- == 0)
					return lpni;
		}
	}

	return NULL;
}
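/*
 * Iterator over the peer_nis of a peer. With a NULL prev it returns the
 * first peer_ni; otherwise it returns the one after prev, restricted to
 * peer_net when that is non-NULL, or NULL when the walk is finished.
 */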
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net)
			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_on_peer_list);
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_on_peer_net_list);

		return lpni;
	}

	if (prev->lpni_on_peer_net_list.next ==
	    &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_on_peer_list.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_on_peer_list.next,
				 struct lnet_peer_net,
				 lpn_on_peer_list);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_on_peer_net_list);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_on_peer_net_list.next,
			  struct lnet_peer_ni, lpni_on_peer_net_list);

	return lpni;
}
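/* Return true if ni is in the peer_ni's list of preferred local NIs. */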
bool
lnet_peer_is_ni_pref_locked(struct lnet_peer_ni *lpni, struct lnet_ni *ni)
{
	int i;

	for (i = 0; i < lpni->lpni_pref_nnids; i++) {
		if (lpni->lpni_pref_nids[i] == ni->ni_nid)
			return true;
	}

	return false;
}
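/*
 * Return the primary NID of the peer that owns nid, or nid itself if no
 * such peer exists.
 */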
lnet_nid_t
lnet_peer_primary_nid(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return primary_nid;
}
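/* Find the peer_net with the given network ID within a peer, or NULL. */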
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_on_peer_list) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
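/*
 * Build or extend the peer hierarchy for nid: allocate whichever of the
 * peer, peer_net and peer_ni are missing, then link them together and
 * into the global tables under the exclusive net lock.
 */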
static int
lnet_peer_setup_hierarchy(struct lnet_peer *lp, struct lnet_peer_ni *lpni,
			  lnet_nid_t nid)
{
	struct lnet_peer_net *lpn = NULL;
	struct lnet_peer_table *ptable;
	__u32 net_id = LNET_NIDNET(nid);

	/*
	 * Create the peer_ni, peer_net, and peer if they don't exist
	 * yet.
	 */
	if (lp) {
		lpn = lnet_peer_get_net_locked(lp, net_id);
	} else {
		lp = lnet_peer_alloc(nid);
		if (!lp)
			goto out_enomem;
	}

	if (!lpn) {
		lpn = lnet_peer_net_alloc(net_id);
		if (!lpn)
			goto out_maybe_free_lp;
	}

	if (!lpni) {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni)
			goto out_maybe_free_lpn;
	}

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist,
			      &ptable->pt_hash[lnet_nid2peerhash(nid)]);
		ptable->pt_version++;
		atomic_inc(&ptable->pt_number);
		atomic_inc(&lpni->lpni_refcount);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer != lp)
		lnet_try_destroy_peer_hierarchy_locked(lpni);

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_on_peer_net_list, &lpn->lpn_peer_nis);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_on_peer_list, &lp->lp_peer_nets);
	}

	/* Add peer to global peer list */
	if (list_empty(&lp->lp_on_lnet_peer_list))
		list_add_tail(&lp->lp_on_lnet_peer_list, &the_lnet.ln_peers);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;

out_maybe_free_lpn:
	if (list_empty(&lpn->lpn_on_peer_list))
		LIBCFS_FREE(lpn, sizeof(*lpn));
out_maybe_free_lp:
	if (list_empty(&lp->lp_on_lnet_peer_list))
		LIBCFS_FREE(lp, sizeof(*lp));
out_enomem:
	return -ENOMEM;
}
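/*
 * Create (or look up) the peer_ni for a peer's primary NID and mark the
 * owning peer as multi-rail capable. Fails if the NID already belongs
 * to a different peer.
 */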
static int
lnet_add_prim_lpni(lnet_nid_t nid)
{
	int rc = 0;
	struct lnet_peer *peer;
	struct lnet_peer_ni *lpni;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * lookup the NID and its peer
	 * if the peer doesn't exist, create it.
	 * if this is a non-MR peer then change its state to MR and exit.
	 * if this is an MR peer and it's a primary NI: NO-OP.
	 * if this is an MR peer and it's not a primary NI: operation not
	 * allowed.
	 *
	 * The adding and deleting of peer nis is being serialized through
	 * the api_mutex. So we can look up peers with the mutex locked
	 * safely. Only when we need to change the ptable, do we need to
	 * exclusively lock the lnet_net_lock()
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = lnet_peer_setup_hierarchy(NULL, NULL, nid);
		if (rc != 0)
			return rc;
		lpni = lnet_find_peer_ni_locked(nid);
	}

	LASSERT(lpni);

	lnet_peer_ni_decref_locked(lpni);

	peer = lpni->lpni_peer_net->lpn_peer;

	/*
	 * If we found an lpni with the same nid as the NID we're trying to
	 * create, then we're trying to create an already existing lpni
	 * that belongs to a different peer
	 */
	if (peer->lp_primary_nid != nid)
		return -EEXIST;

	/*
	 * if we found an lpni that is not a multi-rail, which could occur
	 * if lpni is already created as a non-mr lpni or we just created
	 * it, then make sure you indicate that this lpni is a primary mr
	 * capable peer.
	 *
	 * TODO: update flags if necessary
	 */
	if (!peer->lp_multi_rail && peer->lp_primary_nid == nid)
		peer->lp_multi_rail = true;

	return rc;
}
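/*
 * Attach nid as an additional NID of the peer identified by key_nid.
 * The key NID must already exist, and nid must not already be owned by
 * a different multi-rail peer.
 */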
static int
lnet_add_peer_ni_to_prim_lpni(lnet_nid_t key_nid, lnet_nid_t nid)
{
	struct lnet_peer *peer, *primary_peer;
	struct lnet_peer_ni *lpni = NULL, *klpni = NULL;

	LASSERT(key_nid != LNET_NID_ANY && nid != LNET_NID_ANY);

	/*
	 * key nid must be created by this point. If not then this
	 * operation is not permitted
	 */
	klpni = lnet_find_peer_ni_locked(key_nid);
	if (!klpni)
		return -ENOENT;

	lnet_peer_ni_decref_locked(klpni);

	primary_peer = klpni->lpni_peer_net->lpn_peer;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lnet_peer_ni_decref_locked(lpni);

		peer = lpni->lpni_peer_net->lpn_peer;
		/*
		 * lpni already exists in the system but it belongs to
		 * a different peer. We can't re-add it
		 */
		if (peer->lp_primary_nid != key_nid && peer->lp_multi_rail) {
			CERROR("Cannot add NID %s owned by peer %s to peer %s\n",
			       libcfs_nid2str(lpni->lpni_nid),
			       libcfs_nid2str(peer->lp_primary_nid),
			       libcfs_nid2str(key_nid));
			return -EEXIST;
		} else if (peer->lp_primary_nid == key_nid) {
			/*
			 * found a peer_ni that is already part of the
			 * peer. This is a no-op operation.
			 */
			return 0;
		}

		/*
		 * TODO: else if (peer->lp_primary_nid != key_nid &&
		 *	 !peer->lp_multi_rail)
		 * peer is not an MR peer and it will be moved in the next
		 * step to klpni, so update its flags accordingly.
		 * lnet_move_peer_ni()
		 */

		/*
		 * TODO: call lnet_update_peer() from here to update the
		 * flags. This is the case when the lpni you're trying to
		 * add is already part of the peer. This could've been
		 * added by the DD previously, so go ahead and do any
		 * updates to the state if necessary
		 */
	}

	/*
	 * When we get here we either have found an existing lpni, which
	 * we can switch to the new peer. Or we need to create one and
	 * add it to the new peer
	 */
	return lnet_peer_setup_hierarchy(primary_peer, lpni, nid);
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	int rc = 0;

	if (nid == LNET_NID_ANY)
		return -EINVAL;

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * TODO: lnet_update_primary_nid() but not all of it
		 * only indicate if we're converting this to MR capable
		 * Can happen due to DD
		 */
		lnet_peer_ni_decref_locked(lpni);
	} else {
		rc = lnet_peer_setup_hierarchy(NULL, NULL, nid);
	}

	return rc;
}
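/* Create a stand-alone, non-multi-rail peer for the given NID. */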
static int
lnet_peer_ni_add_non_mr(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		CERROR("Cannot add %s as non-mr when it already exists\n",
		       libcfs_nid2str(nid));
		lnet_peer_ni_decref_locked(lpni);
		return -EEXIST;
	}

	return lnet_peer_setup_hierarchy(NULL, NULL, nid);
}
/*
 * This API handles the following combinations:
 *	Create a primary NI if only the key_nid is provided
 *	Create or add an lpni to a primary NI. Primary NI must've already
 *	been created.
 *	Create a non-MR peer.
 */
int
lnet_add_peer_ni_to_peer(lnet_nid_t key_nid, lnet_nid_t nid, bool mr)
{
	/*
	 * Caller trying to setup an MR like peer hierarchy but
	 * specifying it to be non-MR. This is not allowed.
	 */
	if (key_nid != LNET_NID_ANY &&
	    nid != LNET_NID_ANY && !mr)
		return -EPERM;

	/* Add the primary NID of a peer */
	if (key_nid != LNET_NID_ANY &&
	    nid == LNET_NID_ANY && mr)
		return lnet_add_prim_lpni(key_nid);

	/* Add a NID to an existing peer */
	if (key_nid != LNET_NID_ANY &&
	    nid != LNET_NID_ANY && mr)
		return lnet_add_peer_ni_to_prim_lpni(key_nid, nid);

	/* Add a non-MR peer NI */
	if (((key_nid != LNET_NID_ANY &&
	      nid == LNET_NID_ANY) ||
	     (key_nid == LNET_NID_ANY &&
	      nid != LNET_NID_ANY)) && !mr)
		return lnet_peer_ni_add_non_mr(key_nid != LNET_NID_ANY ?
					       key_nid : nid);

	return 0;
}
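/*
 * Delete the peer_ni identified by nid (or by key_nid when nid is not
 * given). Deleting a peer's primary NID removes the entire peer.
 */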
int
lnet_del_peer_ni_from_peer(lnet_nid_t key_nid, lnet_nid_t nid)
{
	lnet_nid_t local_nid;
	struct lnet_peer *peer;
	struct lnet_peer_ni *lpni;

	if (key_nid == LNET_NID_ANY)
		return -EINVAL;

	local_nid = (nid != LNET_NID_ANY) ? nid : key_nid;

	lpni = lnet_find_peer_ni_locked(local_nid);
	if (!lpni)
		return -EINVAL;
	lnet_peer_ni_decref_locked(lpni);

	peer = lpni->lpni_peer_net->lpn_peer;
	LASSERT(peer != NULL);

	if (peer->lp_primary_nid == lpni->lpni_nid) {
		/*
		 * deleting the primary ni is equivalent to deleting the
		 * entire peer
		 */
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_del_locked(peer);
		lnet_net_unlock(LNET_LOCK_EX);

		return 0;
	}

	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_ni_del_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
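/*
 * Final destruction of a peer_ni once its reference count has dropped
 * to zero: unlink it from the zombie list and free it.
 */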
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(lpni->lpni_rtr_refcount == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);

	lpni->lpni_net = NULL;

	/* remove the peer ni from the zombie list */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	spin_lock(&ptable->pt_zombie_lock);
	list_del_init(&lpni->lpni_hashlist);
	ptable->pt_zombies--;
	spin_unlock(&ptable->pt_zombie_lock);

	LIBCFS_FREE(lpni, sizeof(*lpni));
}
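/*
 * Look up the peer_ni for nid, creating it if necessary. Called with
 * the lnet_net_lock held on cpt; the lock is dropped while the slow
 * (creation) path runs under the ln_api_mutex and retaken on return.
 */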
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_ni *lpni = NULL;
	int cpt2;
	int rc;

	if (the_lnet.ln_shutdown) /* it's shutting down */
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * calculate cpt2 with the standard hash function
	 * This cpt2 is the slot where we'll find or create the peer.
	 */
	cpt2 = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
	ptable = the_lnet.ln_peer_tables[cpt2];
	lpni = lnet_get_peer_ni_locked(ptable, nid);
	if (lpni)
		return lpni;

	/* Slow path: serialized using the ln_api_mutex. */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 *
	 * lnet_add_nid_to_peer() also handles the case where we've
	 * raced and a different thread added the NID.
	 */
	if (the_lnet.ln_shutdown) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_get_peer_ni_locked(ptable, nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	return lpni;
}
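/* Dump the credit and queue state of a single peer_ni to the debug log. */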
void
lnet_debug_peer(lnet_nid_t nid)
{
	char *aliveness = "NA";
	struct lnet_peer_ni *lp;
	int cpt;

	cpt = lnet_cpt_of_nid(nid, NULL);
	lnet_net_lock(cpt);

	lp = lnet_nid2peerni_locked(nid, cpt);
	if (IS_ERR(lp)) {
		lnet_net_unlock(cpt);
		CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
		return;
	}

	if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
		aliveness = lp->lpni_alive ? "up" : "down";

	CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
	       libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
	       aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
	       lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
	       lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);

	lnet_peer_ni_decref_locked(lp);

	lnet_net_unlock(cpt);
}
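/*
 * ioctl helper: report the credit state of the peer_index'th peer_ni in
 * the peer table selected by *cpt_iter; returns -ENOENT when there is
 * no such entry in that table.
 */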
int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
			  char aliveness[LNET_MAX_STR_LEN],
			  __u32 *cpt_iter, __u32 *refcount,
			  __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
			  __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
			  __u32 *peer_tx_qnob)
{
	struct lnet_peer_table *peer_table;
	struct lnet_peer_ni *lp;
	int j;
	int lncpt;
	bool found = false;

	/* get the number of CPTs */
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/* if the cpt number to be examined is >= the number of cpts in
	 * the system then indicate that there are no more cpts to examine
	 */
	if (*cpt_iter >= lncpt)
		return -ENOENT;

	/* get the current table */
	peer_table = the_lnet.ln_peer_tables[*cpt_iter];
	/* if the ptable is NULL then there are no more cpts to examine */
	if (peer_table == NULL)
		return -ENOENT;

	lnet_net_lock(*cpt_iter);

	for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
		struct list_head *peers = &peer_table->pt_hash[j];

		list_for_each_entry(lp, peers, lpni_hashlist) {
			if (peer_index-- > 0)
				continue;

			snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
			if (lnet_isrouter(lp) ||
			    lnet_peer_aliveness_enabled(lp))
				snprintf(aliveness, LNET_MAX_STR_LEN,
					 lp->lpni_alive ? "up" : "down");

			*nid = lp->lpni_nid;
			*refcount = atomic_read(&lp->lpni_refcount);
			*ni_peer_tx_credits =
				lp->lpni_net->net_tunables.lct_peer_tx_credits;
			*peer_tx_credits = lp->lpni_txcredits;
			*peer_rtr_credits = lp->lpni_rtrcredits;
			*peer_min_rtr_credits = lp->lpni_mintxcredits;
			*peer_tx_qnob = lp->lpni_txqnob;

			found = true;
			break;
		}
	}
	lnet_net_unlock(*cpt_iter);

	return found ? 0 : -ENOENT;
}
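/*
 * ioctl helper: fill in the identity, credit and traffic statistics of
 * the idx'th peer_ni in the global peer list.
 */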
int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid,
		       bool *mr, struct lnet_peer_ni_credit_info *peer_ni_info,
		       struct lnet_ioctl_element_stats *peer_ni_stats)
{
	struct lnet_peer_ni *lpni = NULL;
	struct lnet_peer_net *lpn = NULL;
	struct lnet_peer *lp = NULL;

	lpni = lnet_get_peer_ni_idx_locked(idx, &lpn, &lp);
	if (lpni == NULL)
		return -ENOENT;

	*primary_nid = lp->lp_primary_nid;
	*mr = lp->lp_multi_rail;
	*nid = lpni->lpni_nid;
	snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
	if (lnet_isrouter(lpni) ||
	    lnet_peer_aliveness_enabled(lpni))
		snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN,
			 lpni->lpni_alive ? "up" : "down");

	peer_ni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
	peer_ni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
		lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
	peer_ni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
	peer_ni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
	peer_ni_info->cr_peer_min_rtr_credits = lpni->lpni_mintxcredits;
	peer_ni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;

	peer_ni_stats->send_count = atomic_read(&lpni->lpni_stats.send_count);
	peer_ni_stats->recv_count = atomic_read(&lpni->lpni_stats.recv_count);
	peer_ni_stats->drop_count = atomic_read(&lpni->lpni_stats.drop_count);

	return 0;
}