/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/peer.c
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <lnet/lib-lnet.h>
#include <lnet/lib-dlc.h>
unsigned lnet_peer_discovery_enabled = 1;
module_param(lnet_peer_discovery_enabled, uint, 0644);
MODULE_PARM_DESC(lnet_peer_discovery_enabled,
		 "Explicitly enable/disable peer discovery");
/* Value indicating that discovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
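#if 0	/* Usage sketch (illustration only, not built). */
/*
 * lnet_peer_tables_create() pairs with lnet_peer_tables_destroy() on
 * both the shutdown path and any failed startup path. The function
 * names example_startup() and example_bring_up_networks() below are
 * hypothetical.
 */
static int example_startup(void)
{
	int rc;

	rc = lnet_peer_tables_create();
	if (rc != 0)
		return rc;

	rc = example_bring_up_networks();	/* hypothetical step */
	if (rc != 0) {
		lnet_peer_tables_destroy();
		return rc;
	}
	return 0;
}
#endif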
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_rtrq);
	INIT_LIST_HEAD(&lpni->lpni_routes);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);

	spin_lock_init(&lpni->lpni_lock);

	lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
	lpni->lpni_last_alive = cfs_time_current(); /* assumes alive */
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	lnet_set_peer_ni_health_locked(lpni, true);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		atomic_inc(&lpni->lpni_refcount);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));

	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	ptable->pt_peer_nnids--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lpni->lpni_rtr_refcount > 0) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	LASSERT(ptable->pt_number > 0);
	ptable->pt_number--;

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * is protected by its own spinlock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_ni_del_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	spin_lock(&ptable->pt_zombie_lock);
	while (ptable->pt_zombies) {
		spin_unlock(&ptable->pt_zombie_lock);

		CDEBUG(D_WARNING,
		       "Waiting for %d zombies on peer table\n",
		       ptable->pt_zombies);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) >> 1);
		spin_lock(&ptable->pt_zombie_lock);
	}
	spin_unlock(&ptable->pt_zombie_lock);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t lpni_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (lp->lpni_rtr_refcount == 0)
				continue;

			lpni_nid = lp->lpni_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_ni *
lnet_get_peer_ni_idx_locked(int idx, struct lnet_peer_net **lpn,
			    struct lnet_peer **lp)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_ni *lpni;
	int lncpt;
	int cpt;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/* Find the peer table holding the idx'th peer NI. */
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		if (ptable->pt_peer_nnids > idx)
			break;
		idx -= ptable->pt_peer_nnids;
	}
	if (cpt >= lncpt)
		return NULL;

	list_for_each_entry((*lp), &ptable->pt_peer_list, lp_peer_list) {
		if ((*lp)->lp_nnis <= idx) {
			idx -= (*lp)->lp_nnis;
			continue;
		}
		list_for_each_entry((*lpn), &((*lp)->lp_peer_nets),
				    lpn_peer_nets) {
			list_for_each_entry(lpni, &((*lpn)->lpn_peer_nis),
					    lpni_peer_nis) {
				if (idx-- == 0)
					return lpni;
			}
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e. whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int i;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	for (i = 0; i < lpni->lpni_pref_nnids; i++) {
		if (lpni->lpni_pref.nids[i] == nid)
			return true;
	}
	return false;
}
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);
	return rc;
}
/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			rc = -EPERM;
			goto out;
		}
	}

	if (lpni->lpni_pref_nnids != 0) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] == nid) {
				LIBCFS_FREE(nids, size);
				rc = -EEXIST;
				goto out;
			}
			nids[i] = lpni->lpni_pref.nids[i];
		}
		nids[i] = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (oldnids) {
		/* size is already in bytes */
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i;
	int j;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else if (lpni->lpni_pref_nnids == 2) {
		if (lpni->lpni_pref.nids[0] != nid &&
		    lpni->lpni_pref.nids[1] != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		/* Copy every NID except the one being removed. */
		for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] != nid)
				nids[j++] = lpni->lpni_pref.nids[i];
		}
		/* Check if we actually removed a nid. */
		if (j == lpni->lpni_pref_nnids) {
			LIBCFS_FREE(nids, size);
			rc = -ENOENT;
			goto out;
		}
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1) {
		lpni->lpni_pref.nid = LNET_NID_ANY;
	} else if (lpni->lpni_pref_nnids == 2) {
		oldnids = lpni->lpni_pref.nids;
		if (oldnids[0] == nid)
			lpni->lpni_pref.nid = oldnids[1];
		else
			lpni->lpni_pref.nid = oldnids[0];
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (oldnids) {
		/* size is already in bytes */
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
lnet_nid_t
lnet_peer_primary_nid(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return primary_nid;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;
	while (!lnet_peer_is_uptodate(lp)) {
		rc = lnet_discover_peer_locked(lpni, cpt);
		if (rc)
			goto out_decref;
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);
	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
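#if 0	/* Usage sketch (illustration only, not built). */
/*
 * A caller that wants the stable identity of a multi-rail peer can
 * resolve any of its NIDs to the primary NID. Note that
 * LNetPrimaryNID() may block while the peer is being discovered. The
 * function name below is hypothetical.
 */
static void example_resolve(lnet_nid_t nid)
{
	lnet_nid_t prim = LNetPrimaryNID(nid);

	CDEBUG(D_NET, "%s belongs to peer %s\n",
	       libcfs_nid2str(nid), libcfs_nid2str(prim));
}
#endif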
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * Always returns 0. This is the last function called from functions
 * that do return an int, so returning 0 here allows the compiler to
 * do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		ptable->pt_number++;
		/* This is the 1st refcount on lpni. */
		atomic_inc(&lpni->lpni_refcount);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;
	the_lnet.ln_peer_tables[lp->lp_cpt]->pt_peer_nnids++;
	lnet_net_unlock(LNET_LOCK_EX);

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);

	return 0;
}
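/*
 * Because lnet_peer_attach_peer_ni() always succeeds and takes over
 * the lp, lpn and lpni it is handed, callers that allocated those
 * structures (see lnet_peer_add() below) need no rollback once the
 * attach has been called.
 */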
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		lnet_peer_ni_decref_locked(lpni);
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	/* If the peer_ni was allocated above its peer_net pointer is NULL */
	if (!lpni->lpni_peer_net)
		LIBCFS_FREE(lpni, sizeof(*lpni));
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;
	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		goto out;
	lp->lp_primary_nid = nid;
out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
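#if 0	/* Usage sketch (illustration only, not built). */
/*
 * The two DLC combinations handled above; the NID strings and the
 * function name are hypothetical examples.
 */
static void example_add_peer(void)
{
	lnet_nid_t prim = libcfs_str2nid("10.0.0.1@tcp");
	lnet_nid_t nid2 = libcfs_str2nid("10.0.1.1@tcp1");
	int rc;

	/* Create a Multi-Rail peer whose primary NID is prim. */
	rc = lnet_add_peer_ni(prim, LNET_NID_ANY, true);
	if (!rc)
		/* Add a second NID to the peer identified by prim. */
		rc = lnet_add_peer_ni(prim, nid2, true);
}
#endif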
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
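#if 0	/* Usage sketch (illustration only, not built). */
/*
 * Deleting a single NID versus deleting the whole peer; the function
 * name is a hypothetical example.
 */
static void example_del_peer(lnet_nid_t prim, lnet_nid_t nid)
{
	/* Remove one NID from the peer identified by prim. */
	lnet_del_peer_ni(prim, nid);
	/* Remove the entire peer. */
	lnet_del_peer_ni(prim, LNET_NID_ANY);
}
#endif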
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(lpni->lpni_rtr_refcount == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	/* remove the peer ni from the zombie list */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	spin_lock(&ptable->pt_zombie_lock);
	list_del_init(&lpni->lpni_hashlist);
	ptable->pt_zombies--;
	spin_unlock(&ptable->pt_zombie_lock);

	if (lpni->lpni_pref_nnids > 1) {
		LIBCFS_FREE(lpni->lpni_pref.nids,
			    sizeof(*lpni->lpni_pref.nids) *
			    lpni->lpni_pref_nnids);
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
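#if 0	/* Usage sketch (illustration only, not built). */
/*
 * Callers enter with the lnet_net_lock for the indicated cpt held and
 * must check for an ERR_PTR() result, since the peer_ni may need to
 * be created. The function name is a hypothetical example.
 */
static void example_lookup(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (!IS_ERR(lpni))
		lnet_peer_ni_decref_locked(lpni);	/* drop the hold */
	lnet_net_unlock(cpt);
}
#endif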
/*
 * Is a peer uptodate from discovery's point of view?
 */
bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERING) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else if (lp->lp_state & LNET_PEER_UNDISCOVERED) {
		if (lnet_peer_discovery_enabled)
			rc = false;
		else
			rc = true;
	} else {
		rc = false;
	}
	spin_unlock(&lp->lp_lock);

	return rc;
}
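/*
 * In summary: a peer that needs a push or is mid-discovery is never
 * uptodate; a DISCOVERED peer is uptodate iff its NIDs are; an
 * UNDISCOVERED peer counts as uptodate only when discovery is
 * disabled.
 */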
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	if (!(lp->lp_state & LNET_PEER_QUEUED)) {
		lp->lp_state |= LNET_PEER_QUEUED;
		spin_unlock(&lp->lp_lock);
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		spin_unlock(&lp->lp_lock);
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	LASSERT(lp->lp_state & LNET_PEER_QUEUED);
	lp->lp_state &= ~LNET_PEER_QUEUED;
	spin_unlock(&lp->lp_lock);
	list_del_init(&lp->lp_dc_list);
	wake_up_all(&lp->lp_dc_waitq);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
	struct lnet_peer *lp;

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CERROR("Push Put from unknown %s (source %s)\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * PING_REQUIRED flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_PING_REQUIRED;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_PING_REQUIRED;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Set the MULTIRAIL flag. Complain if the peer was DLC
	 * configured without it. This is the one place where
	 * discovery will override DLC.
	 */
	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CERROR("Push says %s is Multi-Rail, DLC says not\n",
			       libcfs_nid2str(lp->lp_primary_nid));
		}
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set PING_REQUIRED to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_PING_REQUIRED;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/*
	 * Check whether the Put data is stale. Stale data can just be
	 * dropped.
	 */
	if (pbuf->pb_info.pi_nnis > 1 &&
	    lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid &&
	    LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno) {
		CDEBUG(D_NET, "Stale Push from %s: got %u have %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf),
		       lp->lp_peer_seqno);
		goto out;
	}

	/*
	 * Check whether the Put data is new, in which case we clear
	 * the UPTODATE flag and prepare to process it.
	 *
	 * If the Put data is current, and the peer is UPTODATE then
	 * we assume everything is all right and drop the data as
	 * stale.
	 */
	if (LNET_PING_BUFFER_SEQNO(pbuf) > lp->lp_peer_seqno) {
		lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
	} else if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
		CDEBUG(D_NET, "Stale Push from %s: got %u have %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf),
		       lp->lp_peer_seqno);
		goto out;
	}

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
			LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set PING_REQUIRED to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_PING_REQUIRED;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/*
	 * Queue the peer for discovery, and wake the discovery thread
	 * if the peer was already queued, because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_queue_for_discovery(lp))
		wake_up(&the_lnet.ln_dc_waitq);
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
/*
 * Peer discovery slow path. The ln_api_mutex is held on entry, and
 * dropped/retaken within this function. An lnet_peer_ni is passed in
 * because discovery could tear down an lnet_peer.
 */
int
lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt)
{
	DEFINE_WAIT(wait);
	struct lnet_peer *lp;
	int rc = 0;

again:
	lnet_net_unlock(cpt);
	lnet_net_lock(LNET_LOCK_EX);
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_clear_discovery_error(lp);

	/*
	 * We're willing to be interrupted. The lpni can become a
	 * zombie if we race with DLC, so we must check for that.
	 */
	for (;;) {
		prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
			break;
		if (lp->lp_dc_error)
			break;
		if (lnet_peer_is_uptodate(lp))
			break;
		lnet_peer_queue_for_discovery(lp);
		lnet_peer_addref_locked(lp);
		lnet_net_unlock(LNET_LOCK_EX);
		schedule();
		finish_wait(&lp->lp_dc_waitq, &wait);
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_decref_locked(lp);
		/* Peer may have changed */
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	finish_wait(&lp->lp_dc_waitq, &wait);

	lnet_net_unlock(LNET_LOCK_EX);
	lnet_net_lock(cpt);

	if (signal_pending(current))
		rc = -EINTR;
	else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		rc = -ESHUTDOWN;
	else if (lp->lp_dc_error)
		rc = lp->lp_dc_error;
	else if (!lnet_peer_is_uptodate(lp))
		goto again;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
	       libcfs_nid2str(lpni->lpni_nid), rc);

	return rc;
}
/* Handle an incoming ack for a push. */
static void
lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_PUSH_SENT;
	lp->lp_push_error = ev->status;
	if (ev->status)
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
	else
		lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	spin_unlock(&lp->lp_lock);

	CDEBUG(D_NET, "peer %s ev->status %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), ev->status);
}
/* Handle a Reply message. This is the reply to a Ping message. */
static void
lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	int rc;

	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of message
	 * cannot be used. Set PING_FAILED to trigger a retry.
	 */
	if (ev->status) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
		CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	/*
	 * A reply with invalid or corrupted info. Set PING_FAILED to
	 * trigger a retry.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
		       libcfs_nid2str(lp->lp_primary_nid), rc);
		goto out;
	}

	/*
	 * Update the MULTI_RAIL flag based on the reply. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in. Once MULTIRAIL has been set it is not expected
	 * to be unset.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
			/* Everything's fine */
		} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("Reply says %s is Multi-Rail, DLC says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		}
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("DLC says %s is Multi-Rail, Reply says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		} else {
			CERROR("Multi-Rail state vanished from %s\n",
			       libcfs_nid2str(lp->lp_primary_nid));
			lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
		}
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * Check for truncation of the Reply. Clear PING_SENT and set
	 * PING_FAILED to trigger a retry.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/*
	 * Check the sequence numbers in the reply. These are only
	 * available if the reply came from a Multi-Rail peer.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
	    pbuf->pb_info.pi_nnis > 1 &&
	    lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno) {
			CDEBUG(D_NET, "Stale Reply from %s: got %u have %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       lp->lp_peer_seqno);
			goto out;
		}

		if (LNET_PING_BUFFER_SEQNO(pbuf) > lp->lp_peer_seqno)
			lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	}

	/* We're happy with the state of the data in the buffer. */
	CDEBUG(D_NET, "peer %s data present %u\n",
	       libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno);
	if (lp->lp_state & LNET_PEER_DATA_PRESENT)
		lnet_ping_buffer_decref(lp->lp_data);
	lp->lp_data = pbuf;
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	lnet_ping_buffer_addref(pbuf);

out:
	lp->lp_state &= ~LNET_PEER_PING_SENT;
	spin_unlock(&lp->lp_lock);
}
/*
 * Send event handling. Only matters for error cases, where we clean
 * up state on the peer and peer_ni that would otherwise be updated in
 * the REPLY event handler for a successful Ping, and the ACK event
 * handler for a successful Push.
 */
static int
lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
{
	int rc = 0;

	if (!ev->status)
		goto out;

	LASSERT(lp->lp_state & LNET_PEER_QUEUED);
	spin_lock(&lp->lp_lock);
	if (ev->msg_type == LNET_MSG_GET) {
		lp->lp_state &= ~LNET_PEER_PING_SENT;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
	} else { /* ev->msg_type == LNET_MSG_PUT */
		lp->lp_state &= ~LNET_PEER_PUSH_SENT;
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
		lp->lp_push_error = ev->status;
	}
	spin_unlock(&lp->lp_lock);
	rc = LNET_REDISCOVER_PEER;
out:
	CDEBUG(D_NET, "%s Send to %s: %d\n",
	       (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
	       libcfs_nid2str(ev->target.nid), rc);
	return rc;
}
/*
 * Event handler for the discovery EQ.
 *
 * Called with lnet_res_lock(cpt) held. The cpt is the
 * lnet_cpt_of_cookie() of the md handle cookie.
 */
static void lnet_discovery_event_handler(lnet_event_t *event)
{
	struct lnet_peer *lp = event->md.user_ptr;
	struct lnet_ping_buffer *pbuf;
	int rc;

	/* discovery needs to take another look */
	rc = LNET_REDISCOVER_PEER;

	switch (event->type) {
	case LNET_EVENT_ACK:
		lnet_discovery_event_ack(lp, event);
		break;
	case LNET_EVENT_REPLY:
		lnet_discovery_event_reply(lp, event);
		break;
	case LNET_EVENT_SEND:
		/* Only send failure triggers a retry. */
		rc = lnet_discovery_event_send(lp, event);
		break;
	case LNET_EVENT_UNLINK:
		/* Valid event, nothing to do here. */
		break;
	default:
		/* Invalid events. */
		LBUG();
	}
	lnet_net_lock(LNET_LOCK_EX);
	if (event->unlinked) {
		pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
		lnet_ping_buffer_decref(pbuf);
		lnet_peer_decref_locked(lp);
	}
	if (rc == LNET_REDISCOVER_PEER) {
		list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Build a peer from incoming data.
 *
 * The NIDs in the incoming data are supposed to be structured as follows:
 *  - loopback
 *  - primary NID
 *  - other NIDs in same net
 *  - NIDs in second net
 *  - NIDs in third net
 *  - ...
 * This is due to the way the list of NIDs in the data is created.
 *
 * Note that this function will mark the peer uptodate unless an
 * ENOMEM is encountered. All other errors are due to a conflict
 * between the DLC configuration and what discovery sees. We treat DLC
 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
 * peer from becoming stuck in discovery.
 */
static int lnet_peer_merge_data(struct lnet_peer *lp,
				struct lnet_ping_buffer *pbuf)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t *curnis = NULL;
	lnet_nid_t *addnis = NULL;
	lnet_nid_t *delnis = NULL;
	unsigned flags;
	int ncurnis;
	int naddnis;
	int ndelnis;
	int nnis = 0;
	int i;
	int j;
	int rc;

	flags = LNET_PEER_DISCOVERED;
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
	LIBCFS_ALLOC(curnis, nnis * sizeof(lnet_nid_t));
	LIBCFS_ALLOC(addnis, nnis * sizeof(lnet_nid_t));
	LIBCFS_ALLOC(delnis, nnis * sizeof(lnet_nid_t));
	if (!curnis || !addnis || !delnis) {
		rc = -ENOMEM;
		goto out;
	}
	ncurnis = 0;
	naddnis = 0;
	ndelnis = 0;

	/* Construct the list of NIDs present in peer. */
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		curnis[ncurnis++] = lpni->lpni_nid;

	/*
	 * Check for NIDs in pbuf not present in curnis[].
	 * The loop starts at 1 to skip the loopback NID.
	 */
	for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
		for (j = 0; j < ncurnis; j++)
			if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
				break;
		if (j == ncurnis)
			addnis[naddnis++] = pbuf->pb_info.pi_ni[i].ns_nid;
	}
	/*
	 * Check for NIDs in curnis[] not present in pbuf.
	 * The nested loop starts at 1 to skip the loopback NID.
	 *
	 * But never add the loopback NID to delnis[]: if it is
	 * present in curnis[] then this peer is for this node.
	 */
	for (i = 0; i < ncurnis; i++) {
		if (LNET_NETTYP(LNET_NIDNET(curnis[i])) == LOLND)
			continue;
		for (j = 1; j < pbuf->pb_info.pi_nnis; j++)
			if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid)
				break;
		if (j == pbuf->pb_info.pi_nnis)
			delnis[ndelnis++] = curnis[i];
	}

	for (i = 0; i < naddnis; i++) {
		rc = lnet_peer_add_nid(lp, addnis[i], flags);
		if (rc) {
			CERROR("Error adding NID %s to peer %s: %d\n",
			       libcfs_nid2str(addnis[i]),
			       libcfs_nid2str(lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
	}
	for (i = 0; i < ndelnis; i++) {
		rc = lnet_peer_del_nid(lp, delnis[i], flags);
		if (rc) {
			CERROR("Error deleting NID %s from peer %s: %d\n",
			       libcfs_nid2str(delnis[i]),
			       libcfs_nid2str(lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
	}
	/*
	 * Errors other than -ENOMEM are due to peers having been
	 * configured with DLC. Ignore these because DLC overrides
	 * Discovery.
	 */
	rc = 0;
out:
	LIBCFS_FREE(curnis, nnis * sizeof(lnet_nid_t));
	LIBCFS_FREE(addnis, nnis * sizeof(lnet_nid_t));
	LIBCFS_FREE(delnis, nnis * sizeof(lnet_nid_t));
	lnet_ping_buffer_decref(pbuf);
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);

	if (rc) {
		spin_lock(&lp->lp_lock);
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_PING_REQUIRED;
		spin_unlock(&lp->lp_lock);
	}
	return rc;
}
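/*
 * Worked example of the merge above (values are illustrative): if the
 * peer currently has NIDs {A, B} and the incoming data lists
 * {loopback, A, C}, then curnis = {A, B}, addnis = {C} and
 * delnis = {B}; the loopback NID is never considered in either
 * direction.
 */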
/*
 * The data in pbuf says lp is its primary peer, but the data was
 * received by a different peer. Try to update lp with the data.
 */
static int
lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
{
	lnet_handle_md_t mdh;

	/* Queue lp for discovery, and force it on the request queue. */
	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_queue_for_discovery(lp))
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
	lnet_net_unlock(LNET_LOCK_EX);

	LNetInvalidateHandle(&mdh);

	/*
	 * Decide whether we can move the peer to the DATA_PRESENT state.
	 *
	 * We replace stale data for a multi-rail peer, repair PING_FAILED
	 * status, and preempt PING_REQUIRED.
	 *
	 * If after that we have DATA_PRESENT, we merge it into this peer.
	 */
	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
			lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
		} else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
			lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
			lnet_ping_buffer_decref(pbuf);
			pbuf = lp->lp_data;
			lp->lp_data = NULL;
		}
	}
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		lnet_ping_buffer_decref(lp->lp_data);
		lp->lp_data = NULL;
		lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
	}
	if (lp->lp_state & LNET_PEER_PING_FAILED) {
		mdh = lp->lp_ping_mdh;
		LNetInvalidateHandle(&lp->lp_ping_mdh);
		lp->lp_state &= ~LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
	}
	if (lp->lp_state & LNET_PEER_PING_REQUIRED) {
		lp->lp_state &= ~LNET_PEER_PING_REQUIRED;
	}
	lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
	spin_unlock(&lp->lp_lock);

	if (!LNetHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);

	if (pbuf)
		return lnet_peer_merge_data(lp, pbuf);

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
	return 0;
}
2358 * Update a peer using the data received.
2360 static int lnet_peer_data_present(struct lnet_peer *lp)
2361 __must_hold(&lp->lp_lock)
2363 struct lnet_ping_buffer *pbuf;
2364 struct lnet_peer_ni *lpni;
2365 lnet_nid_t nid = LNET_NID_ANY;
2371 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2372 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2373 spin_unlock(&lp->lp_lock);
2376 * Modifications of peer structures are done while holding the
2377 * ln_api_mutex. A global lock is required because we may be
2378 * modifying multiple peer structures, and a mutex greatly
2379 * simplifies memory management.
2381 * The actual changes to the data structures must also protect
2382 * against concurrent lookups, for which the lnet_net_lock in
2383 * LNET_LOCK_EX mode is used.
2385 mutex_lock(&the_lnet.ln_api_mutex);
2386 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2387 rc = -ESHUTDOWN;
2388 goto out;
2389 }
2392 * If this peer is not on the peer list then it is being torn
2393 * down, and our reference count may be all that is keeping it
2394 * alive. Don't do any work on it.
2396 if (list_empty(&lp->lp_peer_list))
2397 goto out;
2399 flags = LNET_PEER_DISCOVERED;
2400 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2401 flags |= LNET_PEER_MULTI_RAIL;
2404 * Check whether the primary NID in the message matches the
2405 * primary NID of the peer. If it does, update the peer, if
2406 * it does not, check whether there is already a peer with
2407 * that primary NID. If no such peer exists, try to update
2408 * the primary NID of the current peer (allowed if it was
2409 * created due to message traffic) and complete the update.
2410 * If the peer did exist, hand off the data to it.
2412 * The peer for the loopback interface is a special case: this
2413 * is the peer for the local node, and we want to set its
2414 * primary NID to the correct value here.
2416 if (pbuf->pb_info.pi_nnis > 1)
2417 nid = pbuf->pb_info.pi_ni[1].ns_nid;
2418 if (LNET_NETTYP(LNET_NIDNET(lp->lp_primary_nid)) == LOLND) {
2419 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2420 if (!rc)
2421 rc = lnet_peer_merge_data(lp, pbuf);
2422 } else if (lp->lp_primary_nid == nid) {
2423 rc = lnet_peer_merge_data(lp, pbuf);
2425 lpni = lnet_find_peer_ni_locked(nid);
2426 if (!lpni) {
2427 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2428 if (rc) {
2429 CERROR("Primary NID error %s versus %s: %d\n",
2430 libcfs_nid2str(lp->lp_primary_nid),
2431 libcfs_nid2str(nid), rc);
2432 } else {
2433 rc = lnet_peer_merge_data(lp, pbuf);
2435 } else {
2436 rc = lnet_peer_set_primary_data(
2437 lpni->lpni_peer_net->lpn_peer, pbuf);
2438 lnet_peer_ni_decref_locked(lpni);
2441 out:
2442 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2443 mutex_unlock(&the_lnet.ln_api_mutex);
2445 spin_lock(&lp->lp_lock);
2446 /* Tell discovery to re-check the peer immediately. */
2447 if (!rc)
2448 rc = LNET_REDISCOVER_PEER;
2449 return rc;
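/*
 * A minimal sketch of the locking discipline described above; the
 * function and its workload are hypothetical. ln_api_mutex serializes
 * all modifiers of peer structures, while lnet_net_lock(LNET_LOCK_EX)
 * is taken only around the stretches that must exclude concurrent
 * lookups.
 */
static void lnet_peer_modify_sketch(struct lnet_peer *lp)
{
	mutex_lock(&the_lnet.ln_api_mutex);
	/* allocate/free memory, examine state, etc. */
	lnet_net_lock(LNET_LOCK_EX);
	/* splice peer_ni lists, change NIDs visible to lookups */
	lnet_net_unlock(LNET_LOCK_EX);
	mutex_unlock(&the_lnet.ln_api_mutex);
}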
2453 * A ping failed. Clear the PING_FAILED state and set the
2454 * PING_REQUIRED state, to ensure a retry even if discovery is
2455 * disabled. This avoids being left with incorrect state.
2457 static int lnet_peer_ping_failed(struct lnet_peer *lp)
2458 __must_hold(&lp->lp_lock)
2460 lnet_handle_md_t mdh;
2461 int rc;
2463 mdh = lp->lp_ping_mdh;
2464 LNetInvalidateHandle(&lp->lp_ping_mdh);
2465 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2466 lp->lp_state |= LNET_PEER_PING_REQUIRED;
2467 rc = lp->lp_ping_error;
2468 lp->lp_ping_error = 0;
2469 spin_unlock(&lp->lp_lock);
2471 if (!LNetHandleIsInvalid(mdh))
2472 LNetMDUnlink(mdh);
2473 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2475 spin_lock(&lp->lp_lock);
2476 return rc ? rc : LNET_REDISCOVER_PEER;
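/*
 * For context, a sketch (hypothetical helper name) of the producer
 * side of the handoff above: the discovery event handler must not
 * call LNetMDUnlink() itself (see the comment on
 * lnet_peer_push_failed() below), so on a failed ping it only records
 * the error and wakes the discovery thread, which performs the unlink
 * in process context.
 */
static void lnet_peer_note_ping_failure(struct lnet_peer *lp, int status)
{
	spin_lock(&lp->lp_lock);
	lp->lp_state |= LNET_PEER_PING_FAILED;
	lp->lp_ping_error = status;
	spin_unlock(&lp->lp_lock);
	wake_up(&the_lnet.ln_dc_waitq);
}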
2480 * Select NID to send a Ping or Push to.
2482 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
2484 struct lnet_peer_ni *lpni;
2486 /* Look for a direct-connected NID for this peer. */
2487 lpni = NULL;
2488 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2489 if (!lnet_is_peer_ni_healthy_locked(lpni))
2490 continue;
2491 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
2492 continue;
2493 break;
2494 }
2495 if (lpni)
2496 return lpni->lpni_nid;
2498 /* Look for a routed-connected NID for this peer. */
2499 lpni = NULL;
2500 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2501 if (!lnet_is_peer_ni_healthy_locked(lpni))
2502 continue;
2503 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
2504 continue;
2505 break;
2506 }
2507 if (lpni)
2508 return lpni->lpni_nid;
2510 return LNET_NID_ANY;
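/*
 * Callers treat LNET_NID_ANY from this selector as "no healthy path
 * right now"; both senders below turn it into an error and unwind,
 * roughly:
 *
 *	id.nid = lnet_peer_select_nid(lp);
 *	if (id.nid == LNET_NID_ANY) {
 *		rc = -EHOSTUNREACH;
 *		goto fail_unlink_md;
 *	}
 */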
2513 /* Active side of ping. */
2514 static int lnet_peer_send_ping(struct lnet_peer *lp)
2515 __must_hold(&lp->lp_lock)
2517 lnet_md_t md = { NULL };
2518 lnet_process_id_t id;
2519 struct lnet_ping_buffer *pbuf;
2520 int nnis;
2521 int rc;
2522 int cpt;
2524 lp->lp_state |= LNET_PEER_PING_SENT;
2525 lp->lp_state &= ~LNET_PEER_PING_REQUIRED;
2526 spin_unlock(&lp->lp_lock);
2528 nnis = MAX(lp->lp_data_nnis, LNET_MIN_INTERFACES);
2529 pbuf = lnet_ping_buffer_alloc(nnis, GFP_NOFS);
2530 if (!pbuf) {
2531 rc = -ENOMEM;
2532 goto fail_error;
2533 }
2535 /* initialize md content */
2536 md.start = &pbuf->pb_info;
2537 md.length = LNET_PING_INFO_SIZE(nnis);
2538 md.threshold = 2; /* GET/REPLY */
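/* LNET_MD_TRUNCATE: the peer may report more NIDs than the nnis we
 * sized the buffer for; accept a truncated REPLY rather than fail
 * the ping */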
2540 md.options = LNET_MD_TRUNCATE;
2542 md.eq_handle = the_lnet.ln_dc_eqh;
2544 rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_ping_mdh);
2545 if (rc != 0) {
2546 lnet_ping_buffer_decref(pbuf);
2547 CERROR("Can't bind MD: %d\n", rc);
2550 cpt = lnet_net_lock_current();
2551 /* Refcount for MD. */
2552 lnet_peer_addref_locked(lp);
2553 id.pid = LNET_PID_LUSTRE;
2554 id.nid = lnet_peer_select_nid(lp);
2555 lnet_net_unlock(cpt);
2557 if (id.nid == LNET_NID_ANY) {
2558 rc = -EHOSTUNREACH;
2559 goto fail_unlink_md;
2560 }
2562 rc = LNetGet(LNET_NID_ANY, lp->lp_ping_mdh, id,
2563 LNET_RESERVED_PORTAL,
2564 LNET_PROTO_PING_MATCHBITS, 0);
2566 if (rc)
2567 goto fail_unlink_md;
2569 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2571 spin_lock(&lp->lp_lock);
2572 return 0;
2574 fail_unlink_md:
2575 LNetMDUnlink(lp->lp_ping_mdh);
2576 LNetInvalidateHandle(&lp->lp_ping_mdh);
2577 fail_error:
2578 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2580 * The errors that get us here are considered hard errors and
2581 * cause Discovery to terminate. So we clear PING_SENT, but do
2582 * not set either PING_FAILED or PING_REQUIRED.
2584 spin_lock(&lp->lp_lock);
2585 lp->lp_state &= ~LNET_PEER_PING_SENT;
2586 return rc;
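/*
 * For reference: the GET above is matched by the ping target each
 * node posts on LNET_RESERVED_PORTAL under LNET_PROTO_PING_MATCHBITS.
 * The REPLY is delivered to the discovery EQ handler, which stages
 * the received ping info for the thread to consume as lp->lp_data.
 */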
2590 * This function exists because you cannot call LNetMDUnlink() from an
2591 * event handler.
2593 static int lnet_peer_push_failed(struct lnet_peer *lp)
2594 __must_hold(&lp->lp_lock)
2596 lnet_handle_md_t mdh;
2597 int rc;
2599 mdh = lp->lp_push_mdh;
2600 LNetInvalidateHandle(&lp->lp_push_mdh);
2601 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
2602 rc = lp->lp_push_error;
2603 lp->lp_push_error = 0;
2604 spin_unlock(&lp->lp_lock);
2606 if (!LNetHandleIsInvalid(mdh))
2607 LNetMDUnlink(mdh);
2608 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2609 spin_lock(&lp->lp_lock);
2610 return rc ? rc : LNET_REDISCOVER_PEER;
2613 /* Active side of push. */
2614 static int lnet_peer_send_push(struct lnet_peer *lp)
2615 __must_hold(&lp->lp_lock)
2617 struct lnet_ping_buffer *pbuf;
2618 lnet_process_id_t id;
2619 lnet_md_t md = { NULL };
2620 int rc;
2621 int cpt;
2623 LASSERT(lp->lp_state & LNET_PEER_MULTI_RAIL);
2625 lp->lp_state |= LNET_PEER_PUSH_SENT;
2626 spin_unlock(&lp->lp_lock);
2628 cpt = lnet_net_lock_current();
2629 pbuf = the_lnet.ln_ping_target;
2630 lnet_ping_buffer_addref(pbuf);
2631 lnet_net_unlock(cpt);
2633 /* Push source MD */
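/* (the source buffer is this node's own ping target, so a pushed
 * peer ends up with the same data it would obtain by pinging us) */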
2634 md.start = &pbuf->pb_info;
2635 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
2636 md.threshold = 2; /* Put/Ack */
2639 md.eq_handle = the_lnet.ln_dc_eqh;
2642 rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
2643 if (rc) {
2644 lnet_ping_buffer_decref(pbuf);
2645 CERROR("Can't bind push source MD: %d\n", rc);
2648 cpt = lnet_net_lock_current();
2649 /* Refcount for MD. */
2650 lnet_peer_addref_locked(lp);
2651 id.pid = LNET_PID_LUSTRE;
2652 id.nid = lnet_peer_select_nid(lp);
2653 lnet_net_unlock(cpt);
2655 if (id.nid == LNET_NID_ANY) {
2656 rc = -EHOSTUNREACH;
2657 goto fail_unlink_md;
2658 }
2660 rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
2661 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
2662 LNET_PROTO_PING_MATCHBITS, 0, 0);
2664 if (rc)
2665 goto fail_unlink_md;
2667 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2669 spin_lock(&lp->lp_lock);
2670 return 0;
2672 fail_unlink_md:
2673 LNetMDUnlink(lp->lp_push_mdh);
2674 LNetInvalidateHandle(&lp->lp_push_mdh);
2675 fail_error:
2676 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2678 * The errors that get us here are considered hard errors and
2679 * cause Discovery to terminate. So we clear PUSH_SENT, but do
2680 * not set PUSH_FAILED.
2682 spin_lock(&lp->lp_lock);
2683 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2684 return rc;
2688 * An unrecoverable error was encountered during discovery.
2689 * Set error status in peer and abort discovery.
2691 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
2693 CDEBUG(D_NET, "Discovery error %s: %d\n",
2694 libcfs_nid2str(lp->lp_primary_nid), error);
2696 spin_lock(&lp->lp_lock);
2697 lp->lp_dc_error = error;
2698 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2699 lp->lp_state |= LNET_PEER_UNDISCOVERED;
2700 spin_unlock(&lp->lp_lock);
2704 * Mark the peer as undiscovered because discovery was disabled.
2706 static int lnet_peer_undiscovered(struct lnet_peer *lp)
2707 __must_hold(&lp->lp_lock)
2710 lp->lp_state &= ~(LNET_PEER_DISCOVERED | LNET_PEER_DISCOVERING);
2711 lp->lp_state |= LNET_PEER_UNDISCOVERED;
2713 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2715 return 0;
2719 * Mark the peer as discovered.
2721 static int lnet_peer_discovered(struct lnet_peer *lp)
2722 __must_hold(&lp->lp_lock)
2724 lp->lp_state |= LNET_PEER_DISCOVERED;
2725 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2727 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2729 return 0;
2733 * Wait for work to be queued or some other change that must be
2734 * attended to. Returns non-zero if the discovery thread should shut
2735 * down.
2737 static int lnet_peer_discovery_wait_for_work(void)
2739 int cpt;
2740 int rc = 0;
2742 DEFINE_WAIT(wait);
2744 cpt = lnet_net_lock_current();
2745 for (;;) {
2746 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
2747 TASK_INTERRUPTIBLE);
2748 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
2749 break;
2750 if (lnet_push_target_resize_needed())
2751 break;
2752 if (!list_empty(&the_lnet.ln_dc_request))
2753 break;
2754 lnet_net_unlock(cpt);
2755 schedule();
2756 finish_wait(&the_lnet.ln_dc_waitq, &wait);
2757 cpt = lnet_net_lock_current();
2759 finish_wait(&the_lnet.ln_dc_waitq, &wait);
2761 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
2762 rc = -ESHUTDOWN;
2764 lnet_net_unlock(cpt);
2766 CDEBUG(D_NET, "woken: %d\n", rc);
2768 CDEBUG(D_NET, "%d\n", rc);
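/*
 * The loop above is the classic prepare_to_wait() idiom; stripped of
 * the lnet_net_lock() juggling it reduces to this sketch, with
 * condition() standing in for the three checks:
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition())
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 */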
2773 /* The discovery thread. */
2774 static int lnet_peer_discovery(void *arg)
2776 struct lnet_peer *lp, *tmp;
2777 int rc;
2779 CDEBUG(D_NET, "started\n");
2780 cfs_block_allsigs();
2782 for (;;) {
2783 if (lnet_peer_discovery_wait_for_work())
2784 break;
2786 if (lnet_push_target_resize_needed())
2787 lnet_push_target_resize();
2789 lnet_net_lock(LNET_LOCK_EX);
2790 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
2791 break;
2792 while (!list_empty(&the_lnet.ln_dc_request)) {
2793 lp = list_first_entry(&the_lnet.ln_dc_request,
2794 struct lnet_peer, lp_dc_list);
2795 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
2796 lnet_net_unlock(LNET_LOCK_EX);
2798 spin_lock(&lp->lp_lock);
2799 CDEBUG(D_NET, "peer %s state %#x\n",
2800 libcfs_nid2str(lp->lp_primary_nid),
2801 lp->lp_state);
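/*
 * Note the dispatch order below: results of a completed ping or push
 * are consumed before new work is issued, a forced ping
 * (PING_REQUIRED) is sent even when discovery is disabled, and only
 * a peer with up-to-date NIDs and no pending push is marked
 * discovered.
 */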
2802 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2803 rc = lnet_peer_data_present(lp);
2804 else if (lp->lp_state & LNET_PEER_PING_FAILED)
2805 rc = lnet_peer_ping_failed(lp);
2806 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
2807 rc = lnet_peer_push_failed(lp);
2808 else if (lp->lp_state & LNET_PEER_PING_REQUIRED)
2809 rc = lnet_peer_send_ping(lp);
2810 else if (!lnet_peer_discovery_enabled)
2811 rc = lnet_peer_undiscovered(lp);
2812 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
2813 rc = lnet_peer_send_ping(lp);
2814 else if (lnet_peer_needs_push(lp))
2815 rc = lnet_peer_send_push(lp);
2817 rc = lnet_peer_discovered(lp);
2818 CDEBUG(D_NET, "peer %s state %#x rc %d\n",
2819 libcfs_nid2str(lp->lp_primary_nid),
2820 lp->lp_state, rc);
2821 spin_unlock(&lp->lp_lock);
2823 lnet_net_lock(LNET_LOCK_EX);
2824 if (rc == LNET_REDISCOVER_PEER) {
2825 list_move(&lp->lp_dc_list,
2826 &the_lnet.ln_dc_request);
2827 } else if (rc) {
2828 lnet_peer_discovery_error(lp, rc);
2829 }
2830 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2831 lnet_peer_discovery_complete(lp);
2832 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
2833 break;
2835 lnet_net_unlock(LNET_LOCK_EX);
2838 CDEBUG(D_NET, "stopping\n");
2840 * Clean up before telling lnet_peer_discovery_stop() that
2841 * we're done. Use wake_up() below to somewhat reduce the
2842 * size of the thundering herd if there are multiple threads
2843 * waiting on discovery of a single peer.
2845 LNetEQFree(the_lnet.ln_dc_eqh);
2846 LNetInvalidateHandle(&the_lnet.ln_dc_eqh);
2848 lnet_net_lock(LNET_LOCK_EX);
/* iterate with the _safe variant: each entry is unlinked as its
 * discovery is completed */
2849 list_for_each_entry_safe(lp, tmp, &the_lnet.ln_dc_request, lp_dc_list) {
2850 lnet_peer_discovery_error(lp, -ESHUTDOWN);
2851 lnet_peer_discovery_complete(lp);
2852 }
2853 list_for_each_entry_safe(lp, tmp, &the_lnet.ln_dc_working, lp_dc_list) {
2854 lnet_peer_discovery_error(lp, -ESHUTDOWN);
2855 lnet_peer_discovery_complete(lp);
2857 lnet_net_unlock(LNET_LOCK_EX);
2859 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
2860 wake_up(&the_lnet.ln_dc_waitq);
2862 CDEBUG(D_NET, "stopped\n");
2867 /* ln_api_mutex is held on entry. */
2868 int lnet_peer_discovery_start(void)
2870 struct task_struct *task;
2871 int rc;
2873 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
2874 return -EALREADY;
2876 INIT_LIST_HEAD(&the_lnet.ln_dc_request);
2877 INIT_LIST_HEAD(&the_lnet.ln_dc_working);
2878 init_waitqueue_head(&the_lnet.ln_dc_waitq);
2880 rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
2881 if (rc != 0) {
2882 CERROR("Can't allocate discovery EQ: %d\n", rc);
2883 return rc;
2884 }
2886 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
2887 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
2888 if (IS_ERR(task)) {
2889 rc = PTR_ERR(task);
2890 CERROR("Can't start peer discovery thread: %d\n", rc);
2892 LNetEQFree(the_lnet.ln_dc_eqh);
2893 LNetInvalidateHandle(&the_lnet.ln_dc_eqh);
2895 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
2898 CDEBUG(D_NET, "discovery start: %d\n", rc);
2903 /* ln_api_mutex is held on entry. */
2904 void lnet_peer_discovery_stop(void)
2906 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
2907 return;
2909 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
2910 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
2911 wake_up(&the_lnet.ln_dc_waitq);
2913 wait_event(the_lnet.ln_dc_waitq,
2914 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
2916 LASSERT(list_empty(&the_lnet.ln_dc_request));
2917 LASSERT(list_empty(&the_lnet.ln_dc_working));
2919 CDEBUG(D_NET, "discovery stopped\n");
2924 void
2925 lnet_debug_peer(lnet_nid_t nid)
2927 char *aliveness = "NA";
2928 struct lnet_peer_ni *lp;
2929 int cpt;
2931 cpt = lnet_cpt_of_nid(nid, NULL);
2933 lnet_net_lock(cpt);
2934 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
2935 if (IS_ERR(lp)) {
2936 lnet_net_unlock(cpt);
2937 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
2941 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
2942 aliveness = lp->lpni_alive ? "up" : "down";
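/* columns: nid, refs, aliveness, max tx credits, rtr credits,
 * min rtr credits, tx credits, min tx credits, tx queue bytes */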
2944 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
2945 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
2946 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
2947 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
2948 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
2950 lnet_peer_ni_decref_locked(lp);
2952 lnet_net_unlock(cpt);
2955 /* Gathering information for userspace. */
2957 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
2958 char aliveness[LNET_MAX_STR_LEN],
2959 __u32 *cpt_iter, __u32 *refcount,
2960 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
2961 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
2962 __u32 *peer_tx_qnob)
2964 struct lnet_peer_table *peer_table;
2965 struct lnet_peer_ni *lp;
2966 int j;
2967 int lncpt;
2968 bool found = false;
2970 /* get the number of CPTs */
2971 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
2973 /* if the cpt number to be examined is >= the number of cpts in
2974 * the system then indicate that there are no more cpts to examine
2976 if (*cpt_iter >= lncpt)
2977 return -ENOENT;
2979 /* get the current table */
2980 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
2981 /* if the ptable is NULL then there are no more cpts to examine */
2982 if (peer_table == NULL)
2983 return -ENOENT;
2985 lnet_net_lock(*cpt_iter);
2987 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
2988 struct list_head *peers = &peer_table->pt_hash[j];
2990 list_for_each_entry(lp, peers, lpni_hashlist) {
2991 if (peer_index-- > 0)
2992 continue;
2994 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
2995 if (lnet_isrouter(lp) ||
2996 lnet_peer_aliveness_enabled(lp))
2997 snprintf(aliveness, LNET_MAX_STR_LEN,
2998 lp->lpni_alive ? "up" : "down");
3000 *nid = lp->lpni_nid;
3001 *refcount = atomic_read(&lp->lpni_refcount);
3002 *ni_peer_tx_credits =
3003 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3004 *peer_tx_credits = lp->lpni_txcredits;
3005 *peer_rtr_credits = lp->lpni_rtrcredits;
3006 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3007 *peer_tx_qnob = lp->lpni_txqnob;
3009 found = true;
3010 break;
3013 lnet_net_unlock(*cpt_iter);
3017 return found ? 0 : -ENOENT;
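/*
 * A sketch of how this iterator can be driven (illustrative variable
 * names; the real caller is the peer-list ioctl handler): walk the
 * indices until the call reports -ENOENT, letting *cpt_iter carry the
 * CPT cursor between calls.
 *
 *	__u32 idx = 0, cpt = 0;
 *	while (!lnet_get_peer_ni_info(idx++, &nid, alive, &cpt, &ref,
 *				      &ni_tx, &tx, &rtr, &min_rtr,
 *				      &qnob))
 *		consume_one_record();
 */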
3020 /* ln_api_mutex is held, which keeps the peer list stable */
3021 int lnet_get_peer_info(__u32 idx, lnet_nid_t *primary_nid, lnet_nid_t *nid,
3022 bool *mr, struct lnet_peer_ni_credit_info *peer_ni_info,
3023 struct lnet_ioctl_element_stats *peer_ni_stats)
3025 struct lnet_peer_ni *lpni = NULL;
3026 struct lnet_peer_net *lpn = NULL;
3027 struct lnet_peer *lp = NULL;
3029 lpni = lnet_get_peer_ni_idx_locked(idx, &lpn, &lp);
3030 if (!lpni)
3031 return -ENOENT;
3034 *primary_nid = lp->lp_primary_nid;
3035 *mr = lnet_peer_is_multi_rail(lp);
3036 *nid = lpni->lpni_nid;
3037 snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3038 if (lnet_isrouter(lpni) ||
3039 lnet_peer_aliveness_enabled(lpni))
3040 snprintf(peer_ni_info->cr_aliveness, LNET_MAX_STR_LEN,
3041 lpni->lpni_alive ? "up" : "down");
3043 peer_ni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3044 peer_ni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3045 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3046 peer_ni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3047 peer_ni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3048 peer_ni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3049 peer_ni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3050 peer_ni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3052 peer_ni_stats->send_count = atomic_read(&lpni->lpni_stats.send_count);
3053 peer_ni_stats->recv_count = atomic_read(&lpni->lpni_stats.recv_count);
3054 peer_ni_stats->drop_count = atomic_read(&lpni->lpni_stats.drop_count);
3056 return 0;