/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}
		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);
		INIT_LIST_HEAD(&ptable->pt_peer_list);
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	atomic_set(&lpni->lpni_refcount, 1);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_NID_ANY;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * the loopback peer.
	 */
	if (nid == LNET_NID_LO_0)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected the lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We can not resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Can not delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}
	return rc2;
}

int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
	return rc;
}
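/*
 * Usage sketch (illustrative only, not part of the API): removing a
 * secondary NID from a Multi-Rail peer configured via DLC. The flags
 * computation mirrors lnet_del_peer_ni() below.
 *
 *	unsigned flags = LNET_PEER_CONFIGURED | LNET_PEER_MULTI_RAIL;
 *	int rc;
 *
 *	rc = lnet_peer_del_nid(lp, nid, flags);
 *	if (rc == -EBUSY)
 *		then nid is the primary NID and other NIDs remain;
 *	if (rc == -ECHILD)
 *		then nid belongs to a different peer.
 */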
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}
	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}
	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
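/*
 * Usage sketch (illustrative): lnet_find_peer() returns the peer with
 * a reference held, so the caller must drop it when done, as
 * lnet_peer_push_event() below does:
 *
 *	struct lnet_peer *lp = lnet_find_peer(nid);
 *
 *	if (lp) {
 *		... use lp ...
 *		lnet_net_lock(LNET_LOCK_EX);
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(LNET_LOCK_EX);
 *	}
 */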
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}
	return NULL;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);
		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (peer_net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);
		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);
	return lpni;
}
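/*
 * Iteration sketch (illustrative; this is the pattern used elsewhere
 * in this file, e.g. lnet_peer_clr_non_mr_pref_nids()): walk every
 * peer_ni of a peer by feeding the previous entry back in until NULL:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
 *		... inspect lpni ...
 *	}
 *
 * Callers that delete entries while walking fetch the next entry
 * before deleting the current one, as lnet_peer_del_locked() does.
 */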
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nid2str(lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}
	return false;
}
void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
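/*
 * The splice-onto-a-local-list pattern above (used again in
 * lnet_peer_clr_pref_nids() below) detaches all entries while holding
 * the lock, then frees them after dropping it, so LIBCFS_FREE() is
 * never called inside the locked section. A minimal sketch of the
 * pattern, with placeholder lock/list names:
 *
 *	LIST_HEAD(zombies);
 *
 *	lock();
 *	list_splice_init(&protected_list, &zombies);
 *	unlock();
 *
 *	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
 *		list_del(&ne->nl_list);
 *		LIBCFS_FREE(ne, sizeof(*ne));
 *	}
 */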
int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}
	return false;
}
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
	return rc;
}
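/*
 * Usage sketch (illustrative): this is how traffic-created peers pick
 * up a preferred source NID; lnet_peer_ni_traffic_add() below calls
 * it with the "pref" NID it was handed:
 *
 *	if (pref != LNET_NID_ANY)
 *		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
 */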
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);
	return rc;
}
void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}
		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;
	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY))
		return true;
	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);
	return rc;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	while (!lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
		spin_unlock(&lp->lp_lock);

		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		lp = lpni->lpni_peer_net->lpn_peer;

		/* Only try once if discovery is disabled */
		if (lnet_is_discovery_disabled(lp))
			break;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);
	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
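/*
 * Usage sketch (illustrative): callers use LNetPrimaryNID() to map any
 * of a peer's NIDs to the NID that identifies the whole peer, possibly
 * triggering discovery along the way:
 *
 *	lnet_nid_t prim = LNetPrimaryNID(nid);
 *
 * When discovery is disabled (locally or by the peer) the loop above
 * runs at most once, so prim may simply equal nid.
 */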
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}
	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nid2str(lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *rtr_lp =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = rtr_lp->lp_rtr_refcount;
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(rtr_lp, lp);
			}
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;
	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		goto out;
	lp->lp_primary_nid = nid;
out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
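/*
 * Example (illustrative sketch): configuring a Multi-Rail peer from
 * the ioctl path, first with its primary NID and then with an
 * additional NID:
 *
 *	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
 *	if (!rc)
 *		rc = lnet_add_peer_ni(prim_nid, second_nid, true);
 *
 * This corresponds to "lnetctl peer add" in userspace, which reaches
 * this function through IOC_LIBCFS_ADD_PEER_NI.
 */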
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Can not be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
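/*
 * Example (illustrative sketch):
 *
 *	lnet_del_peer_ni(prim_nid, second_nid);    removes one NID
 *	lnet_del_peer_ni(prim_nid, LNET_NID_ANY);  removes the whole peer
 *
 * Passing the primary NID itself as nid also deletes the whole peer,
 * per the checks above; router peers are refused with -EBUSY.
 */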
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);
	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
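/*
 * Usage sketch (illustrative): callers on the send/receive path hold
 * the per-cpt lnet_net_lock and must handle the ERR_PTR() returns:
 *
 *	struct lnet_peer_ni *lpni;
 *
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);
 *	... use lpni ...
 *	lnet_peer_ni_decref_locked(lpni);
 *
 * LNetPrimaryNID() above follows this pattern.
 */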
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up_all(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_peer *lp;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		pbuf->pb_needs_post = true;
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * FORCE_PING flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		/*
		 * Mark the peer for deletion if we already know about it
		 * and it's going from discovery set to no discovery set
		 */
		if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
				      LNET_PEER_DISCOVERING)) &&
		     lp->lp_state & LNET_PEER_DISCOVERED) {
			CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       lp->lp_state);
			lp->lp_state |= LNET_PEER_MARK_DELETION;
		}
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Update the MULTI_RAIL flag based on the push. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in.
	 * NB: We verified above that the MR feature bit is set in pi_features
	 */
	if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "peer %s(%p) is MR\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
		CWARN("Push says %s is Multi-Rail, DLC says not\n",
		      libcfs_nid2str(lp->lp_primary_nid));
	} else if (lnet_peer_discovery_disabled) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else {
		CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (ev->mlength < ev->rlength) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/* always assume new data */
	lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
			LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/* We've processed this buffer. It can be reposted */
	pbuf->pb_needs_post = true;

	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
2320 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2321 * dropped/retaken within this function. An lnet_peer_ni is passed in
2322 * because discovery could tear down an lnet_peer.
2325 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2328 struct lnet_peer *lp;
2333 lnet_net_unlock(cpt);
2334 lnet_net_lock(LNET_LOCK_EX);
2335 lp = lpni->lpni_peer_net->lpn_peer;
2336 lnet_peer_clear_discovery_error(lp);
2339 * We're willing to be interrupted. The lpni can become a
2340 * zombie if we race with DLC, so we must check for that.
2343 /* Keep lp alive when the lnet_net_lock is unlocked */
2344 lnet_peer_addref_locked(lp);
2345 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2346 if (signal_pending(current))
2348 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2351 * Don't repeat discovery if discovery is disabled. This is
2352 * done to ensure we can use discovery as a standard ping as
2353 * well for backwards compatibility with routers which do not
2354 * have discovery or have discovery disabled
2356 if (lnet_is_discovery_disabled(lp) && count > 0)
2358 if (lp->lp_dc_error)
2360 if (lnet_peer_is_uptodate(lp))
2362 lnet_peer_queue_for_discovery(lp);
2364 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2367 * If caller requested a non-blocking operation then
2368 * return immediately. Once discovery is complete any
2369 * pending messages that were stopped due to discovery
2370 * will be transmitted.
2375 lnet_net_unlock(LNET_LOCK_EX);
2377 finish_wait(&lp->lp_dc_waitq, &wait);
2378 lnet_net_lock(LNET_LOCK_EX);
2379 lnet_peer_decref_locked(lp);
2380 /* Peer may have changed */
2381 lp = lpni->lpni_peer_net->lpn_peer;
2383 finish_wait(&lp->lp_dc_waitq, &wait);
2385 lnet_net_unlock(LNET_LOCK_EX);
2387 lnet_peer_decref_locked(lp);
2389 * The peer may have changed, so re-check and rediscover if that turns
2390 * out to have been the case. The reference count on lp ensured that
2391 * even if it was unlinked from lpni the memory could not be recycled.
2392 * Thus the check below is sufficient to determine whether the peer
2393 * changed. If the peer changed, then lp must not be dereferenced.
2395 if (lp != lpni->lpni_peer_net->lpn_peer)
2398 if (signal_pending(current))
2400 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2402 else if (lp->lp_dc_error)
2403 rc = lp->lp_dc_error;
2405 CDEBUG(D_NET, "non-blocking discovery\n");
2406 else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2409 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2410 (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2411 libcfs_nid2str(lpni->lpni_nid), rc,
2412 (!block) ? "pending discovery" : "discovery complete");
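/*
 * Hypothetical caller sketch (illustration only; the surrounding
 * locals are assumed, not taken from this file):
 *
 *	cpt = lnet_net_lock_current();
 *	lpni = lnet_find_peer_ni_locked(nid);
 *	if (lpni)
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	lnet_net_unlock(cpt);
 *
 * With block == true the caller sleeps on lp_dc_waitq until discovery
 * completes, a signal arrives, or LNet shuts down; with block == false
 * the peer is only queued for discovery and the call returns without
 * waiting.
 */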
2417 /* Handle an incoming ack for a push. */
2419 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2421 struct lnet_ping_buffer *pbuf;
2423 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2424 spin_lock(&lp->lp_lock);
2425 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2426 lp->lp_push_error = ev->status;
2428 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2430 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2431 spin_unlock(&lp->lp_lock);
2433 CDEBUG(D_NET, "peer %s ev->status %d\n",
2434 libcfs_nid2str(lp->lp_primary_nid), ev->status);
2437 /* Handle a Reply message. This is the reply to a Ping message. */
2439 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2441 struct lnet_ping_buffer *pbuf;
2444 spin_lock(&lp->lp_lock);
2446 lp->lp_disc_src_nid = ev->target.nid;
2449 * If some kind of error happened the contents of message
2450 * cannot be used. Set PING_FAILED to trigger a retry.
2453 lp->lp_state |= LNET_PEER_PING_FAILED;
2454 lp->lp_ping_error = ev->status;
2455 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2457 libcfs_nid2str(lp->lp_primary_nid),
2458 libcfs_nid2str(ev->source.nid));
2462 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2463 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2464 lnet_swap_pinginfo(pbuf);
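/*
 * A peer of opposite endianness writes the same magic value, so
 * seeing the byte-swapped magic here means every multi-byte field
 * of the ping info must be swabbed before it can be used.
 */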
2467 * A reply with invalid or corrupted info. Set PING_FAILED to
2468 * trigger a retry.
2469 */
2470 rc = lnet_ping_info_validate(&pbuf->pb_info);
2472 lp->lp_state |= LNET_PEER_PING_FAILED;
2473 lp->lp_ping_error = 0;
2474 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2475 libcfs_nid2str(lp->lp_primary_nid), rc);
2481 * The peer may have discovery disabled at its end. Set
2482 * NO_DISCOVERY as appropriate.
2484 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2485 !lnet_peer_discovery_disabled) {
2486 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2487 libcfs_nid2str(lp->lp_primary_nid));
2488 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2489 } else {
2490 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2491 libcfs_nid2str(lp->lp_primary_nid));
2492 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2496 * Update the MULTI_RAIL flag based on the reply. If the peer
2497 * was configured with DLC then the setting should match what
2498 * DLC has set.
2499 */
2500 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2501 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2502 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2503 libcfs_nid2str(lp->lp_primary_nid), lp);
2504 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2505 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2506 libcfs_nid2str(lp->lp_primary_nid));
2507 } else if (lnet_peer_discovery_disabled) {
2508 CDEBUG(D_NET,
2509 "peer %s(%p) not MR: DD disabled locally\n",
2510 libcfs_nid2str(lp->lp_primary_nid), lp);
2511 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2512 CDEBUG(D_NET,
2513 "peer %s(%p) not MR: DD disabled remotely\n",
2514 libcfs_nid2str(lp->lp_primary_nid), lp);
2515 } else {
2516 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2517 libcfs_nid2str(lp->lp_primary_nid), lp);
2518 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2519 lnet_peer_clr_non_mr_pref_nids(lp);
2521 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2522 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2523 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2524 libcfs_nid2str(lp->lp_primary_nid));
2525 } else {
2526 CERROR("Multi-Rail state vanished from %s\n",
2527 libcfs_nid2str(lp->lp_primary_nid));
2528 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2533 * Make sure we'll allocate the correct size ping buffer when
2534 * pinging the peer.
2535 */
2536 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2537 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2540 * Check for truncation of the Reply. Clear PING_SENT and set
2541 * PING_FAILED to trigger a retry.
2543 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2544 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2545 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2546 lp->lp_state |= LNET_PEER_PING_FAILED;
2547 lp->lp_ping_error = 0;
2548 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2549 libcfs_nid2str(lp->lp_primary_nid),
2550 pbuf->pb_info.pi_nnis);
2555 * Check the sequence numbers in the reply. These are only
2556 * available if the reply came from a Multi-Rail peer.
2558 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2559 pbuf->pb_info.pi_nnis > 1 &&
2560 lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2561 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2562 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2563 libcfs_nid2str(lp->lp_primary_nid),
2564 LNET_PING_BUFFER_SEQNO(pbuf),
2565 lp->lp_peer_seqno);
2566 else
2567 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2570 /* We're happy with the state of the data in the buffer. */
2571 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2572 libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2573 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2574 lnet_ping_buffer_decref(lp->lp_data);
2576 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2577 lnet_ping_buffer_addref(pbuf);
2580 lp->lp_state &= ~LNET_PEER_PING_SENT;
2581 spin_unlock(&lp->lp_lock);
2583 lnet_net_lock(LNET_LOCK_EX);
2585 * If this peer is a gateway, call the routing callback to
2586 * handle the ping reply
2588 if (lp->lp_rtr_refcount > 0)
2589 lnet_router_discovery_ping_reply(lp);
2590 lnet_net_unlock(LNET_LOCK_EX);
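/*
 * Summary of the Reply pipeline above: check ev->status, fix byte
 * order, validate the ping info, latch the peer's discovery and
 * Multi-Rail features, record the buffer size needed for future pings,
 * detect truncation, reconcile sequence numbers, and finally stash the
 * buffer with DATA_PRESENT set so the discovery thread can merge it.
 */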
2594 * Send event handling. Only matters for error cases, where we clean
2595 * up state on the peer and peer_ni that would otherwise be updated in
2596 * the REPLY event handler for a successful Ping, and the ACK event
2597 * handler for a successful Push.
2600 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2607 spin_lock(&lp->lp_lock);
2608 if (ev->msg_type == LNET_MSG_GET) {
2609 lp->lp_state &= ~LNET_PEER_PING_SENT;
2610 lp->lp_state |= LNET_PEER_PING_FAILED;
2611 lp->lp_ping_error = ev->status;
2612 } else { /* ev->msg_type == LNET_MSG_PUT */
2613 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2614 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2615 lp->lp_push_error = ev->status;
2617 spin_unlock(&lp->lp_lock);
2618 rc = LNET_REDISCOVER_PEER;
2620 CDEBUG(D_NET, "%s Send to %s: %d\n",
2621 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2622 libcfs_nid2str(ev->target.nid), rc);
2627 * Unlink event handling. This event is only seen if a call to
2628 * LNetMDUnlink() caused the MD to be unlinked. If this call was
2629 * made after the event was set up in LNetGet() or LNetPut() then we
2630 * assume the Ping or Push timed out.
2633 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2635 spin_lock(&lp->lp_lock);
2636 /* We've passed through LNetGet() */
2637 if (lp->lp_state & LNET_PEER_PING_SENT) {
2638 lp->lp_state &= ~LNET_PEER_PING_SENT;
2639 lp->lp_state |= LNET_PEER_PING_FAILED;
2640 lp->lp_ping_error = -ETIMEDOUT;
2641 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2642 libcfs_nid2str(lp->lp_primary_nid));
2644 /* We've passed through LNetPut() */
2645 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2646 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2647 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2648 lp->lp_push_error = -ETIMEDOUT;
2649 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2650 libcfs_nid2str(lp->lp_primary_nid));
2652 spin_unlock(&lp->lp_lock);
2656 * Event handler for the discovery EQ.
2658 * Called with lnet_res_lock(cpt) held. The cpt is the
2659 * lnet_cpt_of_cookie() of the md handle cookie.
2661 static void lnet_discovery_event_handler(struct lnet_event *event)
2663 struct lnet_peer *lp = event->md_user_ptr;
2664 struct lnet_ping_buffer *pbuf;
2667 /* discovery needs to take another look */
2668 rc = LNET_REDISCOVER_PEER;
2670 CDEBUG(D_NET, "Received event: %d\n", event->type);
2672 switch (event->type) {
2673 case LNET_EVENT_ACK:
2674 lnet_discovery_event_ack(lp, event);
2676 case LNET_EVENT_REPLY:
2677 lnet_discovery_event_reply(lp, event);
2679 case LNET_EVENT_SEND:
2680 /* Only send failure triggers a retry. */
2681 rc = lnet_discovery_event_send(lp, event);
2683 case LNET_EVENT_UNLINK:
2684 /* LNetMDUnlink() was called */
2685 lnet_discovery_event_unlink(lp, event);
2688 /* Invalid events. */
2691 lnet_net_lock(LNET_LOCK_EX);
2692 if (event->unlinked) {
2693 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2694 lnet_ping_buffer_decref(pbuf);
2695 lnet_peer_decref_locked(lp);
2698 /* put peer back at end of request queue, if discovery is not already
2699 * complete */
2700 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2701 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2702 wake_up(&the_lnet.ln_dc_waitq);
2704 lnet_net_unlock(LNET_LOCK_EX);
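/*
 * Note on the requeue above: rc remains LNET_REDISCOVER_PEER for ACK,
 * REPLY and UNLINK events, so the peer is moved back onto
 * ln_dc_request for the discovery thread to act on its new state. A
 * successful send is the exception: the matching REPLY or ACK is still
 * outstanding and will trigger the requeue itself.
 */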
2708 * Build a peer from incoming data.
2710 * The NIDs in the incoming data are supposed to be structured as follows:
2711 * - loopback NID
2712 * - primary NID
2713 * - other NIDs in same net
2714 * - NIDs in second net
2715 * - NIDs in third net
2717 * This is due to the way the list of NIDs in the data is created.
2719 * Note that this function will mark the peer uptodate unless an
2720 * ENOMEM is encountered. All other errors are due to a conflict
2721 * between the DLC configuration and what discovery sees. We treat DLC
2722 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2723 * peer from becoming stuck in discovery.
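*
* For example (illustrative NIDs only), a peer whose primary NID is
* 10.0.0.1@tcp might report:
*
*	0@lo, 10.0.0.1@tcp, 10.0.0.2@tcp, 192.168.0.5@o2ib
*
* i.e. the loopback NID first, then the primary NID and the other NIDs
* on its net, then NIDs on further nets.
*/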
2725 static int lnet_peer_merge_data(struct lnet_peer *lp,
2726 struct lnet_ping_buffer *pbuf)
2728 struct lnet_peer_ni *lpni;
2729 lnet_nid_t *curnis = NULL;
2730 struct lnet_ni_status *addnis = NULL;
2731 lnet_nid_t *delnis = NULL;
2741 flags = LNET_PEER_DISCOVERED;
2742 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2743 flags |= LNET_PEER_MULTI_RAIL;
2746 * Cache the routing feature for the peer; whether it is enabled
2747 * or disabled, as reported by the remote peer.
2749 spin_lock(&lp->lp_lock);
2750 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2751 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2753 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2754 spin_unlock(&lp->lp_lock);
2756 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2757 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2758 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2759 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2760 if (!curnis || !addnis || !delnis) {
2768 /* Construct the list of NIDs present in peer. */
2770 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2771 curnis[ncurnis++] = lpni->lpni_nid;
2774 * Check for NIDs in pbuf not present in curnis[].
2775 * The loop starts at 1 to skip the loopback NID.
2777 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2778 for (j = 0; j < ncurnis; j++)
2779 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2782 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2785 * Check for NIDs in curnis[] not present in pbuf.
2786 * The nested loop starts at 1 to skip the loopback NID.
2788 * But never add the loopback NID to delnis[]: if it is
2789 * present in curnis[] then this peer is for this node.
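*
* Worked example (hypothetical NIDs): with curnis = {A, B, C} and the
* buffer listing {lo, A, C, D}, the loops above and below yield
* addnis = {D} and delnis = {B}.
*/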
2791 for (i = 0; i < ncurnis; i++) {
2792 if (curnis[i] == LNET_NID_LO_0)
2794 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2795 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2797 * update the information we cache for the
2798 * peer with the latest information we
2799 * received
2800 */
2801 lpni = lnet_find_peer_ni_locked(curnis[i]);
2803 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2804 lnet_peer_ni_decref_locked(lpni);
2809 if (j == pbuf->pb_info.pi_nnis)
2810 delnis[ndelnis++] = curnis[i];
2814 * If we get here and the discovery is disabled then we don't want
2815 * to add or delete any NIs. We just updated the ones we have some
2816 * information on, and call it a day.
2819 if (lnet_is_discovery_disabled(lp))
2822 for (i = 0; i < naddnis; i++) {
2823 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2825 CERROR("Error adding NID %s to peer %s: %d\n",
2826 libcfs_nid2str(addnis[i].ns_nid),
2827 libcfs_nid2str(lp->lp_primary_nid), rc);
2831 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2833 lpni->lpni_ns_status = addnis[i].ns_status;
2834 lnet_peer_ni_decref_locked(lpni);
2838 for (i = 0; i < ndelnis; i++) {
2840 * for routers it's okay to delete the primary_nid because
2841 * the upper layers don't really rely on it. So if we're
2842 * being told that the router changed its primary_nid
2843 * then it's okay to delete it.
2845 if (lp->lp_rtr_refcount > 0)
2846 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2847 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2849 CERROR("Error deleting NID %s from peer %s: %d\n",
2850 libcfs_nid2str(delnis[i]),
2851 libcfs_nid2str(lp->lp_primary_nid), rc);
2857 * Errors other than -ENOMEM are due to peers having been
2858 * configured with DLC. Ignore these because DLC overrides Discovery.
2863 CFS_FREE_PTR_ARRAY(curnis, nnis);
2864 CFS_FREE_PTR_ARRAY(addnis, nnis);
2865 CFS_FREE_PTR_ARRAY(delnis, nnis);
2866 lnet_ping_buffer_decref(pbuf);
2867 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2870 spin_lock(&lp->lp_lock);
2871 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2872 lp->lp_state |= LNET_PEER_FORCE_PING;
2873 spin_unlock(&lp->lp_lock);
2879 * The data in pbuf says lp is its primary peer, but the data was
2880 * received by a different peer. Try to update lp with the data.
2883 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2885 struct lnet_handle_md mdh;
2887 /* Queue lp for discovery, and force it on the request queue. */
2888 lnet_net_lock(LNET_LOCK_EX);
2889 if (lnet_peer_queue_for_discovery(lp))
2890 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2891 lnet_net_unlock(LNET_LOCK_EX);
2893 LNetInvalidateMDHandle(&mdh);
2896 * Decide whether we can move the peer to the DATA_PRESENT state.
2898 * We replace stale data for a multi-rail peer, repair PING_FAILED
2899 * status, and preempt FORCE_PING.
2901 * If after that we have DATA_PRESENT, we merge it into this peer.
2903 spin_lock(&lp->lp_lock);
2904 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2905 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2906 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2907 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2908 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2909 lnet_ping_buffer_decref(pbuf);
2914 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2915 lnet_ping_buffer_decref(lp->lp_data);
2917 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2919 if (lp->lp_state & LNET_PEER_PING_FAILED) {
2920 mdh = lp->lp_ping_mdh;
2921 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2922 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2923 lp->lp_ping_error = 0;
2925 if (lp->lp_state & LNET_PEER_FORCE_PING)
2926 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2927 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2928 spin_unlock(&lp->lp_lock);
2930 if (!LNetMDHandleIsInvalid(mdh))
2934 return lnet_peer_merge_data(lp, pbuf);
2936 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2940 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2944 for (i = 0; i < pinfo->pi_nnis; i++) {
2945 if (pinfo->pi_ni[i].ns_nid == nid)
2952 /* Delete a peer that has been marked for deletion. NB: when this peer was added
2953 * to the discovery queue a reference was taken that will prevent the peer from
2954 * actually being freed by this function. After this function exits the
2955 * discovery thread should call lnet_peer_discovery_complete() which will
2956 * drop that reference as well as wake any waiters that may also be holding a
2957 * reference on the peer.
2958 */
2959 static int lnet_peer_deletion(struct lnet_peer *lp)
2960 __must_hold(&lp->lp_lock)
2962 struct list_head rlist;
2963 struct lnet_route *route, *tmp;
2964 int sensitivity = lp->lp_health_sensitivity;
2966 INIT_LIST_HEAD(&rlist);
2968 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
2969 LNET_PEER_FORCE_PUSH);
2970 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
2971 libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
2973 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2976 spin_unlock(&lp->lp_lock);
2978 mutex_lock(&the_lnet.ln_api_mutex);
2980 lnet_net_lock(LNET_LOCK_EX);
2981 /* remove the peer from the discovery work
2982 * queue if it's on there, in preparation
2983 * for deleting this peer
2984 */
2985 if (!list_empty(&lp->lp_dc_list))
2986 list_del(&lp->lp_dc_list);
2987 list_for_each_entry_safe(route, tmp,
2990 lnet_move_route(route, NULL, &rlist);
2991 lnet_net_unlock(LNET_LOCK_EX);
2993 /* lnet_peer_del() deletes all the peer NIs owned by this peer */
2996 list_for_each_entry_safe(route, tmp,
2998 /* re-add these routes */
2999 lnet_add_route(route->lr_net,
3004 LIBCFS_FREE(route, sizeof(*route));
3007 mutex_unlock(&the_lnet.ln_api_mutex);
3009 spin_lock(&lp->lp_lock);
3015 * Update a peer using the data received.
3017 static int lnet_peer_data_present(struct lnet_peer *lp)
3018 __must_hold(&lp->lp_lock)
3020 struct lnet_ping_buffer *pbuf;
3021 struct lnet_peer_ni *lpni;
3022 lnet_nid_t nid = LNET_NID_ANY;
3028 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3029 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3030 spin_unlock(&lp->lp_lock);
3033 * Modifications of peer structures are done while holding the
3034 * ln_api_mutex. A global lock is required because we may be
3035 * modifying multiple peer structures, and a mutex greatly
3036 * simplifies memory management.
3038 * The actual changes to the data structures must also protect
3039 * against concurrent lookups, for which the lnet_net_lock in
3040 * LNET_LOCK_EX mode is used.
3042 mutex_lock(&the_lnet.ln_api_mutex);
3043 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3049 * If this peer is not on the peer list then it is being torn
3050 * down, and our reference count may be all that is keeping it
3051 * alive. Don't do any work on it.
3053 if (list_empty(&lp->lp_peer_list))
3056 flags = LNET_PEER_DISCOVERED;
3057 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3058 flags |= LNET_PEER_MULTI_RAIL;
3061 * Check whether the primary NID in the message matches the
3062 * primary NID of the peer. If it does, update the peer, if
3063 * it does not, check whether there is already a peer with
3064 * that primary NID. If no such peer exists, try to update
3065 * the primary NID of the current peer (allowed if it was
3066 * created due to message traffic) and complete the update.
3067 * If the peer did exist, hand off the data to it.
3069 * The peer for the loopback interface is a special case: this
3070 * is the peer for the local node, and we want to set its
3071 * primary NID to the correct value here. Moreover, this peer
3072 * can show up with only the loopback NID in the ping buffer.
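*
* Example (hypothetical NIDs): if the buffer lists {lo, X, Y} then X
* is taken as the peer's primary NID. If lp's primary NID is X, or
* discovery is disabled and lp's primary NID appears in the buffer,
* the data is merged into lp directly; if another peer object already
* owns X, the buffer is handed off to it via
* lnet_peer_set_primary_data().
*/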
3074 if (pbuf->pb_info.pi_nnis <= 1)
3076 nid = pbuf->pb_info.pi_ni[1].ns_nid;
3077 if (lp->lp_primary_nid == LNET_NID_LO_0) {
3078 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3080 rc = lnet_peer_merge_data(lp, pbuf);
3082 * if the primary nid of the peer is present in the ping info returned
3083 * from the peer, but it is not the primary NID we have cached
3084 * locally, and discovery is disabled, then we don't want to update
3085 * our local peer info by adding or removing NIDs; we just want
3086 * to update the status of the NIDs that we currently have
3087 * recorded in that peer.
3089 } else if (lp->lp_primary_nid == nid ||
3090 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3091 lnet_is_discovery_disabled(lp))) {
3092 rc = lnet_peer_merge_data(lp, pbuf);
3094 lpni = lnet_find_peer_ni_locked(nid);
3096 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3098 CERROR("Primary NID error %s versus %s: %d\n",
3099 libcfs_nid2str(lp->lp_primary_nid),
3100 libcfs_nid2str(nid), rc);
3102 rc = lnet_peer_merge_data(lp, pbuf);
3105 struct lnet_peer *new_lp;
3106 new_lp = lpni->lpni_peer_net->lpn_peer;
3108 * if lp has discovery/MR enabled that means new_lp
3109 * should have discovery/MR enabled as well, since
3110 * it's the same peer, which we're about to merge
3112 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3113 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3114 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3115 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3117 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3118 lnet_consolidate_routes_locked(lp, new_lp);
3119 lnet_peer_ni_decref_locked(lpni);
3123 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3124 lp->lp_state);
3125 mutex_unlock(&the_lnet.ln_api_mutex);
3127 spin_lock(&lp->lp_lock);
3128 /* Tell discovery to re-check the peer immediately. */
3130 rc = LNET_REDISCOVER_PEER;
3135 * A ping failed. Clear the PING_FAILED state and set the
3136 * FORCE_PING state, to ensure a retry even if discovery is
3137 * disabled. This avoids being left with incorrect state.
3139 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3140 __must_hold(&lp->lp_lock)
3142 struct lnet_handle_md mdh;
3145 mdh = lp->lp_ping_mdh;
3146 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3147 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3148 lp->lp_state |= LNET_PEER_FORCE_PING;
3149 rc = lp->lp_ping_error;
3150 lp->lp_ping_error = 0;
3151 spin_unlock(&lp->lp_lock);
3153 if (!LNetMDHandleIsInvalid(mdh))
3156 CDEBUG(D_NET, "peer %s:%d\n",
3157 libcfs_nid2str(lp->lp_primary_nid), rc);
3159 spin_lock(&lp->lp_lock);
3160 return rc ? rc : LNET_REDISCOVER_PEER;
3164 * Select NID to send a Ping or Push to.
3166 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3168 struct lnet_peer_ni *lpni;
3170 /* Look for a direct-connected NID for this peer. */
3172 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3173 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3178 return lpni->lpni_nid;
3180 /* Look for a routed-connected NID for this peer. */
3182 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3183 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3188 return lpni->lpni_nid;
3190 return LNET_NID_ANY;
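/*
 * The two passes above give strict preference to a NID reachable over
 * a local net; a routed NID is returned only when no direct one exists,
 * and LNET_NID_ANY means the peer is currently unreachable either way.
 */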
3193 /* Active side of ping. */
3194 static int lnet_peer_send_ping(struct lnet_peer *lp)
3195 __must_hold(&lp->lp_lock)
3202 lp->lp_state |= LNET_PEER_PING_SENT;
3203 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3204 spin_unlock(&lp->lp_lock);
3206 cpt = lnet_net_lock_current();
3207 /* Refcount for MD. */
3208 lnet_peer_addref_locked(lp);
3209 pnid = lnet_peer_select_nid(lp);
3210 lnet_net_unlock(cpt);
3212 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3214 rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3215 the_lnet.ln_dc_handler, false);
3218 * if LNetMDBind in lnet_send_ping fails we need to decrement the
3219 * refcount on the peer, otherwise LNetMDUnlink will be called
3220 * which will eventually do that.
3224 lnet_peer_decref_locked(lp);
3225 lnet_net_unlock(cpt);
3226 rc = -rc; /* change the rc to negative value */
3228 } else if (rc < 0) {
3232 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3234 spin_lock(&lp->lp_lock);
3238 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3240 * The errors that get us here are considered hard errors and
3241 * cause Discovery to terminate. So we clear PING_SENT, but do
3242 * not set either PING_FAILED or FORCE_PING. In fact we need
3243 * to clear PING_FAILED, because the unlink event handler will
3244 * have set it if we called LNetMDUnlink() above.
3246 spin_lock(&lp->lp_lock);
3247 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3252 * This function exists because you cannot call LNetMDUnlink() from an
3253 * event handler.
3254 */
3255 static int lnet_peer_push_failed(struct lnet_peer *lp)
3256 __must_hold(&lp->lp_lock)
3258 struct lnet_handle_md mdh;
3261 mdh = lp->lp_push_mdh;
3262 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3263 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3264 rc = lp->lp_push_error;
3265 lp->lp_push_error = 0;
3266 spin_unlock(&lp->lp_lock);
3268 if (!LNetMDHandleIsInvalid(mdh))
3271 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3272 spin_lock(&lp->lp_lock);
3273 return rc ? rc : LNET_REDISCOVER_PEER;
3277 * Mark the peer as discovered.
3279 static int lnet_peer_discovered(struct lnet_peer *lp)
3280 __must_hold(&lp->lp_lock)
3282 lp->lp_state |= LNET_PEER_DISCOVERED;
3283 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3284 LNET_PEER_REDISCOVER);
3286 lp->lp_dc_error = 0;
3288 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3293 /* Active side of push. */
3294 static int lnet_peer_send_push(struct lnet_peer *lp)
3295 __must_hold(&lp->lp_lock)
3297 struct lnet_ping_buffer *pbuf;
3298 struct lnet_process_id id;
3303 /* Don't push to a non-multi-rail peer. */
3304 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3305 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3306 /* if peer's NIDs are uptodate then peer is discovered */
3307 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3308 rc = lnet_peer_discovered(lp);
3315 lp->lp_state |= LNET_PEER_PUSH_SENT;
3316 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3317 spin_unlock(&lp->lp_lock);
3319 cpt = lnet_net_lock_current();
3320 pbuf = the_lnet.ln_ping_target;
3321 lnet_ping_buffer_addref(pbuf);
3322 lnet_net_unlock(cpt);
3324 /* Push source MD */
3325 md.start = &pbuf->pb_info;
3326 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3327 md.threshold = 2; /* Put/Ack */
3329 md.options = LNET_MD_TRACK_RESPONSE;
3330 md.handler = the_lnet.ln_dc_handler;
3333 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3335 lnet_ping_buffer_decref(pbuf);
3336 CERROR("Can't bind push source MD: %d\n", rc);
3339 cpt = lnet_net_lock_current();
3340 /* Refcount for MD. */
3341 lnet_peer_addref_locked(lp);
3342 id.pid = LNET_PID_LUSTRE;
3343 id.nid = lnet_peer_select_nid(lp);
3344 lnet_net_unlock(cpt);
3346 if (id.nid == LNET_NID_ANY) {
3351 rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3352 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3353 LNET_PROTO_PING_MATCHBITS, 0, 0);
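/*
 * The push is a PUT of this node's ping buffer to the peer's push
 * target: reserved portal, ping match bits. LNET_ACK_REQ asks the
 * peer to acknowledge receipt; that ACK (or a send failure) is
 * what clears PUSH_SENT via the discovery event handlers above.
 */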
3356 * reset the discovery nid. There is no need to restrict sending
3357 * from that source, if we call lnet_push_update_to_peers(). It'll
3358 * get set to a specific NID, if we initiate discovery from
3359 * scratch.
3360 */
3361 lp->lp_disc_src_nid = LNET_NID_ANY;
3366 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3368 spin_lock(&lp->lp_lock);
3372 LNetMDUnlink(lp->lp_push_mdh);
3373 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3375 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3377 * The errors that get us here are considered hard errors and
3378 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3379 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3380 * because the unlink event handler will have set it if we
3381 * called LNetMDUnlink() above.
3383 spin_lock(&lp->lp_lock);
3384 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3389 * An unrecoverable error was encountered during discovery.
3390 * Set error status in peer and abort discovery.
3392 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3394 CDEBUG(D_NET, "Discovery error %s: %d\n",
3395 libcfs_nid2str(lp->lp_primary_nid), error);
3397 spin_lock(&lp->lp_lock);
3398 lp->lp_dc_error = error;
3399 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3400 lp->lp_state |= LNET_PEER_REDISCOVER;
3401 spin_unlock(&lp->lp_lock);
3405 * Discovering this peer is taking too long. Cancel any Ping or Push
3406 * that discovery is waiting on by unlinking the relevant MDs. The
3407 * lnet_discovery_event_handler() will proceed from here and complete
3408 * the cleanup.
3409 */
3410 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3412 struct lnet_handle_md ping_mdh;
3413 struct lnet_handle_md push_mdh;
3415 LNetInvalidateMDHandle(&ping_mdh);
3416 LNetInvalidateMDHandle(&push_mdh);
3418 spin_lock(&lp->lp_lock);
3419 if (lp->lp_state & LNET_PEER_PING_SENT) {
3420 ping_mdh = lp->lp_ping_mdh;
3421 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3423 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3424 push_mdh = lp->lp_push_mdh;
3425 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3427 spin_unlock(&lp->lp_lock);
3429 if (!LNetMDHandleIsInvalid(ping_mdh))
3430 LNetMDUnlink(ping_mdh);
3431 if (!LNetMDHandleIsInvalid(push_mdh))
3432 LNetMDUnlink(push_mdh);
3436 * Wait for work to be queued or some other change that must be
3437 * attended to. Returns non-zero if the discovery thread should shut
3438 * down.
3439 */
3440 static int lnet_peer_discovery_wait_for_work(void)
3447 cpt = lnet_net_lock_current();
3449 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3450 TASK_INTERRUPTIBLE);
3451 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3453 if (lnet_push_target_resize_needed() ||
3454 the_lnet.ln_push_target->pb_needs_post)
3456 if (!list_empty(&the_lnet.ln_dc_request))
3458 if (!list_empty(&the_lnet.ln_msg_resend))
3460 lnet_net_unlock(cpt);
3463 * wake up at most once per second to check for peers that
3464 * have been stuck on the working queue for longer than
3465 * the peer timeout
3466 */
3467 schedule_timeout(cfs_time_seconds(1));
3468 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3469 cpt = lnet_net_lock_current();
3471 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3473 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3476 lnet_net_unlock(cpt);
3478 CDEBUG(D_NET, "woken: %d\n", rc);
3484 * Messages that were pending on a destroyed peer will be put on a global
3485 * resend list. The message resend list will be checked by
3486 * the discovery thread when it wakes up, which will resend them. These
3487 * messages can still be sendable if the lpni that was the initial
3488 * cause of the re-queue was transferred to another peer.
3490 * It is possible that LNet could be shut down while we're iterating
3491 * through the list. lnet_shutdown_lndnets() will attempt to access the
3492 * resend list, but will have to wait until the spinlock is released, by
3493 * which time there shouldn't be any more messages on the resend list.
3494 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3495 * for the messages so they can be released. The other case is that
3496 * lnet_shutdown_lndnets() can finalize all the messages before this
3497 * function can visit the resend list, in which case this function will
3498 * be a no-op.
3499 */
3500 static void lnet_resend_msgs(void)
3502 struct lnet_msg *msg, *tmp;
3506 spin_lock(&the_lnet.ln_msg_resend_lock);
3507 list_splice(&the_lnet.ln_msg_resend, &resend);
3508 spin_unlock(&the_lnet.ln_msg_resend_lock);
3510 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3511 list_del_init(&msg->msg_list);
3512 rc = lnet_send(msg->msg_src_nid_param, msg,
3513 msg->msg_rtr_nid_param);
3515 CNETERR("Error sending %s to %s: %d\n",
3516 lnet_msgtyp2str(msg->msg_type),
3517 libcfs_id2str(msg->msg_target), rc);
3518 lnet_finalize(msg, rc);
3523 /* The discovery thread. */
3524 static int lnet_peer_discovery(void *arg)
3526 struct lnet_peer *lp;
3529 wait_for_completion(&the_lnet.ln_started);
3531 CDEBUG(D_NET, "started\n");
3534 if (lnet_peer_discovery_wait_for_work())
3537 if (lnet_push_target_resize_needed())
3538 lnet_push_target_resize();
3539 else if (the_lnet.ln_push_target->pb_needs_post)
3540 lnet_push_target_post(the_lnet.ln_push_target,
3541 &the_lnet.ln_push_target_md);
3545 lnet_net_lock(LNET_LOCK_EX);
3546 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3547 lnet_net_unlock(LNET_LOCK_EX);
3552 * Process all incoming discovery work requests. When
3553 * discovery must wait on a peer to change state, it
3554 * is added to the tail of the ln_dc_working queue. A
3555 * timestamp keeps track of when the peer was added,
3556 * so we can time out discovery requests that take too
3559 while (!list_empty(&the_lnet.ln_dc_request)) {
3560 lp = list_first_entry(&the_lnet.ln_dc_request,
3561 struct lnet_peer, lp_dc_list);
3562 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3564 * set the time the peer was put on the dc_working
3565 * queue. It shouldn't remain on the queue
3566 * forever, in case the GET message (for ping)
3567 * doesn't get a REPLY or the PUT message (for
3568 * push) doesn't get an ACK.
3570 lp->lp_last_queued = ktime_get_real_seconds();
3571 lnet_net_unlock(LNET_LOCK_EX);
3573 if (lnet_push_target_resize_needed())
3574 lnet_push_target_resize();
3575 else if (the_lnet.ln_push_target->pb_needs_post)
3576 lnet_push_target_post(the_lnet.ln_push_target,
3577 &the_lnet.ln_push_target_md);
3580 * Select an action depending on the state of
3581 * the peer and whether discovery is disabled.
3582 * The check whether discovery is disabled is
3583 * done after the code that handles processing
3584 * for arrived data, cleanup for failures, and
3585 * forcing a Ping or Push.
3587 spin_lock(&lp->lp_lock);
3588 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3589 libcfs_nid2str(lp->lp_primary_nid), lp,
3590 lp->lp_state);
3591 if (lp->lp_state & LNET_PEER_MARK_DELETION)
3592 rc = lnet_peer_deletion(lp);
3593 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3594 rc = lnet_peer_data_present(lp);
3595 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3596 rc = lnet_peer_ping_failed(lp);
3597 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3598 rc = lnet_peer_push_failed(lp);
3599 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3600 rc = lnet_peer_send_ping(lp);
3601 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3602 rc = lnet_peer_send_push(lp);
3603 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3604 rc = lnet_peer_send_ping(lp);
3605 else if (lnet_peer_needs_push(lp))
3606 rc = lnet_peer_send_push(lp);
3608 rc = lnet_peer_discovered(lp);
3609 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3610 libcfs_nid2str(lp->lp_primary_nid), lp,
3611 lp->lp_state, rc);
3612 spin_unlock(&lp->lp_lock);
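/*
 * Note the precedence encoded in the ladder above: deletion is handled
 * first, then arrived data, then ping/push failures, then forced pings
 * and pushes, then a ping for stale NIDs; only a peer that needs none
 * of these is marked discovered.
 */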
3614 lnet_net_lock(LNET_LOCK_EX);
3615 if (rc == LNET_REDISCOVER_PEER) {
3616 list_move(&lp->lp_dc_list,
3617 &the_lnet.ln_dc_request);
3619 lnet_peer_discovery_error(lp, rc);
3621 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3622 lnet_peer_discovery_complete(lp);
3623 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3628 lnet_net_unlock(LNET_LOCK_EX);
3631 CDEBUG(D_NET, "stopping\n");
3633 * Clean up before telling lnet_peer_discovery_stop() that
3634 * we're done. Use wake_up() below to somewhat reduce the
3635 * size of the thundering herd if there are multiple threads
3636 * waiting on discovery of a single peer.
3639 /* Queue cleanup 1: stop all pending pings and pushes. */
3640 lnet_net_lock(LNET_LOCK_EX);
3641 while (!list_empty(&the_lnet.ln_dc_working)) {
3642 lp = list_first_entry(&the_lnet.ln_dc_working,
3643 struct lnet_peer, lp_dc_list);
3644 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3645 lnet_net_unlock(LNET_LOCK_EX);
3646 lnet_peer_cancel_discovery(lp);
3647 lnet_net_lock(LNET_LOCK_EX);
3649 lnet_net_unlock(LNET_LOCK_EX);
3651 /* Queue cleanup 2: wait for the expired queue to clear. */
3652 while (!list_empty(&the_lnet.ln_dc_expired))
3653 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3655 /* Queue cleanup 3: clear the request queue. */
3656 lnet_net_lock(LNET_LOCK_EX);
3657 while (!list_empty(&the_lnet.ln_dc_request)) {
3658 lp = list_first_entry(&the_lnet.ln_dc_request,
3659 struct lnet_peer, lp_dc_list);
3660 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3661 lnet_peer_discovery_complete(lp);
3663 lnet_net_unlock(LNET_LOCK_EX);
3665 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3666 the_lnet.ln_dc_handler = NULL;
3668 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3669 wake_up(&the_lnet.ln_dc_waitq);
3671 CDEBUG(D_NET, "stopped\n");
3676 /* ln_api_mutex is held on entry. */
3677 int lnet_peer_discovery_start(void)
3679 struct task_struct *task;
3682 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3685 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3686 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3687 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3690 CERROR("Can't start peer discovery thread: %d\n", rc);
3692 the_lnet.ln_dc_handler = NULL;
3694 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3697 CDEBUG(D_NET, "discovery start: %d\n", rc);
3702 /* ln_api_mutex is held on entry. */
3703 void lnet_peer_discovery_stop(void)
3705 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3708 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3709 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3711 /* In the LNetNIInit() path we may be stopping discovery before it
3712 * entered its work loop
3714 if (!completion_done(&the_lnet.ln_started))
3715 complete(&the_lnet.ln_started);
3717 wake_up(&the_lnet.ln_dc_waitq);
3719 wait_event(the_lnet.ln_dc_waitq,
3720 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3722 LASSERT(list_empty(&the_lnet.ln_dc_request));
3723 LASSERT(list_empty(&the_lnet.ln_dc_working));
3724 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3726 CDEBUG(D_NET, "discovery stopped\n");
3732 lnet_debug_peer(lnet_nid_t nid)
3734 char *aliveness = "NA";
3735 struct lnet_peer_ni *lp;
3738 cpt = lnet_cpt_of_nid(nid, NULL);
3741 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3743 lnet_net_unlock(cpt);
3744 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3748 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3749 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3751 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3752 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3753 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3754 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3755 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3757 lnet_peer_ni_decref_locked(lp);
3759 lnet_net_unlock(cpt);
3762 /* Gathering information for userspace. */
3764 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3765 char aliveness[LNET_MAX_STR_LEN],
3766 __u32 *cpt_iter, __u32 *refcount,
3767 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3768 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3769 __u32 *peer_tx_qnob)
3771 struct lnet_peer_table *peer_table;
3772 struct lnet_peer_ni *lp;
3777 /* get the number of CPTs */
3778 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3780 /* if the cpt number to be examined is >= the number of cpts in
3781 * the system then indicate that there are no more cpts to examine
3783 if (*cpt_iter >= lncpt)
3786 /* get the current table */
3787 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3788 /* if the ptable is NULL then there are no more cpts to examine */
3789 if (peer_table == NULL)
3792 lnet_net_lock(*cpt_iter);
3794 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3795 struct list_head *peers = &peer_table->pt_hash[j];
3797 list_for_each_entry(lp, peers, lpni_hashlist) {
3798 if (peer_index-- > 0)
3801 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3802 if (lnet_isrouter(lp) ||
3803 lnet_peer_aliveness_enabled(lp))
3804 snprintf(aliveness, LNET_MAX_STR_LEN,
3805 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3807 *nid = lp->lpni_nid;
3808 *refcount = atomic_read(&lp->lpni_refcount);
3809 *ni_peer_tx_credits =
3810 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3811 *peer_tx_credits = lp->lpni_txcredits;
3812 *peer_rtr_credits = lp->lpni_rtrcredits;
3813 *peer_min_rtr_credits = lp->lpni_mintxcredits;
3814 *peer_tx_qnob = lp->lpni_txqnob;
3820 lnet_net_unlock(*cpt_iter);
3824 return found ? 0 : -ENOENT;
3827 /* ln_api_mutex is held, which keeps the peer list stable */
3828 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3830 struct lnet_ioctl_element_stats *lpni_stats;
3831 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3832 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3833 struct lnet_peer_ni_credit_info *lpni_info;
3834 struct lnet_peer_ni *lpni;
3835 struct lnet_peer *lp;
3840 lp = lnet_find_peer(cfg->prcfg_prim_nid);
3847 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3848 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3849 size *= lp->lp_nnis;
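/*
 * The bulk reply is one fixed-size record per peer NI, laid out
 * back to back: NID, credit info, element stats, message stats,
 * then health stats. The copy loop below writes in exactly this
 * order, so the size computed here must match it.
 */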
3850 if (size > cfg->prcfg_size) {
3851 cfg->prcfg_size = size;
3856 cfg->prcfg_prim_nid = lp->lp_primary_nid;
3857 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3858 cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3859 cfg->prcfg_count = lp->lp_nnis;
3860 cfg->prcfg_size = size;
3861 cfg->prcfg_state = lp->lp_state;
3863 /* Allocate helper buffers. */
3865 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3868 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3871 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3872 if (!lpni_msg_stats)
3873 goto out_free_stats;
3874 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3876 goto out_free_msg_stats;
3881 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3882 nid = lpni->lpni_nid;
3883 if (copy_to_user(bulk, &nid, sizeof(nid)))
3884 goto out_free_hstats;
3885 bulk += sizeof(nid);
3887 memset(lpni_info, 0, sizeof(*lpni_info));
3888 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3889 if (lnet_isrouter(lpni) ||
3890 lnet_peer_aliveness_enabled(lpni))
3891 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3892 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3894 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3895 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3896 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3897 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3898 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3899 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3900 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3901 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3902 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3903 goto out_free_hstats;
3904 bulk += sizeof(*lpni_info);
3906 memset(lpni_stats, 0, sizeof(*lpni_stats));
3907 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3908 LNET_STATS_TYPE_SEND);
3909 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3910 LNET_STATS_TYPE_RECV);
3911 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3912 LNET_STATS_TYPE_DROP);
3913 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3914 goto out_free_hstats;
3915 bulk += sizeof(*lpni_stats);
3916 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3917 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3918 goto out_free_hstats;
3919 bulk += sizeof(*lpni_msg_stats);
3920 lpni_hstats->hlpni_network_timeout =
3921 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3922 lpni_hstats->hlpni_remote_dropped =
3923 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3924 lpni_hstats->hlpni_remote_timeout =
3925 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3926 lpni_hstats->hlpni_remote_error =
3927 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3928 lpni_hstats->hlpni_health_value =
3929 atomic_read(&lpni->lpni_healthv);
3930 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3931 goto out_free_hstats;
3932 bulk += sizeof(*lpni_hstats);
3937 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3939 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3941 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3943 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3945 lnet_peer_decref_locked(lp);
3951 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3953 /* the monitor thread could've shut down and cleaned up the queues */
3954 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3957 if (list_empty(&lpni->lpni_recovery) &&
3958 atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3959 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3960 libcfs_nid2str(lpni->lpni_nid),
3961 atomic_read(&lpni->lpni_healthv));
3962 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3963 lnet_peer_ni_addref_locked(lpni);
3967 /* Call with the ln_api_mutex held */
3969 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3971 struct lnet_peer_table *ptable;
3972 struct lnet_peer *lp;
3973 struct lnet_peer_net *lpn;
3974 struct lnet_peer_ni *lpni;
3978 if (the_lnet.ln_state != LNET_STATE_RUNNING)
3982 lnet_net_lock(LNET_LOCK_EX);
3983 lpni = lnet_find_peer_ni_locked(nid);
3985 lnet_net_unlock(LNET_LOCK_EX);
3988 atomic_set(&lpni->lpni_healthv, value);
3989 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3990 lnet_peer_ni_decref_locked(lpni);
3991 lnet_net_unlock(LNET_LOCK_EX);
3995 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3998 * Walk all the peers and reset the health value for each one to the
3999 * specified value.
4000 */
4001 lnet_net_lock(LNET_LOCK_EX);
4002 for (cpt = 0; cpt < lncpt; cpt++) {
4003 ptable = the_lnet.ln_peer_tables[cpt];
4004 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4005 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4006 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4008 atomic_set(&lpni->lpni_healthv, value);
4009 lnet_peer_ni_add_to_recoveryq_locked(lpni);
4014 lnet_net_unlock(LNET_LOCK_EX);