 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/peer.c
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {

		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
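
/*
 * Editorial sketch (not part of the original source): the remote list
 * above is what ties lnet_peer_ni_alloc() and lnet_peer_net_added()
 * together. A peer_ni created before its network exists locally is
 * parked on ln_remote_peer_ni_list with no credits; once the net is
 * configured, lnet_peer_net_added() fills in the net and its tunable
 * credits. Roughly:
 *
 *	lpni = lnet_peer_ni_alloc(nid);	// net not local yet: queued on
 *					// the remote peer_ni list
 *	...				// later: the net is configured
 *	lnet_peer_net_added(net);	// credits initialized, lpni
 *					// removed from the remote list
 */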
static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
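
/*
 * Editorial sketch (not part of the original source): how a NID is
 * resolved against these per-CPT tables. The CPT is derived from the
 * NID, and within that partition the NID selects a hash chain:
 *
 *	int cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
 *	struct lnet_peer_table *ptable = the_lnet.ln_peer_tables[cpt];
 *	struct list_head *head = &ptable->pt_hash[lnet_nid2peerhash(nid)];
 *
 * lnet_find_peer_ni_locked() below follows exactly this path.
 */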
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	atomic_set(&lpni->lpni_refcount, 1);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
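
/*
 * Editorial note (sketch, not from the original source): the objects
 * allocated in this file form a three-level hierarchy, refcounted at
 * each level:
 *
 *	struct lnet_peer        (lp)   - one per remote node
 *	  struct lnet_peer_net  (lpn)  - one per network the node is on
 *	    struct lnet_peer_ni (lpni) - one per NID on that network
 *
 * lpni->lpni_peer_net->lpn_peer is the usual way back to the owning
 * peer, as used throughout this file.
 */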
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_NID_ANY;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * loopback peer.
	 */
	if (nid == LNET_NID_LO_0)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected the lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Can not delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
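
/*
 * Editorial sketch (not from the original source): lifetime of a
 * deleted peer_ni. lnet_peer_ni_del_locked() moves the entry from the
 * hash to the zombie list; the final lnet_peer_ni_decref_locked()
 * triggers lnet_destroy_peer_ni_locked(), which takes it off the
 * zombie list and decrements ptable->pt_zombies, which in turn lets
 * lnet_peer_ni_finalize_wait() finish:
 *
 *	lnet_peer_ni_del_locked(lpni, false);	// hashlist -> zombie list
 *	...					// last user drops its ref
 *	lnet_peer_ni_finalize_wait(ptable);	// waits for pt_zombies == 0
 */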
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
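
/*
 * Editorial sketch (not from the original source): typical refcount
 * discipline for this lookup, as used by lnet_peer_push_event() below:
 *
 *	struct lnet_peer *lp = lnet_find_peer(nid);	// adds a ref
 *	if (lp) {
 *		... use lp ...
 *		lnet_net_lock(LNET_LOCK_EX);
 *		lnet_peer_decref_locked(lp);		// drop lookup ref
 *		lnet_net_unlock(LNET_LOCK_EX);
 *	}
 */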
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
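
/*
 * Editorial sketch (not from the original source): passing NULL for
 * both peer_net and prev makes this function a whole-peer iterator,
 * the idiom used elsewhere in this file (e.g. in
 * lnet_peer_clr_non_mr_pref_nids()):
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)))
 *		visit(lpni);
 */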
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -ESHUTDOWN;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;

done:
	*countp = count;
	*sizep = size;
	return rc;
}
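
/*
 * Editorial sketch (not from the original source): a caller is
 * expected to retry with a bigger buffer on -E2BIG, since *sizep is
 * updated to the required size. A hypothetical two-pass user:
 *
 *	u32 count = 0, size = 0;
 *
 *	rc = lnet_get_peer_list(&count, &size, NULL);	// probe: -E2BIG
 *	// allocate a userspace buffer of 'size' bytes as 'ids' ...
 *	rc = lnet_get_peer_list(&count, &size, ids);	// real copy-out
 */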
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nid2str(lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}

	return false;
}
void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}
	return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);

	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);

	return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				LIBCFS_FREE(ne1, alloc_size);
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}

		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
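
/*
 * Editorial note (sketch, not from the original source): lpni_pref is
 * a union keyed by lpni_pref_nnids, which is why the code above must
 * migrate the single NID into a list when a second one is added:
 *
 *	nnids == 0:  no preference recorded
 *	nnids == 1:  lpni->lpni_pref.nid  holds the NID inline
 *	nnids >= 2:  lpni->lpni_pref.nids is a list of lnet_nid_list
 *
 * lnet_peer_del_pref_nid() below performs the reverse migration when
 * the count drops back to one.
 */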
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	while (!lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
		spin_unlock(&lp->lp_lock);

		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		lp = lpni->lpni_peer_net->lpn_peer;

		/* Only try once if discovery is disabled */
		if (lnet_is_discovery_disabled(lp))
			break;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);

	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
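
/*
 * Editorial sketch (not from the original source): LNetPrimaryNID() is
 * the exported way for upper layers to canonicalize any of a peer's
 * NIDs to its primary NID, triggering discovery if needed:
 *
 *	lnet_nid_t prim = LNetPrimaryNID(nid);
 *
 * For an unknown peer, or one with discovery disabled, this simply
 * returns the NID that was passed in.
 */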
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *rtr_lp =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = rtr_lp->lp_rtr_refcount;
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(rtr_lp, lp);
			}
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}

/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;
	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		goto out;

	lp->lp_primary_nid = nid;
out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
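
/*
 * Editorial sketch (not from the original source): the two supported
 * call shapes, matching the comment above:
 *
 *	lnet_add_peer_ni(prim, LNET_NID_ANY, true);	// create MR peer
 *	lnet_add_peer_ni(prim, nid, true);		// add NID to it
 *
 * The second call fails with -ENOENT unless the first (or equivalent
 * DLC configuration) has already created the peer.
 */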
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Can not be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}

/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up_all(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_peer *lp;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		pbuf->pb_needs_post = true;
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * FORCE_PING flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		/*
		 * Mark the peer for deletion if we already know about it
		 * and it's going from discovery set to no discovery set
		 */
		if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
				      LNET_PEER_DISCOVERING)) &&
		    lp->lp_state & LNET_PEER_DISCOVERED) {
			CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       lp->lp_state);
			lp->lp_state |= LNET_PEER_MARK_DELETION;
		}
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Update the MULTI_RAIL flag based on the push. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in.
	 * NB: We verified above that the MR feature bit is set in pi_features
	 */
	if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "peer %s(%p) is MR\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
		CWARN("Push says %s is Multi-Rail, DLC says not\n",
		      libcfs_nid2str(lp->lp_primary_nid));
	} else if (lnet_peer_discovery_disabled) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else {
		CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (ev->mlength < ev->rlength) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/* always assume new data */
	lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
		    LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/* We've processed this buffer. It can be reposted */
	pbuf->pb_needs_post = true;

	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
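
/*
 * Editorial summary (not from the original source) of the state
 * transitions performed above:
 *
 *	transport error / corrupt info / truncation
 *		-> clear NIDS_UPTODATE, set FORCE_PING (re-ping the peer)
 *	push accepted
 *		-> record seqno, buffer the data, set DATA_PRESENT
 *
 * In every case the peer is (re)queued for the discovery thread if it
 * is no longer up to date.
 */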
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
/*
 * Peer discovery slow path. The ln_api_mutex is held on entry, and
 * dropped/retaken within this function. An lnet_peer_ni is passed in
 * because discovery could tear down an lnet_peer.
 */
int
lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
{
	DEFINE_WAIT(wait);
	struct lnet_peer *lp;
	int rc = 0;
	int count = 0;

again:
	lnet_net_unlock(cpt);
	lnet_net_lock(LNET_LOCK_EX);
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_clear_discovery_error(lp);

	/*
	 * We're willing to be interrupted. The lpni can become a
	 * zombie if we race with DLC, so we must check for that.
	 */
	for (;;) {
		/* Keep lp alive when the lnet_net_lock is unlocked */
		lnet_peer_addref_locked(lp);
		prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
			break;
		/*
		 * Don't repeat discovery if discovery is disabled. This is
		 * done to ensure we can use discovery as a standard ping as
		 * well for backwards compatibility with routers which do not
		 * have discovery or have discovery disabled
		 */
		if (lnet_is_discovery_disabled(lp) && count > 0)
			break;
		if (lp->lp_dc_error)
			break;
		if (lnet_peer_is_uptodate(lp))
			break;
		lnet_peer_queue_for_discovery(lp);
		count++;
		CDEBUG(D_NET, "Discovery attempt # %d\n", count);

		/*
		 * If caller requested a non-blocking operation then
		 * return immediately. Once discovery is complete any
		 * pending messages that were stopped due to discovery
		 * will be transmitted.
		 */
		if (!block)
			break;

		lnet_net_unlock(LNET_LOCK_EX);
		schedule();
		finish_wait(&lp->lp_dc_waitq, &wait);
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_decref_locked(lp);
		/* Peer may have changed */
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	finish_wait(&lp->lp_dc_waitq, &wait);

	lnet_net_unlock(LNET_LOCK_EX);
	lnet_net_lock(cpt);
	lnet_peer_decref_locked(lp);

	/*
	 * The peer may have changed, so re-check and rediscover if that turns
	 * out to have been the case. The reference count on lp ensured that
	 * even if it was unlinked from lpni the memory could not be recycled.
	 * Thus the check below is sufficient to determine whether the peer
	 * changed. If the peer changed, then lp must not be dereferenced.
	 */
	if (lp != lpni->lpni_peer_net->lpn_peer)
		goto again;

	if (signal_pending(current))
		rc = -EINTR;
	else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		rc = -ESHUTDOWN;
	else if (lp->lp_dc_error)
		rc = lp->lp_dc_error;
	else if (!block)
		CDEBUG(D_NET, "non-blocking discovery\n");
	else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
		goto again;

	CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
	       (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
	       libcfs_nid2str(lpni->lpni_nid), rc,
	       (!block) ? "pending discovery" : "discovery complete");

	return rc;
}
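
/*
 * Editorial sketch (not from the original source): the block parameter
 * selects between the two caller patterns. LNetPrimaryNID() above
 * waits for the result, while a send-path caller only kicks discovery
 * and lets its message sit on the pending queue until discovery
 * completes:
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);   // wait
 *	rc = lnet_discover_peer_locked(lpni, cpt, false);  // fire and forget
 */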
/* Handle an incoming ack for a push. */
static void
lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_PUSH_SENT;
	lp->lp_push_error = ev->status;
	if (ev->status)
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
	else
		lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	spin_unlock(&lp->lp_lock);

	CDEBUG(D_NET, "peer %s ev->status %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), ev->status);
}
2421 /* Handle a Reply message. This is the reply to a Ping message. */
2423 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2425 struct lnet_ping_buffer *pbuf;
2428 spin_lock(&lp->lp_lock);
2430 lp->lp_disc_src_nid = ev->target.nid;
2433 * If some kind of error happened, the contents of the message
2434 * cannot be used. Set PING_FAILED to trigger a retry.
2437 lp->lp_state |= LNET_PEER_PING_FAILED;
2438 lp->lp_ping_error = ev->status;
2439 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2441 libcfs_nid2str(lp->lp_primary_nid),
2442 libcfs_nid2str(ev->source.nid));
2446 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2447 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2448 lnet_swap_pinginfo(pbuf);
2451 * A reply with invalid or corrupted info. Set PING_FAILED to trigger a retry.
2454 rc = lnet_ping_info_validate(&pbuf->pb_info);
2456 lp->lp_state |= LNET_PEER_PING_FAILED;
2457 lp->lp_ping_error = 0;
2458 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2459 libcfs_nid2str(lp->lp_primary_nid), rc);
2465 * The peer may have discovery disabled at its end. Set
2466 * NO_DISCOVERY as appropriate.
2468 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2469 !lnet_peer_discovery_disabled) {
2470 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2471 libcfs_nid2str(lp->lp_primary_nid));
2472 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2474 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2475 libcfs_nid2str(lp->lp_primary_nid));
2476 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2480 * Update the MULTI_RAIL flag based on the reply. If the peer
2481 * was configured with DLC then the setting should match what
2482 * DLC put in.
2484 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2485 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2486 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2487 libcfs_nid2str(lp->lp_primary_nid), lp);
2488 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2489 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2490 libcfs_nid2str(lp->lp_primary_nid));
2491 } else if (lnet_peer_discovery_disabled) {
2493 "peer %s(%p) not MR: DD disabled locally\n",
2494 libcfs_nid2str(lp->lp_primary_nid), lp);
2495 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2497 "peer %s(%p) not MR: DD disabled remotely\n",
2498 libcfs_nid2str(lp->lp_primary_nid), lp);
2500 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2501 libcfs_nid2str(lp->lp_primary_nid), lp);
2502 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2503 lnet_peer_clr_non_mr_pref_nids(lp);
2505 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2506 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2507 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2508 libcfs_nid2str(lp->lp_primary_nid));
2510 CERROR("Multi-Rail state vanished from %s\n",
2511 libcfs_nid2str(lp->lp_primary_nid));
2512 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2517 * Make sure we'll allocate the correct size ping buffer when
2518 * pinging the peer.
2520 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2521 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2524 * Check for truncation of the Reply. Clear PING_SENT and set
2525 * PING_FAILED to trigger a retry.
2527 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2528 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2529 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2530 lp->lp_state |= LNET_PEER_PING_FAILED;
2531 lp->lp_ping_error = 0;
2532 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2533 libcfs_nid2str(lp->lp_primary_nid),
2534 pbuf->pb_info.pi_nnis);
2539 * Check the sequence numbers in the reply. These are only
2540 * available if the reply came from a Multi-Rail peer.
2542 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2543 pbuf->pb_info.pi_nnis > 1 &&
2544 lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2545 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2546 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2547 libcfs_nid2str(lp->lp_primary_nid),
2548 LNET_PING_BUFFER_SEQNO(pbuf),
2551 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2554 /* We're happy with the state of the data in the buffer. */
2555 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2556 libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2557 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2558 lnet_ping_buffer_decref(lp->lp_data);
2560 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2561 lnet_ping_buffer_addref(pbuf);
2564 lp->lp_state &= ~LNET_PEER_PING_SENT;
2565 spin_unlock(&lp->lp_lock);
2567 lnet_net_lock(LNET_LOCK_EX);
2569 * If this peer is a gateway, call the routing callback to
2570 * handle the ping reply
2572 if (lp->lp_rtr_refcount > 0)
2573 lnet_router_discovery_ping_reply(lp);
2574 lnet_net_unlock(LNET_LOCK_EX);
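/*
 * To summarize the Reply path above: a ping reply is accepted only
 * after (1) ev->status indicates a successful transfer, (2) the magic
 * is recognized (byte-swapping the buffer if needed), and (3)
 * lnet_ping_info_validate() passes. Only then are the feature bits,
 * Multi-Rail state, buffer sizing, truncation check and sequence
 * number applied.
 */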
2578 * Send event handling. Only matters for error cases, where we clean
2579 * up state on the peer and peer_ni that would otherwise be updated in
2580 * the REPLY event handler for a successful Ping, and the ACK event
2581 * handler for a successful Push.
2584 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2591 spin_lock(&lp->lp_lock);
2592 if (ev->msg_type == LNET_MSG_GET) {
2593 lp->lp_state &= ~LNET_PEER_PING_SENT;
2594 lp->lp_state |= LNET_PEER_PING_FAILED;
2595 lp->lp_ping_error = ev->status;
2596 } else { /* ev->msg_type == LNET_MSG_PUT */
2597 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2598 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2599 lp->lp_push_error = ev->status;
2601 spin_unlock(&lp->lp_lock);
2602 rc = LNET_REDISCOVER_PEER;
2604 CDEBUG(D_NET, "%s Send to %s: %d\n",
2605 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2606 libcfs_nid2str(ev->target.nid), rc);
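/*
 * Note: a successful send is a no-op here; only a failed send sets
 * the *_FAILED state bits and returns LNET_REDISCOVER_PEER so that
 * the event handler requeues the peer for another attempt.
 */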
2611 * Unlink event handling. This event is only seen if a call to
2612 * LNetMDUnlink() caused the event to be unlinked. If this call was
2613 * made after the event was set up in LNetGet() or LNetPut() then we
2614 * assume the Ping or Push timed out.
2617 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2619 spin_lock(&lp->lp_lock);
2620 /* We've passed through LNetGet() */
2621 if (lp->lp_state & LNET_PEER_PING_SENT) {
2622 lp->lp_state &= ~LNET_PEER_PING_SENT;
2623 lp->lp_state |= LNET_PEER_PING_FAILED;
2624 lp->lp_ping_error = -ETIMEDOUT;
2625 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2626 libcfs_nid2str(lp->lp_primary_nid));
2628 /* We've passed through LNetPut() */
2629 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2630 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2631 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2632 lp->lp_push_error = -ETIMEDOUT;
2633 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2634 libcfs_nid2str(lp->lp_primary_nid));
2636 spin_unlock(&lp->lp_lock);
2640 * Event handler for the discovery EQ.
2642 * Called with lnet_res_lock(cpt) held. The cpt is the
2643 * lnet_cpt_of_cookie() of the md handle cookie.
2645 static void lnet_discovery_event_handler(struct lnet_event *event)
2647 struct lnet_peer *lp = event->md_user_ptr;
2648 struct lnet_ping_buffer *pbuf;
2651 /* discovery needs to take another look */
2652 rc = LNET_REDISCOVER_PEER;
2654 CDEBUG(D_NET, "Received event: %d\n", event->type);
2656 switch (event->type) {
2657 case LNET_EVENT_ACK:
2658 lnet_discovery_event_ack(lp, event);
2660 case LNET_EVENT_REPLY:
2661 lnet_discovery_event_reply(lp, event);
2663 case LNET_EVENT_SEND:
2664 /* Only send failure triggers a retry. */
2665 rc = lnet_discovery_event_send(lp, event);
2667 case LNET_EVENT_UNLINK:
2668 /* LNetMDUnlink() was called */
2669 lnet_discovery_event_unlink(lp, event);
2672 /* Invalid events. */
2675 lnet_net_lock(LNET_LOCK_EX);
2676 if (event->unlinked) {
2677 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2678 lnet_ping_buffer_decref(pbuf);
2679 lnet_peer_decref_locked(lp);
2682 /* put peer back at end of request queue, if discovery not already
2683 * complete */
2684 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2685 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2686 wake_up(&the_lnet.ln_dc_waitq);
2688 lnet_net_unlock(LNET_LOCK_EX);
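/*
 * Dispatch summary for the handler above:
 *	LNET_EVENT_ACK    -> lnet_discovery_event_ack()    (Push acked)
 *	LNET_EVENT_REPLY  -> lnet_discovery_event_reply()  (Ping answered)
 *	LNET_EVENT_SEND   -> lnet_discovery_event_send()   (failures only)
 *	LNET_EVENT_UNLINK -> lnet_discovery_event_unlink() (timeout/cancel)
 */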
2692 * Build a peer from incoming data.
2694 * The NIDs in the incoming data are supposed to be structured as follows:
2695 * - loopback NID
2696 * - primary NID
2697 * - other NIDs in same net
2698 * - NIDs in second net
2699 * - NIDs in third net
2701 * This is due to the way the list of NIDs in the data is created.
2703 * Note that this function will mark the peer uptodate unless an
2704 * ENOMEM is encountered. All other errors are due to a conflict
2705 * between the DLC configuration and what discovery sees. We treat DLC
2706 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2707 * peer from becoming stuck in discovery.
2709 static int lnet_peer_merge_data(struct lnet_peer *lp,
2710 struct lnet_ping_buffer *pbuf)
2712 struct lnet_peer_ni *lpni;
2713 lnet_nid_t *curnis = NULL;
2714 struct lnet_ni_status *addnis = NULL;
2715 lnet_nid_t *delnis = NULL;
2725 flags = LNET_PEER_DISCOVERED;
2726 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2727 flags |= LNET_PEER_MULTI_RAIL;
2730 * Cache the routing feature for the peer; whether it is enabled
2731 * or disabled, as reported by the remote peer.
2733 spin_lock(&lp->lp_lock);
2734 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2735 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2737 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2738 spin_unlock(&lp->lp_lock);
2740 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2741 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2742 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2743 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2744 if (!curnis || !addnis || !delnis) {
2752 /* Construct the list of NIDs present in peer. */
2754 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2755 curnis[ncurnis++] = lpni->lpni_nid;
2758 * Check for NIDs in pbuf not present in curnis[].
2759 * The loop starts at 1 to skip the loopback NID.
2761 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2762 for (j = 0; j < ncurnis; j++)
2763 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2766 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2769 * Check for NIDs in curnis[] not present in pbuf.
2770 * The nested loop starts at 1 to skip the loopback NID.
2772 * But never add the loopback NID to delnis[]: if it is
2773 * present in curnis[] then this peer is for this node.
2775 for (i = 0; i < ncurnis; i++) {
2776 if (curnis[i] == LNET_NID_LO_0)
2778 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2779 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2781 * update the information we cache for the
2782 * peer with the latest information we received.
2785 lpni = lnet_find_peer_ni_locked(curnis[i]);
2787 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2788 lnet_peer_ni_decref_locked(lpni);
2793 if (j == pbuf->pb_info.pi_nnis)
2794 delnis[ndelnis++] = curnis[i];
2798 * If we get here and discovery is disabled then we don't want
2799 * to add or delete any NIs. We just updated the ones we have some
2800 * information on, and call it a day.
2803 if (lnet_is_discovery_disabled(lp))
2806 for (i = 0; i < naddnis; i++) {
2807 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2809 CERROR("Error adding NID %s to peer %s: %d\n",
2810 libcfs_nid2str(addnis[i].ns_nid),
2811 libcfs_nid2str(lp->lp_primary_nid), rc);
2815 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2817 lpni->lpni_ns_status = addnis[i].ns_status;
2818 lnet_peer_ni_decref_locked(lpni);
2822 for (i = 0; i < ndelnis; i++) {
2824 * for routers it's okay to delete the primary_nid because
2825 * the upper layers don't really rely on it. So if we're
2826 * being told that the router changed its primary_nid
2827 * then it's okay to delete it.
2829 if (lp->lp_rtr_refcount > 0)
2830 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2831 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2833 CERROR("Error deleting NID %s from peer %s: %d\n",
2834 libcfs_nid2str(delnis[i]),
2835 libcfs_nid2str(lp->lp_primary_nid), rc);
2841 * Errors other than -ENOMEM are due to peers having been
2842 * configured with DLC. Ignore these because DLC overrides Discovery.
2847 CFS_FREE_PTR_ARRAY(curnis, nnis);
2848 CFS_FREE_PTR_ARRAY(addnis, nnis);
2849 CFS_FREE_PTR_ARRAY(delnis, nnis);
2850 lnet_ping_buffer_decref(pbuf);
2851 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2854 spin_lock(&lp->lp_lock);
2855 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2856 lp->lp_state |= LNET_PEER_FORCE_PING;
2857 spin_unlock(&lp->lp_lock);
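/*
 * Worked example for the merge above (hypothetical NIDs): if the peer
 * currently has curnis[] = { a@tcp, b@tcp } and the ping buffer lists
 * { <loopback>, a@tcp, c@tcp }, the loops compute addnis[] = { c@tcp }
 * and delnis[] = { b@tcp }: c@tcp is added, b@tcp is deleted, and the
 * cached status of a@tcp is refreshed in place.
 */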
2863 * The data in pbuf says lp is its primary peer, but the data was
2864 * received by a different peer. Try to update lp with the data.
2867 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2869 struct lnet_handle_md mdh;
2871 /* Queue lp for discovery, and force it on the request queue. */
2872 lnet_net_lock(LNET_LOCK_EX);
2873 if (lnet_peer_queue_for_discovery(lp))
2874 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2875 lnet_net_unlock(LNET_LOCK_EX);
2877 LNetInvalidateMDHandle(&mdh);
2880 * Decide whether we can move the peer to the DATA_PRESENT state.
2882 * We replace stale data for a multi-rail peer, repair PING_FAILED
2883 * status, and preempt FORCE_PING.
2885 * If after that we have DATA_PRESENT, we merge it into this peer.
2887 spin_lock(&lp->lp_lock);
2888 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2889 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2890 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2891 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2892 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2893 lnet_ping_buffer_decref(pbuf);
2898 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2899 lnet_ping_buffer_decref(lp->lp_data);
2901 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2903 if (lp->lp_state & LNET_PEER_PING_FAILED) {
2904 mdh = lp->lp_ping_mdh;
2905 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2906 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2907 lp->lp_ping_error = 0;
2909 if (lp->lp_state & LNET_PEER_FORCE_PING)
2910 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2911 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2912 spin_unlock(&lp->lp_lock);
2914 if (!LNetMDHandleIsInvalid(mdh))
2918 return lnet_peer_merge_data(lp, pbuf);
2920 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2924 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2928 for (i = 0; i < pinfo->pi_nnis; i++) {
2929 if (pinfo->pi_ni[i].ns_nid == nid)
2937 * Update a peer using the data received.
2939 static int lnet_peer_data_present(struct lnet_peer *lp)
2940 __must_hold(&lp->lp_lock)
2942 struct lnet_ping_buffer *pbuf;
2943 struct lnet_peer_ni *lpni;
2944 lnet_nid_t nid = LNET_NID_ANY;
2950 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2951 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2952 spin_unlock(&lp->lp_lock);
2955 * Modifications of peer structures are done while holding the
2956 * ln_api_mutex. A global lock is required because we may be
2957 * modifying multiple peer structures, and a mutex greatly
2958 * simplifies memory management.
2960 * The actual changes to the data structures must also protect
2961 * against concurrent lookups, for which the lnet_net_lock in
2962 * LNET_LOCK_EX mode is used.
2964 mutex_lock(&the_lnet.ln_api_mutex);
2965 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2971 * If this peer is not on the peer list then it is being torn
2972 * down, and our reference count may be all that is keeping it
2973 * alive. Don't do any work on it.
2975 if (list_empty(&lp->lp_peer_list))
2978 flags = LNET_PEER_DISCOVERED;
2979 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2980 flags |= LNET_PEER_MULTI_RAIL;
2983 * Check whether the primary NID in the message matches the
2984 * primary NID of the peer. If it does, update the peer; if
2985 * it does not, check whether there is already a peer with
2986 * that primary NID. If no such peer exists, try to update
2987 * the primary NID of the current peer (allowed if it was
2988 * created due to message traffic) and complete the update.
2989 * If the peer did exist, hand off the data to it.
2991 * The peer for the loopback interface is a special case: this
2992 * is the peer for the local node, and we want to set its
2993 * primary NID to the correct value here. Moreover, this peer
2994 * can show up with only the loopback NID in the ping buffer.
2996 if (pbuf->pb_info.pi_nnis <= 1)
2998 nid = pbuf->pb_info.pi_ni[1].ns_nid;
2999 if (lp->lp_primary_nid == LNET_NID_LO_0) {
3000 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3002 rc = lnet_peer_merge_data(lp, pbuf);
3004 * If the primary NID of the peer is present in the ping info returned
3005 * from the peer, but it is not the primary NID we have cached for
3006 * the peer, and discovery is disabled, then we don't want to update
3007 * our local peer info by adding or removing NIDs; we just want
3008 * to update the status of the NIDs that we currently have
3009 * recorded in that peer.
3011 } else if (lp->lp_primary_nid == nid ||
3012 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3013 lnet_is_discovery_disabled(lp))) {
3014 rc = lnet_peer_merge_data(lp, pbuf);
3016 lpni = lnet_find_peer_ni_locked(nid);
3018 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3020 CERROR("Primary NID error %s versus %s: %d\n",
3021 libcfs_nid2str(lp->lp_primary_nid),
3022 libcfs_nid2str(nid), rc);
3024 rc = lnet_peer_merge_data(lp, pbuf);
3027 struct lnet_peer *new_lp;
3028 new_lp = lpni->lpni_peer_net->lpn_peer;
3030 * If lp has discovery/MR enabled, then new_lp
3031 * should have discovery/MR enabled as well, since
3032 * it is the same peer, which we're about to merge.
3034 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3035 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3036 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3037 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3039 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3040 lnet_consolidate_routes_locked(lp, new_lp);
3041 lnet_peer_ni_decref_locked(lpni);
3045 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3047 mutex_unlock(&the_lnet.ln_api_mutex);
3049 spin_lock(&lp->lp_lock);
3050 /* Tell discovery to re-check the peer immediately. */
3052 rc = LNET_REDISCOVER_PEER;
3057 * A ping failed. Clear the PING_FAILED state and set the
3058 * FORCE_PING state, to ensure a retry even if discovery is
3059 * disabled. This avoids being left with incorrect state.
3061 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3062 __must_hold(&lp->lp_lock)
3064 struct lnet_handle_md mdh;
3067 mdh = lp->lp_ping_mdh;
3068 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3069 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3070 lp->lp_state |= LNET_PEER_FORCE_PING;
3071 rc = lp->lp_ping_error;
3072 lp->lp_ping_error = 0;
3073 spin_unlock(&lp->lp_lock);
3075 if (!LNetMDHandleIsInvalid(mdh))
3078 CDEBUG(D_NET, "peer %s:%d\n",
3079 libcfs_nid2str(lp->lp_primary_nid), rc);
3081 spin_lock(&lp->lp_lock);
3082 return rc ? rc : LNET_REDISCOVER_PEER;
3086 * Select NID to send a Ping or Push to.
3088 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3090 struct lnet_peer_ni *lpni;
3092 /* Look for a direct-connected NID for this peer. */
3094 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3095 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3100 return lpni->lpni_nid;
3102 /* Look for a routed-connected NID for this peer. */
3104 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3105 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3110 return lpni->lpni_nid;
3112 return LNET_NID_ANY;
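/*
 * Selection order above: prefer a NID on a net this node is directly
 * connected to; otherwise fall back to a NID reachable through a
 * configured route; return LNET_NID_ANY if neither exists.
 */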
3115 /* Active side of ping. */
3116 static int lnet_peer_send_ping(struct lnet_peer *lp)
3117 __must_hold(&lp->lp_lock)
3124 lp->lp_state |= LNET_PEER_PING_SENT;
3125 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3126 spin_unlock(&lp->lp_lock);
3128 cpt = lnet_net_lock_current();
3129 /* Refcount for MD. */
3130 lnet_peer_addref_locked(lp);
3131 pnid = lnet_peer_select_nid(lp);
3132 lnet_net_unlock(cpt);
3134 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3136 rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3137 the_lnet.ln_dc_handler, false);
3140 * If LNetMDBind() in lnet_send_ping() fails we need to decrement the
3141 * refcount on the peer ourselves; otherwise LNetMDUnlink() will be
3142 * called, which will eventually do that.
3146 lnet_peer_decref_locked(lp);
3147 lnet_net_unlock(cpt);
3148 rc = -rc; /* change rc to a negative value */
3150 } else if (rc < 0) {
3154 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3156 spin_lock(&lp->lp_lock);
3160 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3162 * The errors that get us here are considered hard errors and
3163 * cause Discovery to terminate. So we clear PING_SENT, but do
3164 * not set either PING_FAILED or FORCE_PING. In fact we need
3165 * to clear PING_FAILED, because the unlink event handler will
3166 * have set it if we called LNetMDUnlink() above.
3168 spin_lock(&lp->lp_lock);
3169 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3174 * This function exists because you cannot call LNetMDUnlink() from an
3175 * event handler.
3177 static int lnet_peer_push_failed(struct lnet_peer *lp)
3178 __must_hold(&lp->lp_lock)
3180 struct lnet_handle_md mdh;
3183 mdh = lp->lp_push_mdh;
3184 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3185 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3186 rc = lp->lp_push_error;
3187 lp->lp_push_error = 0;
3188 spin_unlock(&lp->lp_lock);
3190 if (!LNetMDHandleIsInvalid(mdh))
3193 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3194 spin_lock(&lp->lp_lock);
3195 return rc ? rc : LNET_REDISCOVER_PEER;
3199 * Mark the peer as discovered.
3201 static int lnet_peer_discovered(struct lnet_peer *lp)
3202 __must_hold(&lp->lp_lock)
3204 lp->lp_state |= LNET_PEER_DISCOVERED;
3205 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3206 LNET_PEER_REDISCOVER);
3208 lp->lp_dc_error = 0;
3210 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3215 /* Active side of push. */
3216 static int lnet_peer_send_push(struct lnet_peer *lp)
3217 __must_hold(&lp->lp_lock)
3219 struct lnet_ping_buffer *pbuf;
3220 struct lnet_process_id id;
3225 /* Don't push to a non-multi-rail peer. */
3226 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3227 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3228 /* if peer's NIDs are uptodate then peer is discovered */
3229 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3230 rc = lnet_peer_discovered(lp);
3237 lp->lp_state |= LNET_PEER_PUSH_SENT;
3238 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3239 spin_unlock(&lp->lp_lock);
3241 cpt = lnet_net_lock_current();
3242 pbuf = the_lnet.ln_ping_target;
3243 lnet_ping_buffer_addref(pbuf);
3244 lnet_net_unlock(cpt);
3246 /* Push source MD */
3247 md.start = &pbuf->pb_info;
3248 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3249 md.threshold = 2; /* Put/Ack */
3251 md.options = LNET_MD_TRACK_RESPONSE;
3252 md.handler = the_lnet.ln_dc_handler;
3255 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3257 lnet_ping_buffer_decref(pbuf);
3258 CERROR("Can't bind push source MD: %d\n", rc);
3261 cpt = lnet_net_lock_current();
3262 /* Refcount for MD. */
3263 lnet_peer_addref_locked(lp);
3264 id.pid = LNET_PID_LUSTRE;
3265 id.nid = lnet_peer_select_nid(lp);
3266 lnet_net_unlock(cpt);
3268 if (id.nid == LNET_NID_ANY) {
3273 rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3274 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3275 LNET_PROTO_PING_MATCHBITS, 0, 0);
3278 * reset the discovery nid. There is no need to restrict sending
3279 * from that source, if we call lnet_push_update_to_peers(). It'll
3280 * get set to a specific NID if we initiate discovery from scratch.
3283 lp->lp_disc_src_nid = LNET_NID_ANY;
3288 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3290 spin_lock(&lp->lp_lock);
3294 LNetMDUnlink(lp->lp_push_mdh);
3295 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3297 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3299 * The errors that get us here are considered hard errors and
3300 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3301 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3302 * because the unlink event handler will have set it if we
3303 * called LNetMDUnlink() above.
3305 spin_lock(&lp->lp_lock);
3306 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3311 * An unrecoverable error was encountered during discovery.
3312 * Set error status in peer and abort discovery.
3314 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3316 CDEBUG(D_NET, "Discovery error %s: %d\n",
3317 libcfs_nid2str(lp->lp_primary_nid), error);
3319 spin_lock(&lp->lp_lock);
3320 lp->lp_dc_error = error;
3321 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3322 lp->lp_state |= LNET_PEER_REDISCOVER;
3323 spin_unlock(&lp->lp_lock);
3327 * Discovering this peer is taking too long. Cancel any Ping or Push
3328 * that discovery is waiting on by unlinking the relevant MDs. The
3329 * lnet_discovery_event_handler() will proceed from here and complete
3330 * the cleanup.
3332 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3334 struct lnet_handle_md ping_mdh;
3335 struct lnet_handle_md push_mdh;
3337 LNetInvalidateMDHandle(&ping_mdh);
3338 LNetInvalidateMDHandle(&push_mdh);
3340 spin_lock(&lp->lp_lock);
3341 if (lp->lp_state & LNET_PEER_PING_SENT) {
3342 ping_mdh = lp->lp_ping_mdh;
3343 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3345 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3346 push_mdh = lp->lp_push_mdh;
3347 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3349 spin_unlock(&lp->lp_lock);
3351 if (!LNetMDHandleIsInvalid(ping_mdh))
3352 LNetMDUnlink(ping_mdh);
3353 if (!LNetMDHandleIsInvalid(push_mdh))
3354 LNetMDUnlink(push_mdh);
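/*
 * Note the pattern above: the MD handles are copied and invalidated
 * under lp_lock, but LNetMDUnlink() is only called after the lock has
 * been dropped, presumably to avoid taking LNet resource locks while
 * holding the peer spinlock. The resulting UNLINK events then finish
 * the cleanup in lnet_discovery_event_unlink().
 */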
3358 * Wait for work to be queued or some other change that must be
3359 * attended to. Returns non-zero if the discovery thread should shut down.
3362 static int lnet_peer_discovery_wait_for_work(void)
3369 cpt = lnet_net_lock_current();
3371 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3372 TASK_INTERRUPTIBLE);
3373 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3375 if (lnet_push_target_resize_needed() ||
3376 the_lnet.ln_push_target->pb_needs_post)
3378 if (!list_empty(&the_lnet.ln_dc_request))
3380 if (!list_empty(&the_lnet.ln_msg_resend))
3382 lnet_net_unlock(cpt);
3385 * Wake up at most every second to check for peers that
3386 * have been stuck on the working queue for longer than allowed.
3389 schedule_timeout(cfs_time_seconds(1));
3390 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3391 cpt = lnet_net_lock_current();
3393 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3395 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3398 lnet_net_unlock(cpt);
3400 CDEBUG(D_NET, "woken: %d\n", rc);
3406 * Messages that were pending on a destroyed peer will be put on a global
3407 * resend list. The message resend list will be checked by
3408 * the discovery thread when it wakes up, and will resend messages. These
3409 * messages can still be sendable if the lpni which was the initial
3410 * cause of the message re-queue was transferred to another peer.
3412 * It is possible that LNet could be shut down while we're iterating
3413 * through the list. lnet_shutdown_lndnets() will attempt to access the
3414 * resend list, but will have to wait until the spinlock is released, by
3415 * which time there shouldn't be any more messages on the resend list.
3416 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3417 * for the messages so they can be released. The other case is that
3418 * lnet_shutdown_lndnets() can finalize all the messages before this
3419 * function can visit the resend list, in which case this function will be
3420 * a no-op.
3422 static void lnet_resend_msgs(void)
3424 struct lnet_msg *msg, *tmp;
3428 spin_lock(&the_lnet.ln_msg_resend_lock);
3429 list_splice(&the_lnet.ln_msg_resend, &resend);
3430 spin_unlock(&the_lnet.ln_msg_resend_lock);
3432 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3433 list_del_init(&msg->msg_list);
3434 rc = lnet_send(msg->msg_src_nid_param, msg,
3435 msg->msg_rtr_nid_param);
3437 CNETERR("Error sending %s to %s: %d\n",
3438 lnet_msgtyp2str(msg->msg_type),
3439 libcfs_id2str(msg->msg_target), rc);
3440 lnet_finalize(msg, rc);
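/*
 * The splice-to-a-local-list pattern above keeps the hold time on
 * ln_msg_resend_lock short: the global resend list is emptied in one
 * step, and the potentially slow lnet_send() calls then run without
 * the spinlock held.
 */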
3445 /* The discovery thread. */
3446 static int lnet_peer_discovery(void *arg)
3448 struct lnet_peer *lp;
3451 wait_for_completion(&the_lnet.ln_started);
3453 CDEBUG(D_NET, "started\n");
3456 if (lnet_peer_discovery_wait_for_work())
3459 if (lnet_push_target_resize_needed())
3460 lnet_push_target_resize();
3461 else if (the_lnet.ln_push_target->pb_needs_post)
3462 lnet_push_target_post(the_lnet.ln_push_target,
3463 &the_lnet.ln_push_target_md);
3467 lnet_net_lock(LNET_LOCK_EX);
3468 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3469 lnet_net_unlock(LNET_LOCK_EX);
3474 * Process all incoming discovery work requests. When
3475 * discovery must wait on a peer to change state, it
3476 * is added to the tail of the ln_dc_working queue. A
3477 * timestamp keeps track of when the peer was added,
3478 * so we can time out discovery requests that take too long.
3481 while (!list_empty(&the_lnet.ln_dc_request)) {
3482 lp = list_first_entry(&the_lnet.ln_dc_request,
3483 struct lnet_peer, lp_dc_list);
3484 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3486 * set the time the peer was put on the dc_working
3487 * queue. It shouldn't remain on the queue
3488 * forever, in case the GET message (for ping)
3489 * doesn't get a REPLY or the PUT message (for
3490 * push) doesn't get an ACK.
3492 lp->lp_last_queued = ktime_get_real_seconds();
3493 lnet_net_unlock(LNET_LOCK_EX);
3495 if (lnet_push_target_resize_needed())
3496 lnet_push_target_resize();
3497 else if (the_lnet.ln_push_target->pb_needs_post)
3498 lnet_push_target_post(the_lnet.ln_push_target,
3499 &the_lnet.ln_push_target_md);
3502 * Select an action depending on the state of
3503 * the peer and whether discovery is disabled.
3504 * The check whether discovery is disabled is
3505 * done after the code that handles processing
3506 * for arrived data, cleanup for failures, and
3507 * forcing a Ping or Push.
3509 spin_lock(&lp->lp_lock);
3510 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3511 libcfs_nid2str(lp->lp_primary_nid), lp,
3513 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3514 rc = lnet_peer_data_present(lp);
3515 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3516 rc = lnet_peer_ping_failed(lp);
3517 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3518 rc = lnet_peer_push_failed(lp);
3519 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3520 rc = lnet_peer_send_ping(lp);
3521 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3522 rc = lnet_peer_send_push(lp);
3523 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3524 rc = lnet_peer_send_ping(lp);
3525 else if (lnet_peer_needs_push(lp))
3526 rc = lnet_peer_send_push(lp);
3528 rc = lnet_peer_discovered(lp);
3529 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3530 libcfs_nid2str(lp->lp_primary_nid), lp,
3532 spin_unlock(&lp->lp_lock);
3534 lnet_net_lock(LNET_LOCK_EX);
3535 if (rc == LNET_REDISCOVER_PEER) {
3536 list_move(&lp->lp_dc_list,
3537 &the_lnet.ln_dc_request);
3539 lnet_peer_discovery_error(lp, rc);
3541 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3542 lnet_peer_discovery_complete(lp);
3543 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3546 if (lp->lp_state & LNET_PEER_MARK_DELETION) {
3547 struct list_head rlist;
3548 struct lnet_route *route, *tmp;
3549 int sensitivity = lp->lp_health_sensitivity;
3551 INIT_LIST_HEAD(&rlist);
3554 * remove the peer from the discovery work
3555 * queue if it's on there, in preparation for deleting the peer.
3558 if (!list_empty(&lp->lp_dc_list))
3559 list_del(&lp->lp_dc_list);
3561 lnet_net_unlock(LNET_LOCK_EX);
3563 mutex_lock(&the_lnet.ln_api_mutex);
3565 lnet_net_lock(LNET_LOCK_EX);
3566 list_for_each_entry_safe(route, tmp,
3569 lnet_move_route(route, NULL, &rlist);
3570 lnet_net_unlock(LNET_LOCK_EX);
3572 /* delete the peer */
3575 list_for_each_entry_safe(route, tmp,
3577 /* re-add these routes */
3578 lnet_add_route(route->lr_net,
3583 LIBCFS_FREE(route, sizeof(*route));
3585 mutex_unlock(&the_lnet.ln_api_mutex);
3587 lnet_net_lock(LNET_LOCK_EX);
3591 lnet_net_unlock(LNET_LOCK_EX);
3594 CDEBUG(D_NET, "stopping\n");
3596 * Clean up before telling lnet_peer_discovery_stop() that
3597 * we're done. Use wake_up() below to somewhat reduce the
3598 * size of the thundering herd if there are multiple threads
3599 * waiting on discovery of a single peer.
3602 /* Queue cleanup 1: stop all pending pings and pushes. */
3603 lnet_net_lock(LNET_LOCK_EX);
3604 while (!list_empty(&the_lnet.ln_dc_working)) {
3605 lp = list_first_entry(&the_lnet.ln_dc_working,
3606 struct lnet_peer, lp_dc_list);
3607 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3608 lnet_net_unlock(LNET_LOCK_EX);
3609 lnet_peer_cancel_discovery(lp);
3610 lnet_net_lock(LNET_LOCK_EX);
3612 lnet_net_unlock(LNET_LOCK_EX);
3614 /* Queue cleanup 2: wait for the expired queue to clear. */
3615 while (!list_empty(&the_lnet.ln_dc_expired))
3616 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3618 /* Queue cleanup 3: clear the request queue. */
3619 lnet_net_lock(LNET_LOCK_EX);
3620 while (!list_empty(&the_lnet.ln_dc_request)) {
3621 lp = list_first_entry(&the_lnet.ln_dc_request,
3622 struct lnet_peer, lp_dc_list);
3623 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3624 lnet_peer_discovery_complete(lp);
3626 lnet_net_unlock(LNET_LOCK_EX);
3628 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3629 the_lnet.ln_dc_handler = NULL;
3631 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3632 wake_up(&the_lnet.ln_dc_waitq);
3634 CDEBUG(D_NET, "stopped\n");
3639 /* ln_api_mutex is held on entry. */
3640 int lnet_peer_discovery_start(void)
3642 struct task_struct *task;
3645 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3648 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3649 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3650 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3653 CERROR("Can't start peer discovery thread: %d\n", rc);
3655 the_lnet.ln_dc_handler = NULL;
3657 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3660 CDEBUG(D_NET, "discovery start: %d\n", rc);
3665 /* ln_api_mutex is held on entry. */
3666 void lnet_peer_discovery_stop(void)
3668 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3671 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3672 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3674 /* In the LNetNIInit() path we may be stopping discovery before it
3675 * entered its work loop.
3677 if (!completion_done(&the_lnet.ln_started))
3678 complete(&the_lnet.ln_started);
3680 wake_up(&the_lnet.ln_dc_waitq);
3682 wait_event(the_lnet.ln_dc_waitq,
3683 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3685 LASSERT(list_empty(&the_lnet.ln_dc_request));
3686 LASSERT(list_empty(&the_lnet.ln_dc_working));
3687 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3689 CDEBUG(D_NET, "discovery stopped\n");
3695 lnet_debug_peer(lnet_nid_t nid)
3697 char *aliveness = "NA";
3698 struct lnet_peer_ni *lp;
3701 cpt = lnet_cpt_of_nid(nid, NULL);
3704 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3706 lnet_net_unlock(cpt);
3707 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3711 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3712 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3714 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3715 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3716 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3717 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3718 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3720 lnet_peer_ni_decref_locked(lp);
3722 lnet_net_unlock(cpt);
3725 /* Gathering information for userspace. */
3727 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3728 char aliveness[LNET_MAX_STR_LEN],
3729 __u32 *cpt_iter, __u32 *refcount,
3730 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3731 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3732 __u32 *peer_tx_qnob)
3734 struct lnet_peer_table *peer_table;
3735 struct lnet_peer_ni *lp;
3740 /* get the number of CPTs */
3741 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3743 /* if the cpt number to be examined is >= the number of cpts in
3744 * the system then indicate that there are no more cpts to examine
3746 if (*cpt_iter >= lncpt)
3749 /* get the current table */
3750 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3751 /* if the ptable is NULL then there are no more cpts to examine */
3752 if (peer_table == NULL)
3755 lnet_net_lock(*cpt_iter);
3757 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3758 struct list_head *peers = &peer_table->pt_hash[j];
3760 list_for_each_entry(lp, peers, lpni_hashlist) {
3761 if (peer_index-- > 0)
3764 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3765 if (lnet_isrouter(lp) ||
3766 lnet_peer_aliveness_enabled(lp))
3767 snprintf(aliveness, LNET_MAX_STR_LEN,
3768 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3770 *nid = lp->lpni_nid;
3771 *refcount = atomic_read(&lp->lpni_refcount);
3772 *ni_peer_tx_credits =
3773 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3774 *peer_tx_credits = lp->lpni_txcredits;
3775 *peer_rtr_credits = lp->lpni_rtrcredits;
3776 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3777 *peer_tx_qnob = lp->lpni_txqnob;
3783 lnet_net_unlock(*cpt_iter);
3787 return found ? 0 : -ENOENT;
3790 /* ln_api_mutex is held, which keeps the peer list stable */
3791 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3793 struct lnet_ioctl_element_stats *lpni_stats;
3794 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3795 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3796 struct lnet_peer_ni_credit_info *lpni_info;
3797 struct lnet_peer_ni *lpni;
3798 struct lnet_peer *lp;
3803 lp = lnet_find_peer(cfg->prcfg_prim_nid);
3810 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3811 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3812 size *= lp->lp_nnis;
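/*
 * The per-lpni record size computed above fixes the bulk layout:
 * NID, credit info, element stats, message stats, health stats,
 * in that order. The copy loop below must emit the fields in
 * exactly this sequence for userspace to parse them.
 */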
3813 if (size > cfg->prcfg_size) {
3814 cfg->prcfg_size = size;
3819 cfg->prcfg_prim_nid = lp->lp_primary_nid;
3820 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3821 cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3822 cfg->prcfg_count = lp->lp_nnis;
3823 cfg->prcfg_size = size;
3824 cfg->prcfg_state = lp->lp_state;
3826 /* Allocate helper buffers. */
3828 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3831 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3834 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3835 if (!lpni_msg_stats)
3836 goto out_free_stats;
3837 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3839 goto out_free_msg_stats;
3844 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3845 nid = lpni->lpni_nid;
3846 if (copy_to_user(bulk, &nid, sizeof(nid)))
3847 goto out_free_hstats;
3848 bulk += sizeof(nid);
3850 memset(lpni_info, 0, sizeof(*lpni_info));
3851 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3852 if (lnet_isrouter(lpni) ||
3853 lnet_peer_aliveness_enabled(lpni))
3854 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3855 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3857 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3858 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3859 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3860 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3861 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3862 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3863 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3864 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3865 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3866 goto out_free_hstats;
3867 bulk += sizeof(*lpni_info);
3869 memset(lpni_stats, 0, sizeof(*lpni_stats));
3870 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3871 LNET_STATS_TYPE_SEND);
3872 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3873 LNET_STATS_TYPE_RECV);
3874 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3875 LNET_STATS_TYPE_DROP);
3876 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3877 goto out_free_hstats;
3878 bulk += sizeof(*lpni_stats);
3879 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3880 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3881 goto out_free_hstats;
3882 bulk += sizeof(*lpni_msg_stats);
3883 lpni_hstats->hlpni_network_timeout =
3884 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3885 lpni_hstats->hlpni_remote_dropped =
3886 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3887 lpni_hstats->hlpni_remote_timeout =
3888 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3889 lpni_hstats->hlpni_remote_error =
3890 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3891 lpni_hstats->hlpni_health_value =
3892 atomic_read(&lpni->lpni_healthv);
3893 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3894 goto out_free_hstats;
3895 bulk += sizeof(*lpni_hstats);
3900 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3902 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3904 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3906 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3908 lnet_peer_decref_locked(lp);
3914 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3916 /* the monitor thread could've shut down and cleaned up the queues */
3917 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3920 if (list_empty(&lpni->lpni_recovery) &&
3921 atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3922 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3923 libcfs_nid2str(lpni->lpni_nid),
3924 atomic_read(&lpni->lpni_healthv));
3925 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3926 lnet_peer_ni_addref_locked(lpni);
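/*
 * Note on the reference taken above: the recovery queue owns one
 * reference on the lpni for as long as it sits on ln_mt_peerNIRecovq;
 * whoever removes it from the queue is expected to drop that
 * reference.
 */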
3930 /* Call with the ln_api_mutex held */
3932 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3934 struct lnet_peer_table *ptable;
3935 struct lnet_peer *lp;
3936 struct lnet_peer_net *lpn;
3937 struct lnet_peer_ni *lpni;
3941 if (the_lnet.ln_state != LNET_STATE_RUNNING)
3945 lnet_net_lock(LNET_LOCK_EX);
3946 lpni = lnet_find_peer_ni_locked(nid);
3948 lnet_net_unlock(LNET_LOCK_EX);
3951 atomic_set(&lpni->lpni_healthv, value);
3952 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3953 lnet_peer_ni_decref_locked(lpni);
3954 lnet_net_unlock(LNET_LOCK_EX);
3958 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3961 * Walk all the peers and reset the health value for each one to the specified value.
3964 lnet_net_lock(LNET_LOCK_EX);
3965 for (cpt = 0; cpt < lncpt; cpt++) {
3966 ptable = the_lnet.ln_peer_tables[cpt];
3967 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3968 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3969 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3971 atomic_set(&lpni->lpni_healthv, value);
3972 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3977 lnet_net_unlock(LNET_LOCK_EX);