4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
35 #define DEBUG_SUBSYSTEM S_LNET
37 #include <linux/sched.h>
38 #ifdef HAVE_SCHED_HEADERS
39 #include <linux/sched/signal.h>
41 #include <linux/uaccess.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER (1)
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
54 if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56 lnet_peer_ni_decref_locked(lpni);
61 lnet_peer_net_added(struct lnet_net *net)
63 struct lnet_peer_ni *lpni, *tmp;
65 list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66 lpni_on_remote_peer_ni_list) {
68 if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
71 spin_lock(&lpni->lpni_lock);
72 lpni->lpni_txcredits =
73 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75 lpni->lpni_rtrcredits =
76 lnet_peer_buffer_credits(lpni->lpni_net);
77 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78 spin_unlock(&lpni->lpni_lock);
80 lnet_peer_remove_from_remote_list(lpni);
86 lnet_peer_tables_destroy(void)
88 struct lnet_peer_table *ptable;
89 struct list_head *hash;
93 if (!the_lnet.ln_peer_tables)
96 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97 hash = ptable->pt_hash;
98 if (!hash) /* not initialized */
101 LASSERT(list_empty(&ptable->pt_zombie_list));
103 ptable->pt_hash = NULL;
104 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105 LASSERT(list_empty(&hash[j]));
107 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
110 cfs_percpt_free(the_lnet.ln_peer_tables);
111 the_lnet.ln_peer_tables = NULL;
115 lnet_peer_tables_create(void)
117 struct lnet_peer_table *ptable;
118 struct list_head *hash;
122 the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
124 if (the_lnet.ln_peer_tables == NULL) {
125 CERROR("Failed to allocate cpu-partition peer tables\n");
129 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131 LNET_PEER_HASH_SIZE * sizeof(*hash));
133 CERROR("Failed to create peer hash table\n");
134 lnet_peer_tables_destroy();
138 spin_lock_init(&ptable->pt_zombie_lock);
139 INIT_LIST_HEAD(&ptable->pt_zombie_list);
141 INIT_LIST_HEAD(&ptable->pt_peer_list);
143 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144 INIT_LIST_HEAD(&hash[j]);
145 ptable->pt_hash = hash; /* sign of initialization */
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(lnet_nid_t nid)
154 struct lnet_peer_ni *lpni;
155 struct lnet_net *net;
158 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
160 LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
164 INIT_LIST_HEAD(&lpni->lpni_txq);
165 INIT_LIST_HEAD(&lpni->lpni_hashlist);
166 INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167 INIT_LIST_HEAD(&lpni->lpni_recovery);
168 INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171 spin_lock_init(&lpni->lpni_lock);
173 if (lnet_peers_start_down())
174 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
176 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
177 lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
178 lpni->lpni_nid = nid;
179 lpni->lpni_cpt = cpt;
180 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
182 net = lnet_get_net_locked(LNET_NIDNET(nid));
183 lpni->lpni_net = net;
185 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
186 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
187 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
188 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
191 * This peer_ni is not on a local network, so we
192 * cannot add the credits here. In case the net is
193 * added later, add the peer_ni to the remote peer ni
194 * list so it can be easily found and revisited.
196 /* FIXME: per-net implementation instead? */
197 atomic_inc(&lpni->lpni_refcount);
198 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
199 &the_lnet.ln_remote_peer_ni_list);
202 CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
207 static struct lnet_peer_net *
208 lnet_peer_net_alloc(__u32 net_id)
210 struct lnet_peer_net *lpn;
212 LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216 INIT_LIST_HEAD(&lpn->lpn_peer_nets);
217 INIT_LIST_HEAD(&lpn->lpn_peer_nis);
218 lpn->lpn_net_id = net_id;
220 CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
226 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
228 struct lnet_peer *lp;
230 CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
232 LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
233 LASSERT(list_empty(&lpn->lpn_peer_nis));
234 LASSERT(list_empty(&lpn->lpn_peer_nets));
236 lpn->lpn_peer = NULL;
237 LIBCFS_FREE(lpn, sizeof(*lpn));
239 lnet_peer_decref_locked(lp);
242 static struct lnet_peer *
243 lnet_peer_alloc(lnet_nid_t nid)
245 struct lnet_peer *lp;
247 LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
251 INIT_LIST_HEAD(&lp->lp_rtrq);
252 INIT_LIST_HEAD(&lp->lp_routes);
253 INIT_LIST_HEAD(&lp->lp_peer_list);
254 INIT_LIST_HEAD(&lp->lp_peer_nets);
255 INIT_LIST_HEAD(&lp->lp_dc_list);
256 INIT_LIST_HEAD(&lp->lp_dc_pendq);
257 INIT_LIST_HEAD(&lp->lp_rtr_list);
258 init_waitqueue_head(&lp->lp_dc_waitq);
259 spin_lock_init(&lp->lp_lock);
260 lp->lp_primary_nid = nid;
261 if (lnet_peers_start_down())
262 lp->lp_alive = false;
267 * all peers created on a router should have health checking
268 * enabled, if it is not already on.
270 if (the_lnet.ln_routing && !lnet_health_sensitivity)
271 lp->lp_health_sensitivity = 1;
274 * Turn off discovery for loopback peer. If you're creating a peer
275 * for the loopback interface then that was initiated when we
276 * attempted to send a message over the loopback. There is no need
277 * to ever use a different interface when sending messages to
280 if (nid == LNET_NID_LO_0)
281 lp->lp_state = LNET_PEER_NO_DISCOVERY;
282 lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
284 CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
290 lnet_destroy_peer_locked(struct lnet_peer *lp)
292 CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
294 LASSERT(atomic_read(&lp->lp_refcount) == 0);
295 LASSERT(lp->lp_rtr_refcount == 0);
296 LASSERT(list_empty(&lp->lp_peer_nets));
297 LASSERT(list_empty(&lp->lp_peer_list));
298 LASSERT(list_empty(&lp->lp_dc_list));
301 lnet_ping_buffer_decref(lp->lp_data);
304 * if there are messages still on the pending queue, then make
305 * sure to queue them on the ln_msg_resend list so they can be
306 * resent at a later point if the discovery thread is still active.
308 * If the discovery thread has stopped, then the wakeup will be a
309 * no-op, and it is expected that lnet_shutdown_lndnets() will
310 * eventually be called, which will traverse this list and
311 * finalize the messages on the list.
312 * We cannot resend them now because we're holding the cpt lock;
313 * releasing the lock could cause an inconsistent state.
315 spin_lock(&the_lnet.ln_msg_resend_lock);
316 spin_lock(&lp->lp_lock);
317 list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
318 spin_unlock(&lp->lp_lock);
319 spin_unlock(&the_lnet.ln_msg_resend_lock);
320 wake_up(&the_lnet.ln_dc_waitq);
322 LIBCFS_FREE(lp, sizeof(*lp));
326 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
327 * that peer_net, detach the peer_net from the peer.
329 * Call with lnet_net_lock/EX held
332 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
334 struct lnet_peer_table *ptable;
335 struct lnet_peer_net *lpn;
336 struct lnet_peer *lp;
339 * Belts and suspenders: gracefully handle teardown of a
340 * partially connected peer_ni.
342 lpn = lpni->lpni_peer_net;
344 list_del_init(&lpni->lpni_peer_nis);
346 * If there are no lpni's left, we detach lpn from
347 * lp_peer_nets, so it cannot be found anymore.
349 if (list_empty(&lpn->lpn_peer_nis))
350 list_del_init(&lpn->lpn_peer_nets);
352 /* Update peer NID count. */
357 * If there are no more peer nets, make the peer unfindable
358 * via the peer_tables.
360 * Otherwise, if the peer is DISCOVERED, tell discovery to
361 * take another look at it. This is a no-op if discovery for
362 * this peer did the detaching.
364 if (list_empty(&lp->lp_peer_nets)) {
365 list_del_init(&lp->lp_peer_list);
366 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
368 } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
369 /* Discovery isn't running, nothing to do here. */
370 } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
371 lnet_peer_queue_for_discovery(lp);
372 wake_up(&the_lnet.ln_dc_waitq);
374 CDEBUG(D_NET, "peer %s NID %s\n",
375 libcfs_nid2str(lp->lp_primary_nid),
376 libcfs_nid2str(lpni->lpni_nid));
379 /* called with lnet_net_lock LNET_LOCK_EX held */
381 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
383 struct lnet_peer_table *ptable = NULL;
385 /* don't remove a peer_ni if it's also a gateway */
386 if (lnet_isrouter(lpni) && !force) {
387 CERROR("Peer NI %s is a gateway. Can not delete it\n",
388 libcfs_nid2str(lpni->lpni_nid));
392 lnet_peer_remove_from_remote_list(lpni);
394 /* remove peer ni from the hash list. */
395 list_del_init(&lpni->lpni_hashlist);
398 * indicate the peer is being deleted so the monitor thread can
399 * remove it from the recovery queue.
401 spin_lock(&lpni->lpni_lock);
402 lpni->lpni_state |= LNET_PEER_NI_DELETING;
403 spin_unlock(&lpni->lpni_lock);
405 /* decrement the ref count on the peer table */
406 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
409 * The peer_ni can no longer be found with a lookup. But there
410 * can be current users, so keep track of it on the zombie
411 * list until the reference count has gone to zero.
413 * The last reference may be lost in a place where the
414 * lnet_net_lock locks only a single cpt, and that cpt may not
415 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table has its own lock.
418 spin_lock(&ptable->pt_zombie_lock);
419 list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
420 ptable->pt_zombies++;
421 spin_unlock(&ptable->pt_zombie_lock);
423 /* no need to keep this peer_ni on the hierarchy anymore */
424 lnet_peer_detach_peer_ni_locked(lpni);
426 /* remove hashlist reference on peer_ni */
427 lnet_peer_ni_decref_locked(lpni);
432 void lnet_peer_uninit(void)
434 struct lnet_peer_ni *lpni, *tmp;
436 lnet_net_lock(LNET_LOCK_EX);
438 /* remove all peer_nis from the remote peer and the hash list */
439 list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
440 lpni_on_remote_peer_ni_list)
441 lnet_peer_ni_del_locked(lpni, false);
443 lnet_peer_tables_destroy();
445 lnet_net_unlock(LNET_LOCK_EX);
449 lnet_peer_del_locked(struct lnet_peer *peer)
451 struct lnet_peer_ni *lpni = NULL, *lpni2;
454 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
456 lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
457 while (lpni != NULL) {
458 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
459 rc = lnet_peer_ni_del_locked(lpni, false);
469 lnet_peer_del(struct lnet_peer *peer)
471 lnet_net_lock(LNET_LOCK_EX);
472 lnet_peer_del_locked(peer);
473 lnet_net_unlock(LNET_LOCK_EX);
479 * Delete a NID from a peer. Call with ln_api_mutex held.
482 * -EPERM: Non-DLC deletion from DLC-configured peer.
483 * -ENOENT: No lnet_peer_ni corresponding to the nid.
484 * -ECHILD: The lnet_peer_ni isn't connected to the peer.
485 * -EBUSY: The lnet_peer_ni is the primary, and not the only peer_ni.
488 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
490 struct lnet_peer_ni *lpni;
491 lnet_nid_t primary_nid = lp->lp_primary_nid;
493 bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
495 if (!(flags & LNET_PEER_CONFIGURED)) {
496 if (lp->lp_state & LNET_PEER_CONFIGURED) {
501 lpni = lnet_find_peer_ni_locked(nid);
506 lnet_peer_ni_decref_locked(lpni);
507 if (lp != lpni->lpni_peer_net->lpn_peer) {
513 * This function only allows deletion of the primary NID if it is the only NID, unless a force deletion is requested.
516 if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
521 lnet_net_lock(LNET_LOCK_EX);
523 if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
524 struct lnet_peer_ni *lpni2;
525 /* assign the next peer_ni to be the primary */
526 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
528 lp->lp_primary_nid = lpni2->lpni_nid;
530 rc = lnet_peer_ni_del_locked(lpni, force);
532 lnet_net_unlock(LNET_LOCK_EX);
535 CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
536 libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
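/*
 * Editorial sketch, not part of the original file: a hypothetical
 * caller ("example_del_nid") mapping the return codes documented
 * above to log messages. Assumes ln_api_mutex is held, as required.
 */
static int example_del_nid(struct lnet_peer *lp, lnet_nid_t nid)
{
	/* DLC-initiated deletion, so pass LNET_PEER_CONFIGURED. */
	int rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);

	switch (rc) {
	case -EBUSY:	/* nid is the primary and other NIDs remain */
		CERROR("%s is the primary NID of a multi-NID peer\n",
		       libcfs_nid2str(nid));
		break;
	case -ECHILD:	/* the peer_ni belongs to a different peer */
	case -ENOENT:	/* no such peer_ni */
		CERROR("cannot delete %s: %d\n", libcfs_nid2str(nid), rc);
		break;
	}
	return rc;
}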
542 lnet_peer_table_cleanup_locked(struct lnet_net *net,
543 struct lnet_peer_table *ptable)
546 struct lnet_peer_ni *next;
547 struct lnet_peer_ni *lpni;
548 struct lnet_peer *peer;
550 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
551 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
553 if (net != NULL && net != lpni->lpni_net)
556 peer = lpni->lpni_peer_net->lpn_peer;
557 if (peer->lp_primary_nid != lpni->lpni_nid) {
558 lnet_peer_ni_del_locked(lpni, false);
562 * Removing the primary NID implies removing
563 * the entire peer. Advance next beyond any
564 * peer_ni that belongs to the same peer.
566 list_for_each_entry_from(next, &ptable->pt_hash[i],
568 if (next->lpni_peer_net->lpn_peer != peer)
571 lnet_peer_del_locked(peer);
577 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
579 wait_var_event_warning(&ptable->pt_zombies,
580 ptable->pt_zombies == 0,
581 "Waiting for %d zombies on peer table\n",
586 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
587 struct lnet_peer_table *ptable)
589 struct lnet_peer_ni *lp;
590 struct lnet_peer_ni *tmp;
594 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
595 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
597 if (net != lp->lpni_net)
600 if (!lnet_isrouter(lp))
603 gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
605 lnet_net_unlock(LNET_LOCK_EX);
606 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), gw_nid);
607 lnet_net_lock(LNET_LOCK_EX);
613 lnet_peer_tables_cleanup(struct lnet_net *net)
616 struct lnet_peer_table *ptable;
618 LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
619 /* If just deleting the peers for a NI, get rid of any routes these
620 * peers are gateways for. */
621 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
622 lnet_net_lock(LNET_LOCK_EX);
623 lnet_peer_table_del_rtrs_locked(net, ptable);
624 lnet_net_unlock(LNET_LOCK_EX);
627 /* Start the cleanup process */
628 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
629 lnet_net_lock(LNET_LOCK_EX);
630 lnet_peer_table_cleanup_locked(net, ptable);
631 lnet_net_unlock(LNET_LOCK_EX);
634 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
635 lnet_peer_ni_finalize_wait(ptable);
638 static struct lnet_peer_ni *
639 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
641 struct list_head *peers;
642 struct lnet_peer_ni *lp;
644 if (the_lnet.ln_state != LNET_STATE_RUNNING)
647 peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
648 list_for_each_entry(lp, peers, lpni_hashlist) {
649 if (lp->lpni_nid == nid) {
650 lnet_peer_ni_addref_locked(lp);
658 struct lnet_peer_ni *
659 lnet_find_peer_ni_locked(lnet_nid_t nid)
661 struct lnet_peer_ni *lpni;
662 struct lnet_peer_table *ptable;
665 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
667 ptable = the_lnet.ln_peer_tables[cpt];
668 lpni = lnet_get_peer_ni_locked(ptable, nid);
673 struct lnet_peer_ni *
674 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
676 struct lnet_peer_net *lpn;
677 struct lnet_peer_ni *lpni;
679 lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
683 list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
684 if (lpni->lpni_nid == nid)
692 lnet_find_peer(lnet_nid_t nid)
694 struct lnet_peer_ni *lpni;
695 struct lnet_peer *lp = NULL;
698 cpt = lnet_net_lock_current();
699 lpni = lnet_find_peer_ni_locked(nid);
701 lp = lpni->lpni_peer_net->lpn_peer;
702 lnet_peer_addref_locked(lp);
703 lnet_peer_ni_decref_locked(lpni);
705 lnet_net_unlock(cpt);
710 struct lnet_peer_net *
711 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
713 struct lnet_peer_net *net;
716 /* no net id provided; return the first net */
717 net = list_first_entry_or_null(&lp->lp_peer_nets,
718 struct lnet_peer_net,
724 /* find the net after the one provided */
725 list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
726 if (net->lpn_net_id == prev_lpn_id) {
728 * if we reached the end of the list, loop back to the beginning and return the first net
731 if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
732 return list_first_entry_or_null(&lp->lp_peer_nets,
733 struct lnet_peer_net,
736 return list_next_entry(net, lpn_peer_nets);
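/*
 * Editorial sketch, not part of the original file: visiting each
 * peer_net of a peer once with lnet_get_next_peer_net_locked().
 * Because the iterator wraps from the last net back to the first,
 * the hypothetical walker below stops when the starting net id
 * comes around again. Call with lnet_net_lock held.
 */
static void example_walk_peer_nets(struct lnet_peer *lp)
{
	struct lnet_peer_net *lpn;
	__u32 first = 0, prev = 0;
	bool started = false;

	while ((lpn = lnet_get_next_peer_net_locked(lp, prev)) != NULL) {
		if (started && lpn->lpn_net_id == first)
			break;			/* wrapped around */
		if (!started) {
			first = lpn->lpn_net_id;
			started = true;
		}
		CDEBUG(D_NET, "net %s\n", libcfs_net2str(lpn->lpn_net_id));
		prev = lpn->lpn_net_id;
	}
}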
743 struct lnet_peer_ni *
744 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
745 struct lnet_peer_net *peer_net,
746 struct lnet_peer_ni *prev)
748 struct lnet_peer_ni *lpni;
749 struct lnet_peer_net *net = peer_net;
753 if (list_empty(&peer->lp_peer_nets))
756 net = list_entry(peer->lp_peer_nets.next,
757 struct lnet_peer_net,
760 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
766 if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
768 * if we reached the end of the peer ni list and the peer
769 * net is specified then there are no more peer nis in that net.
776 * we reached the end of this net's ni list; move to the next net.
779 if (prev->lpni_peer_net->lpn_peer_nets.next ==
781 /* no more nets and no more NIs. */
784 /* get the next net */
785 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
786 struct lnet_peer_net,
788 /* get the ni on it */
789 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
795 /* there are more nis left */
796 lpni = list_entry(prev->lpni_peer_nis.next,
797 struct lnet_peer_ni, lpni_peer_nis);
802 /* Call with the ln_api_mutex held */
803 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
805 struct lnet_process_id id;
806 struct lnet_peer_table *ptable;
807 struct lnet_peer *lp;
816 if (the_lnet.ln_state != LNET_STATE_RUNNING)
819 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
822 * Count the number of peers, and return E2BIG if the buffer
823 * is too small. We'll also return the desired size.
826 for (cpt = 0; cpt < lncpt; cpt++) {
827 ptable = the_lnet.ln_peer_tables[cpt];
828 count += ptable->pt_peers;
830 size = count * sizeof(*ids);
835 * Walk the peer lists and copy out the primary nids.
836 * This is safe because the peer lists are only modified
837 * while the ln_api_mutex is held. So we don't need to
838 * hold the lnet_net_lock as well, and can therefore
839 * directly call copy_to_user().
842 memset(&id, 0, sizeof(id));
843 id.pid = LNET_PID_LUSTRE;
845 for (cpt = 0; cpt < lncpt; cpt++) {
846 ptable = the_lnet.ln_peer_tables[cpt];
847 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
850 id.nid = lp->lp_primary_nid;
851 if (copy_to_user(&ids[i], &id, sizeof(id)))
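/*
 * Editorial sketch, not part of the original file: the intended
 * two-step calling pattern for lnet_get_peer_list(). Per the comment
 * above, -E2BIG is assumed to report the desired count and size back
 * through the pointers so this hypothetical caller can retry with a
 * larger buffer.
 */
static int example_list_peers(struct lnet_process_id __user *ids, u32 nslots)
{
	u32 count = nslots;
	u32 size = nslots * sizeof(struct lnet_process_id);
	int rc;

	rc = lnet_get_peer_list(&count, &size, ids);
	if (rc == -E2BIG)
		CDEBUG(D_NET, "need room for %u peers (%u bytes)\n",
		       count, size);
	return rc;
}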
864 * Start pushes to peers that need to be updated for a configuration
865 * change on this node.
868 lnet_push_update_to_peers(int force)
870 struct lnet_peer_table *ptable;
871 struct lnet_peer *lp;
875 lnet_net_lock(LNET_LOCK_EX);
876 if (lnet_peer_discovery_disabled)
878 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
879 for (cpt = 0; cpt < lncpt; cpt++) {
880 ptable = the_lnet.ln_peer_tables[cpt];
881 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
883 spin_lock(&lp->lp_lock);
884 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
885 lp->lp_state |= LNET_PEER_FORCE_PUSH;
886 spin_unlock(&lp->lp_lock);
888 if (lnet_peer_needs_push(lp))
889 lnet_peer_queue_for_discovery(lp);
892 lnet_net_unlock(LNET_LOCK_EX);
893 wake_up(&the_lnet.ln_dc_waitq);
897 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
898 * this is a preferred point-to-point path. Call with lnet_net_lock in
899 * shared mode held.
902 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
906 if (lpni->lpni_pref_nnids == 0)
908 if (lpni->lpni_pref_nnids == 1)
909 return lpni->lpni_pref.nid == nid;
910 for (i = 0; i < lpni->lpni_pref_nnids; i++) {
911 if (lpni->lpni_pref.nids[i] == nid)
918 * Set a single ni as preferred, provided no preferred ni is already
919 * defined. Only to be used for non-multi-rail peer_ni.
922 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
926 spin_lock(&lpni->lpni_lock);
927 if (nid == LNET_NID_ANY) {
929 } else if (lpni->lpni_pref_nnids > 0) {
931 } else if (lpni->lpni_pref_nnids == 0) {
932 lpni->lpni_pref.nid = nid;
933 lpni->lpni_pref_nnids = 1;
934 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
936 spin_unlock(&lpni->lpni_lock);
938 CDEBUG(D_NET, "peer %s nid %s: %d\n",
939 libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
944 * Clear the preferred NID from a non-multi-rail peer_ni, provided
945 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
948 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
952 spin_lock(&lpni->lpni_lock);
953 if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
954 lpni->lpni_pref_nnids = 0;
955 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
956 } else if (lpni->lpni_pref_nnids == 0) {
961 spin_unlock(&lpni->lpni_lock);
963 CDEBUG(D_NET, "peer %s: %d\n",
964 libcfs_nid2str(lpni->lpni_nid), rc);
969 * Clear the preferred NIDs from a non-multi-rail peer.
972 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
974 struct lnet_peer_ni *lpni = NULL;
976 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
977 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
981 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
983 lnet_nid_t *nids = NULL;
984 lnet_nid_t *oldnids = NULL;
985 struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
990 if (nid == LNET_NID_ANY) {
995 if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
1000 /* A non-MR node may have only one preferred NI per peer_ni */
1001 if (lpni->lpni_pref_nnids > 0) {
1002 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1008 if (lpni->lpni_pref_nnids != 0) {
1009 size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
1010 LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
1015 for (i = 0; i < lpni->lpni_pref_nnids; i++) {
1016 if (lpni->lpni_pref.nids[i] == nid) {
1017 LIBCFS_FREE(nids, size);
1021 nids[i] = lpni->lpni_pref.nids[i];
1026 lnet_net_lock(LNET_LOCK_EX);
1027 spin_lock(&lpni->lpni_lock);
1028 if (lpni->lpni_pref_nnids == 0) {
1029 lpni->lpni_pref.nid = nid;
1031 oldnids = lpni->lpni_pref.nids;
1032 lpni->lpni_pref.nids = nids;
1034 lpni->lpni_pref_nnids++;
1035 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1036 spin_unlock(&lpni->lpni_lock);
1037 lnet_net_unlock(LNET_LOCK_EX);
1040 if (oldnids)
1041 CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids - 1);
1044 if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1045 spin_lock(&lpni->lpni_lock);
1046 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1047 spin_unlock(&lpni->lpni_lock);
1049 CDEBUG(D_NET, "peer %s nid %s: %d\n",
1050 libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1055 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1057 lnet_nid_t *nids = NULL;
1058 lnet_nid_t *oldnids = NULL;
1059 struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1064 if (lpni->lpni_pref_nnids == 0) {
1069 if (lpni->lpni_pref_nnids == 1) {
1070 if (lpni->lpni_pref.nid != nid) {
1074 } else if (lpni->lpni_pref_nnids == 2) {
1075 if (lpni->lpni_pref.nids[0] != nid &&
1076 lpni->lpni_pref.nids[1] != nid) {
1081 size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
1082 LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
1087 for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
1088 if (lpni->lpni_pref.nids[i] != nid)
1090 nids[j++] = lpni->lpni_pref.nids[i];
1092 /* Check if we actually removed a nid. */
1093 if (j == lpni->lpni_pref_nnids) {
1094 LIBCFS_FREE(nids, size);
1100 lnet_net_lock(LNET_LOCK_EX);
1101 spin_lock(&lpni->lpni_lock);
1102 if (lpni->lpni_pref_nnids == 1) {
1103 lpni->lpni_pref.nid = LNET_NID_ANY;
1104 } else if (lpni->lpni_pref_nnids == 2) {
1105 oldnids = lpni->lpni_pref.nids;
1106 if (oldnids[0] == nid)
1107 lpni->lpni_pref.nid = oldnids[1];
1109 lpni->lpni_pref.nid = oldnids[0];
1111 oldnids = lpni->lpni_pref.nids;
1112 lpni->lpni_pref.nids = nids;
1114 lpni->lpni_pref_nnids--;
1115 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1116 spin_unlock(&lpni->lpni_lock);
1117 lnet_net_unlock(LNET_LOCK_EX);
1120 if (oldnids)
1121 CFS_FREE_PTR_ARRAY(oldnids, lpni->lpni_pref_nnids + 1);
1124 CDEBUG(D_NET, "peer %s nid %s: %d\n",
1125 libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1130 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1132 struct lnet_peer_ni *lpni;
1133 lnet_nid_t primary_nid = nid;
1135 lpni = lnet_find_peer_ni_locked(nid);
1137 primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1138 lnet_peer_ni_decref_locked(lpni);
1145 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1146 __must_hold(&lp->lp_lock)
1148 if (lnet_peer_discovery_disabled)
1151 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1152 (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1163 lnet_is_discovery_disabled(struct lnet_peer *lp)
1167 spin_lock(&lp->lp_lock);
1168 rc = lnet_is_discovery_disabled_locked(lp);
1169 spin_unlock(&lp->lp_lock);
1175 LNetPrimaryNID(lnet_nid_t nid)
1177 struct lnet_peer *lp;
1178 struct lnet_peer_ni *lpni;
1179 lnet_nid_t primary_nid = nid;
1183 if (nid == LNET_NID_LO_0)
1184 return LNET_NID_LO_0;
1186 cpt = lnet_net_lock_current();
1187 lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1192 lp = lpni->lpni_peer_net->lpn_peer;
1194 while (!lnet_peer_is_uptodate(lp)) {
1195 rc = lnet_discover_peer_locked(lpni, cpt, true);
1198 lp = lpni->lpni_peer_net->lpn_peer;
1200 /* Only try once if discovery is disabled */
1201 if (lnet_is_discovery_disabled(lp))
1204 primary_nid = lp->lp_primary_nid;
1206 lnet_peer_ni_decref_locked(lpni);
1208 lnet_net_unlock(cpt);
1210 CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1211 libcfs_nid2str(primary_nid), rc);
1214 EXPORT_SYMBOL(LNetPrimaryNID);
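/*
 * Editorial sketch, not part of the original file: since all NIDs of
 * a Multi-Rail peer resolve to one primary NID, an upper layer could
 * use LNetPrimaryNID() to decide whether two NIDs belong to the same
 * peer. Note that the call may block while discovery runs.
 */
static bool example_same_peer(lnet_nid_t nid1, lnet_nid_t nid2)
{
	return LNetPrimaryNID(nid1) == LNetPrimaryNID(nid2);
}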
1216 struct lnet_peer_net *
1217 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1219 struct lnet_peer_net *peer_net;
1220 list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1221 if (peer_net->lpn_net_id == net_id)
1228 * Attach a peer_ni to a peer_net and peer. This function assumes
1229 * peer_ni is not already attached to the peer_net/peer. The peer_ni
1230 * may be attached to a different peer, in which case it will be
1231 * properly detached first. The whole operation is done atomically.
1233 * Always returns 0. This is the last function called from functions
1234 * that do return an int, so returning 0 here allows the compiler to
1235 * do a tail call.
1238 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1239 struct lnet_peer_net *lpn,
1240 struct lnet_peer_ni *lpni,
1243 struct lnet_peer_table *ptable;
1245 /* Install the new peer_ni */
1246 lnet_net_lock(LNET_LOCK_EX);
1247 /* Add peer_ni to global peer table hash, if necessary. */
1248 if (list_empty(&lpni->lpni_hashlist)) {
1249 int hash = lnet_nid2peerhash(lpni->lpni_nid);
1251 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1252 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1253 ptable->pt_version++;
1254 /* This is the 1st refcount on lpni. */
1255 atomic_inc(&lpni->lpni_refcount);
1258 /* Detach the peer_ni from an existing peer, if necessary. */
1259 if (lpni->lpni_peer_net) {
1260 LASSERT(lpni->lpni_peer_net != lpn);
1261 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1262 lnet_peer_detach_peer_ni_locked(lpni);
1263 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1264 lpni->lpni_peer_net = NULL;
1267 /* Add peer_ni to peer_net */
1268 lpni->lpni_peer_net = lpn;
1269 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1270 lnet_update_peer_net_healthv(lpni);
1271 lnet_peer_net_addref_locked(lpn);
1273 /* Add peer_net to peer */
1274 if (!lpn->lpn_peer) {
1276 list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1277 lnet_peer_addref_locked(lp);
1280 /* Add peer to global peer list, if necessary */
1281 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1282 if (list_empty(&lp->lp_peer_list)) {
1283 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1288 /* Update peer state */
1289 spin_lock(&lp->lp_lock);
1290 if (flags & LNET_PEER_CONFIGURED) {
1291 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1292 lp->lp_state |= LNET_PEER_CONFIGURED;
1294 if (flags & LNET_PEER_MULTI_RAIL) {
1295 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1296 lp->lp_state |= LNET_PEER_MULTI_RAIL;
1297 lnet_peer_clr_non_mr_pref_nids(lp);
1300 spin_unlock(&lp->lp_lock);
1303 lnet_net_unlock(LNET_LOCK_EX);
1305 CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1306 libcfs_nid2str(lp->lp_primary_nid),
1307 libcfs_nid2str(lpni->lpni_nid), flags);
1313 * Create a new peer, with nid as its primary nid.
1315 * Call with the lnet_api_mutex held.
1318 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1320 struct lnet_peer *lp;
1321 struct lnet_peer_net *lpn;
1322 struct lnet_peer_ni *lpni;
1325 LASSERT(nid != LNET_NID_ANY);
1328 * No need for the lnet_net_lock here, because the
1329 * lnet_api_mutex is held.
1331 lpni = lnet_find_peer_ni_locked(nid);
1333 /* A peer with this NID already exists. */
1334 lp = lpni->lpni_peer_net->lpn_peer;
1335 lnet_peer_ni_decref_locked(lpni);
1337 * This is an error if the peer was configured and the
1338 * primary NID differs or an attempt is made to change
1339 * the Multi-Rail flag. Otherwise the assumption is
1340 * that an existing peer is being modified.
1342 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1343 if (lp->lp_primary_nid != nid)
1345 else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1349 /* Delete and recreate as a configured peer. */
1353 /* Create peer, peer_net, and peer_ni. */
1355 lp = lnet_peer_alloc(nid);
1358 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1361 lpni = lnet_peer_ni_alloc(nid);
1365 return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1368 LIBCFS_FREE(lpn, sizeof(*lpn));
1370 LIBCFS_FREE(lp, sizeof(*lp));
1372 CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1373 libcfs_nid2str(nid), flags, rc);
1378 * Add a NID to a peer. Call with ln_api_mutex held.
1381 * -EPERM: Non-DLC addition to a DLC-configured peer.
1382 * -EEXIST: The NID was configured by DLC for a different peer.
1383 * -ENOMEM: Out of memory.
1384 * -ENOTUNIQ: Adding a second peer NID on a single network on a
1385 * non-multi-rail peer.
1388 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1390 struct lnet_peer_net *lpn;
1391 struct lnet_peer_ni *lpni;
1395 LASSERT(nid != LNET_NID_ANY);
1397 /* A configured peer can only be updated through configuration. */
1398 if (!(flags & LNET_PEER_CONFIGURED)) {
1399 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1406 * The MULTI_RAIL flag can be set but not cleared, because
1407 * that would leave the peer struct in an invalid state.
1409 if (flags & LNET_PEER_MULTI_RAIL) {
1410 spin_lock(&lp->lp_lock);
1411 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1412 lp->lp_state |= LNET_PEER_MULTI_RAIL;
1413 lnet_peer_clr_non_mr_pref_nids(lp);
1415 spin_unlock(&lp->lp_lock);
1416 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1421 lpni = lnet_find_peer_ni_locked(nid);
1424 * A peer_ni already exists. This is only a problem if
1425 * it is not connected to this peer and was configured
1426 * by DLC for a different peer.
1428 lnet_peer_ni_decref_locked(lpni);
1429 if (lpni->lpni_peer_net->lpn_peer == lp)
1431 if (lnet_peer_ni_is_configured(lpni)) {
1435 /* If this is the primary NID, destroy the peer. */
1436 if (lnet_peer_ni_is_primary(lpni)) {
1437 struct lnet_peer *rtr_lp =
1438 lpni->lpni_peer_net->lpn_peer;
1439 int rtr_refcount = rtr_lp->lp_rtr_refcount;
1441 * if we're trying to delete a router it means
1442 * we're moving this peer NI to a new peer, so we must
1443 * transfer the router properties to the new peer
1445 if (rtr_refcount > 0) {
1446 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1447 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1449 lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1450 lpni = lnet_peer_ni_alloc(nid);
1457 lpni = lnet_peer_ni_alloc(nid);
1465 * Get the peer_net. Check that we're not adding a second
1466 * peer_ni on a peer_net of a non-multi-rail peer.
1468 lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1470 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1475 } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1480 return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1483 /* If the peer_ni was allocated above, its peer_net pointer is NULL */
1484 if (!lpni->lpni_peer_net)
1485 LIBCFS_FREE(lpni, sizeof(*lpni));
1487 CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1488 libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1494 * Update the primary NID of a peer, if possible.
1496 * Call with the lnet_api_mutex held.
1499 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1501 lnet_nid_t old = lp->lp_primary_nid;
1504 if (lp->lp_primary_nid == nid)
1506 rc = lnet_peer_add_nid(lp, nid, flags);
1509 lp->lp_primary_nid = nid;
1511 CDEBUG(D_NET, "peer %s NID %s: %d\n",
1512 libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1517 * lpni creation initiated due to traffic, either sending or receiving.
1520 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1522 struct lnet_peer *lp;
1523 struct lnet_peer_net *lpn;
1524 struct lnet_peer_ni *lpni;
1528 if (nid == LNET_NID_ANY) {
1533 /* lnet_net_lock is not needed here because ln_api_lock is held */
1534 lpni = lnet_find_peer_ni_locked(nid);
1537 * We must have raced with another thread. Since we
1538 * know next to nothing about a peer_ni created by
1539 * traffic, we just assume everything is ok and return.
1542 lnet_peer_ni_decref_locked(lpni);
1546 /* Create peer, peer_net, and peer_ni. */
1548 lp = lnet_peer_alloc(nid);
1551 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1554 lpni = lnet_peer_ni_alloc(nid);
1557 if (pref != LNET_NID_ANY)
1558 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1560 return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1563 LIBCFS_FREE(lpn, sizeof(*lpn));
1565 LIBCFS_FREE(lp, sizeof(*lp));
1567 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1572 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1574 * This API handles the following combinations:
1575 * Create a peer with its primary NI if only the prim_nid is provided
1576 * Add a NID to a peer identified by the prim_nid. The peer identified
1577 * by the prim_nid must already exist.
1578 * The peer being created may be non-MR.
1580 * The caller must hold ln_api_mutex. This prevents the peer from
1581 * being created/modified/deleted by a different thread.
1584 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1586 struct lnet_peer *lp = NULL;
1587 struct lnet_peer_ni *lpni;
1590 /* The prim_nid must always be specified */
1591 if (prim_nid == LNET_NID_ANY)
1594 flags = LNET_PEER_CONFIGURED;
1596 flags |= LNET_PEER_MULTI_RAIL;
1599 * If nid isn't specified, we must create a new peer with
1600 * prim_nid as its primary nid.
1602 if (nid == LNET_NID_ANY)
1603 return lnet_peer_add(prim_nid, flags);
1605 /* Look up the prim_nid, which must exist. */
1606 lpni = lnet_find_peer_ni_locked(prim_nid);
1609 lnet_peer_ni_decref_locked(lpni);
1610 lp = lpni->lpni_peer_net->lpn_peer;
1612 /* Peer must have been configured. */
1613 if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1614 CDEBUG(D_NET, "peer %s was not configured\n",
1615 libcfs_nid2str(prim_nid));
1619 /* Primary NID must match */
1620 if (lp->lp_primary_nid != prim_nid) {
1621 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1622 libcfs_nid2str(prim_nid),
1623 libcfs_nid2str(lp->lp_primary_nid));
1627 /* Multi-Rail flag must match. */
1628 if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1629 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1630 libcfs_nid2str(prim_nid));
1634 return lnet_peer_add_nid(lp, nid, flags);
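/*
 * Editorial sketch, not part of the original file: the combinations
 * described above, exercised by a hypothetical helper. Assumes
 * ln_api_mutex is held by the caller.
 */
static int example_configure_peer(lnet_nid_t prim, lnet_nid_t second)
{
	int rc;

	/* Create a Multi-Rail peer whose primary NID is prim. */
	rc = lnet_add_peer_ni(prim, LNET_NID_ANY, true);
	if (rc)
		return rc;
	/* Add a second NID to the peer that now exists. */
	return lnet_add_peer_ni(prim, second, true);
}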
1638 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1640 * This API handles the following combinations:
1641 * Delete a NI from a peer if both prim_nid and nid are provided.
1642 * Delete a peer if only prim_nid is provided.
1643 * Delete a peer if its primary nid is provided.
1645 * The caller must hold ln_api_mutex. This prevents the peer from
1646 * being modified/deleted by a different thread.
1649 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1651 struct lnet_peer *lp;
1652 struct lnet_peer_ni *lpni;
1655 if (prim_nid == LNET_NID_ANY)
1658 lpni = lnet_find_peer_ni_locked(prim_nid);
1661 lnet_peer_ni_decref_locked(lpni);
1662 lp = lpni->lpni_peer_net->lpn_peer;
1664 if (prim_nid != lp->lp_primary_nid) {
1665 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1666 libcfs_nid2str(prim_nid),
1667 libcfs_nid2str(lp->lp_primary_nid));
1671 lnet_net_lock(LNET_LOCK_EX);
1672 if (lp->lp_rtr_refcount > 0) {
1673 lnet_net_unlock(LNET_LOCK_EX);
1674 CERROR("%s is a router. Can not be deleted\n",
1675 libcfs_nid2str(prim_nid));
1678 lnet_net_unlock(LNET_LOCK_EX);
1680 if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1681 return lnet_peer_del(lp);
1683 flags = LNET_PEER_CONFIGURED;
1684 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1685 flags |= LNET_PEER_MULTI_RAIL;
1687 return lnet_peer_del_nid(lp, nid, flags);
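/*
 * Editorial sketch, not part of the original file: the deletion
 * combinations described above. Assumes ln_api_mutex is held and
 * that the peer is not a router (which would return an error).
 */
static int example_unconfigure_peer(lnet_nid_t prim, lnet_nid_t nid)
{
	int rc;

	/* Remove a single NID; the peer itself remains. */
	rc = lnet_del_peer_ni(prim, nid);
	if (rc)
		return rc;
	/* Passing LNET_NID_ANY deletes the whole peer. */
	return lnet_del_peer_ni(prim, LNET_NID_ANY);
}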
1691 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
1693 struct lnet_peer_table *ptable;
1694 struct lnet_peer_net *lpn;
1696 CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1698 LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
1699 LASSERT(list_empty(&lpni->lpni_txq));
1700 LASSERT(lpni->lpni_txqnob == 0);
1701 LASSERT(list_empty(&lpni->lpni_peer_nis));
1702 LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1704 lpn = lpni->lpni_peer_net;
1705 lpni->lpni_peer_net = NULL;
1706 lpni->lpni_net = NULL;
1708 /* remove the peer ni from the zombie list */
1709 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1710 spin_lock(&ptable->pt_zombie_lock);
1711 list_del_init(&lpni->lpni_hashlist);
1712 ptable->pt_zombies--;
1713 spin_unlock(&ptable->pt_zombie_lock);
1715 if (lpni->lpni_pref_nnids > 1)
1716 CFS_FREE_PTR_ARRAY(lpni->lpni_pref.nids, lpni->lpni_pref_nnids);
1718 LIBCFS_FREE(lpni, sizeof(*lpni));
1720 lnet_peer_net_decref_locked(lpn);
1723 struct lnet_peer_ni *
1724 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1726 struct lnet_peer_ni *lpni = NULL;
1729 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1730 return ERR_PTR(-ESHUTDOWN);
1733 * find if a peer_ni already exists.
1734 * If so then just return that.
1736 lpni = lnet_find_peer_ni_locked(nid);
1740 lnet_net_unlock(cpt);
1742 rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1745 goto out_net_relock;
1748 lpni = lnet_find_peer_ni_locked(nid);
1758 * Get a peer_ni for the given nid, create it if necessary. Takes a
1759 * hold on the peer_ni.
1761 struct lnet_peer_ni *
1762 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1764 struct lnet_peer_ni *lpni = NULL;
1767 if (the_lnet.ln_state != LNET_STATE_RUNNING)
1768 return ERR_PTR(-ESHUTDOWN);
1771 * find if a peer_ni already exists.
1772 * If so then just return that.
1774 lpni = lnet_find_peer_ni_locked(nid);
1780 * use the lnet_api_mutex to serialize the creation of the peer_ni
1781 * and the creation/deletion of the local ni/net. When a local ni is
1782 * created, if there exists a set of peer_nis on that network,
1783 * they need to be traversed and updated. When a local NI is
1784 * deleted, which could result in a network being deleted, then
1785 * all peer nis on that network need to be removed as well.
1787 * Creation through traffic should also be serialized with
1788 * creation through DLC.
1790 lnet_net_unlock(cpt);
1791 mutex_lock(&the_lnet.ln_api_mutex);
1793 * Shutdown is only set under the ln_api_lock, so a single
1794 * check here is sufficient.
1796 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1797 lpni = ERR_PTR(-ESHUTDOWN);
1798 goto out_mutex_unlock;
1801 rc = lnet_peer_ni_traffic_add(nid, pref);
1804 goto out_mutex_unlock;
1807 lpni = lnet_find_peer_ni_locked(nid);
1811 mutex_unlock(&the_lnet.ln_api_mutex);
1814 /* Lock has been dropped, check again for shutdown. */
1815 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1817 lnet_peer_ni_decref_locked(lpni);
1818 lpni = ERR_PTR(-ESHUTDOWN);
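/*
 * Editorial sketch, not part of the original file: the
 * lookup-or-create pattern served by lnet_nid2peerni_locked(). The
 * returned lpni carries a reference that the hypothetical caller
 * must drop; errors come back as ERR_PTR() values.
 */
static void example_touch_peer_ni(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni;

	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni))
		return;		/* e.g. -ESHUTDOWN */
	/* ... use lpni under the lnet_net_lock for this cpt ... */
	lnet_peer_ni_decref_locked(lpni);
}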
1825 lnet_peer_gw_discovery(struct lnet_peer *lp)
1829 spin_lock(&lp->lp_lock);
1830 if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
1832 spin_unlock(&lp->lp_lock);
1838 lnet_peer_is_uptodate(struct lnet_peer *lp)
1842 spin_lock(&lp->lp_lock);
1843 rc = lnet_peer_is_uptodate_locked(lp);
1844 spin_unlock(&lp->lp_lock);
1849 * Is a peer uptodate from the point of view of discovery?
1851 * If it is currently being processed, obviously not.
1852 * A forced Ping or Push is also handled by the discovery thread.
1854 * Otherwise look at whether the peer needs rediscovering.
1857 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
1858 __must_hold(&lp->lp_lock)
1862 if (lp->lp_state & (LNET_PEER_DISCOVERING |
1863 LNET_PEER_FORCE_PING |
1864 LNET_PEER_FORCE_PUSH)) {
1866 } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
1868 } else if (lnet_peer_needs_push(lp)) {
1870 } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
1871 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
1883 * Queue a peer for the attention of the discovery thread. Call with
1884 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
1885 * -EALREADY if the peer was already queued.
1887 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
1891 spin_lock(&lp->lp_lock);
1892 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
1893 lp->lp_state |= LNET_PEER_DISCOVERING;
1894 spin_unlock(&lp->lp_lock);
1895 if (list_empty(&lp->lp_dc_list)) {
1896 lnet_peer_addref_locked(lp);
1897 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
1898 wake_up(&the_lnet.ln_dc_waitq);
1904 CDEBUG(D_NET, "Queue peer %s: %d\n",
1905 libcfs_nid2str(lp->lp_primary_nid), rc);
1911 * Discovery of a peer is complete. Wake all waiters on the peer.
1912 * Call with lnet_net_lock/EX held.
1914 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
1916 struct lnet_msg *msg, *tmp;
1918 LIST_HEAD(pending_msgs);
1920 CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
1921 libcfs_nid2str(lp->lp_primary_nid));
1923 list_del_init(&lp->lp_dc_list);
1924 spin_lock(&lp->lp_lock);
1925 list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
1926 spin_unlock(&lp->lp_lock);
1927 wake_up_all(&lp->lp_dc_waitq);
1929 if (lp->lp_rtr_refcount > 0)
1930 lnet_router_discovery_complete(lp);
1932 lnet_net_unlock(LNET_LOCK_EX);
1934 /* iterate through all pending messages and send them again */
1935 list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
1936 list_del_init(&msg->msg_list);
1937 if (lp->lp_dc_error) {
1938 lnet_finalize(msg, lp->lp_dc_error);
1942 CDEBUG(D_NET, "sending pending message %s to target %s\n",
1943 lnet_msgtyp2str(msg->msg_type),
1944 libcfs_id2str(msg->msg_target));
1945 rc = lnet_send(msg->msg_src_nid_param, msg,
1946 msg->msg_rtr_nid_param);
1948 CNETERR("Error sending %s to %s: %d\n",
1949 lnet_msgtyp2str(msg->msg_type),
1950 libcfs_id2str(msg->msg_target), rc);
1951 lnet_finalize(msg, rc);
1954 lnet_net_lock(LNET_LOCK_EX);
1955 lnet_peer_decref_locked(lp);
1959 * Handle inbound push.
1960 * Like any event handler, called with lnet_res_lock/CPT held.
1962 void lnet_peer_push_event(struct lnet_event *ev)
1964 struct lnet_ping_buffer *pbuf;
1965 struct lnet_peer *lp;
1967 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
1969 /* lnet_find_peer() adds a refcount */
1970 lp = lnet_find_peer(ev->source.nid);
1972 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
1973 libcfs_nid2str(ev->initiator.nid),
1974 libcfs_nid2str(ev->source.nid));
1975 pbuf->pb_needs_post = true;
1979 /* Ensure peer state remains consistent while we modify it. */
1980 spin_lock(&lp->lp_lock);
1983 * If some kind of error happened, the contents of the message
1984 * cannot be used. Clear the NIDS_UPTODATE flag and set the
1985 * FORCE_PING flag to trigger a ping.
1988 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1989 lp->lp_state |= LNET_PEER_FORCE_PING;
1990 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
1992 libcfs_nid2str(lp->lp_primary_nid),
1993 libcfs_nid2str(ev->source.nid));
1998 * A push with invalid or corrupted info. Clear the UPTODATE
1999 * flag to trigger a ping.
2001 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2002 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2003 lp->lp_state |= LNET_PEER_FORCE_PING;
2004 CDEBUG(D_NET, "Corrupted Push from %s\n",
2005 libcfs_nid2str(lp->lp_primary_nid));
2010 * Make sure we'll allocate the correct size ping buffer when pinging the peer.
2013 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2014 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2017 * A non-Multi-Rail peer is not supposed to be capable of sending a Push.
2020 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2021 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2022 libcfs_nid2str(lp->lp_primary_nid));
2027 * The peer may have discovery disabled at its end. Set
2028 * NO_DISCOVERY as appropriate.
2030 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2031 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2032 libcfs_nid2str(lp->lp_primary_nid));
2034 * If the peer is going from discovery enabled to
2035 * discovery disabled, we need to reflect that in our
2036 * representation of the peer.
2038 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2039 LNET_PEER_DISCOVERING)))
2040 lp->lp_state |= LNET_PEER_MARK_DELETION;
2041 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2042 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2043 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2044 libcfs_nid2str(lp->lp_primary_nid));
2045 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2049 * Update the MULTI_RAIL flag based on the push. If the peer
2050 * was configured with DLC then the setting should match what DLC is saying.
2052 * NB: We verified above that the MR feature bit is set in pi_features
2054 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2055 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2056 libcfs_nid2str(lp->lp_primary_nid), lp);
2057 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2058 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2059 libcfs_nid2str(lp->lp_primary_nid));
2060 } else if (lnet_peer_discovery_disabled) {
2061 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2062 libcfs_nid2str(lp->lp_primary_nid), lp);
2063 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2064 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2065 libcfs_nid2str(lp->lp_primary_nid), lp);
2067 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2068 libcfs_nid2str(lp->lp_primary_nid), lp);
2069 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2070 lnet_peer_clr_non_mr_pref_nids(lp);
2074 * Check for truncation of the Put message. Clear the
2075 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2076 * and tell discovery to allocate a bigger buffer.
2078 if (ev->mlength < ev->rlength) {
2079 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2080 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2081 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2082 lp->lp_state |= LNET_PEER_FORCE_PING;
2083 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2084 libcfs_nid2str(lp->lp_primary_nid),
2085 pbuf->pb_info.pi_nnis);
2089 /* always assume new data */
2090 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2091 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2094 * If there is data present that hasn't been processed yet,
2095 * we'll replace it if the Put contained newer data and it
2096 * fits. We're racing with a Ping or an earlier Push in this case.
2099 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2100 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2101 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2102 pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2103 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2104 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2105 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2106 libcfs_nid2str(lp->lp_primary_nid),
2107 LNET_PING_BUFFER_SEQNO(pbuf),
2108 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2114 * Allocate a buffer to copy the data. On a failure we drop
2115 * the Push and set FORCE_PING to force the discovery
2116 * thread to fix the problem by pinging the peer.
2118 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2120 lp->lp_state |= LNET_PEER_FORCE_PING;
2121 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2122 libcfs_nid2str(lp->lp_primary_nid),
2123 LNET_PING_BUFFER_SEQNO(pbuf));
2128 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2129 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2130 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2131 CDEBUG(D_NET, "Received Push %s %u\n",
2132 libcfs_nid2str(lp->lp_primary_nid),
2133 LNET_PING_BUFFER_SEQNO(pbuf));
2136 /* We've processed this buffer. It can be reposted */
2137 pbuf->pb_needs_post = true;
2140 * Queue the peer for discovery if not already queued; if it was
2141 * already queued, force it onto the request queue and wake the
2142 * discovery thread, because its status changed.
2144 spin_unlock(&lp->lp_lock);
2145 lnet_net_lock(LNET_LOCK_EX);
2146 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2147 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2148 wake_up(&the_lnet.ln_dc_waitq);
2150 /* Drop refcount from lookup */
2151 lnet_peer_decref_locked(lp);
2152 lnet_net_unlock(LNET_LOCK_EX);
2156 * Clear the discovery error state, unless we're already discovering
2157 * this peer, in which case the error is current.
2159 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2161 spin_lock(&lp->lp_lock);
2162 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2163 lp->lp_dc_error = 0;
2164 spin_unlock(&lp->lp_lock);
2168 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2169 * dropped/retaken within this function. An lnet_peer_ni is passed in
2170 * because discovery could tear down an lnet_peer.
2173 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2176 struct lnet_peer *lp;
2181 lnet_net_unlock(cpt);
2182 lnet_net_lock(LNET_LOCK_EX);
2183 lp = lpni->lpni_peer_net->lpn_peer;
2184 lnet_peer_clear_discovery_error(lp);
2187 * We're willing to be interrupted. The lpni can become a
2188 * zombie if we race with DLC, so we must check for that.
2191 /* Keep lp alive when the lnet_net_lock is unlocked */
2192 lnet_peer_addref_locked(lp);
2193 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2194 if (signal_pending(current))
2196 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2199 * Don't repeat discovery if discovery is disabled. This is
2200 * done to ensure we can use discovery as a standard ping as
2201 * well for backwards compatibility with routers which do not
2202 * have discovery or have discovery disabled.
2204 if (lnet_is_discovery_disabled(lp) && count > 0)
2206 if (lp->lp_dc_error)
2208 if (lnet_peer_is_uptodate(lp))
2210 lnet_peer_queue_for_discovery(lp);
2212 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2215 * If the caller requested a non-blocking operation then
2216 * return immediately. Once discovery is complete any
2217 * pending messages that were stopped due to discovery
2218 * will be transmitted.
2223 lnet_net_unlock(LNET_LOCK_EX);
2225 finish_wait(&lp->lp_dc_waitq, &wait);
2226 lnet_net_lock(LNET_LOCK_EX);
2227 lnet_peer_decref_locked(lp);
2228 /* Peer may have changed */
2229 lp = lpni->lpni_peer_net->lpn_peer;
2231 finish_wait(&lp->lp_dc_waitq, &wait);
2233 lnet_net_unlock(LNET_LOCK_EX);
2235 lnet_peer_decref_locked(lp);
2237 * The peer may have changed, so re-check and rediscover if that turns
2238 * out to have been the case. The reference count on lp ensured that
2239 * even if it was unlinked from lpni the memory could not be recycled.
2240 * Thus the check below is sufficient to determine whether the peer
2241 * changed. If the peer changed, then lp must not be dereferenced.
2243 if (lp != lpni->lpni_peer_net->lpn_peer)
2246 if (signal_pending(current))
2248 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2250 else if (lp->lp_dc_error)
2251 rc = lp->lp_dc_error;
2253 CDEBUG(D_NET, "non-blocking discovery\n");
2254 else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2257 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2258 (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2259 libcfs_nid2str(lpni->lpni_nid), rc,
2260 (!block) ? "pending discovery" : "discovery complete");
2265 /* Handle an incoming ack for a push. */
2267 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2269 struct lnet_ping_buffer *pbuf;
2271 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2272 spin_lock(&lp->lp_lock);
2273 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2274 lp->lp_push_error = ev->status;
2276 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2278 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2279 spin_unlock(&lp->lp_lock);
2281 CDEBUG(D_NET, "peer %s ev->status %d\n",
2282 libcfs_nid2str(lp->lp_primary_nid), ev->status);
2285 /* Handle a Reply message. This is the reply to a Ping message. */
2287 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2289 struct lnet_ping_buffer *pbuf;
2292 spin_lock(&lp->lp_lock);
2295 * If some kind of error happened, the contents of the message
2296 * cannot be used. Set PING_FAILED to trigger a retry.
2299 lp->lp_state |= LNET_PEER_PING_FAILED;
2300 lp->lp_ping_error = ev->status;
2301 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2303 libcfs_nid2str(lp->lp_primary_nid),
2304 libcfs_nid2str(ev->source.nid));
2308 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2309 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2310 lnet_swap_pinginfo(pbuf);
2313 * A reply with invalid or corrupted info. Set PING_FAILED to
2314 * trigger a retry.
2316 rc = lnet_ping_info_validate(&pbuf->pb_info);
2318 lp->lp_state |= LNET_PEER_PING_FAILED;
2319 lp->lp_ping_error = 0;
2320 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2321 libcfs_nid2str(lp->lp_primary_nid), rc);
2327 * The peer may have discovery disabled at its end. Set
2328 * NO_DISCOVERY as appropriate.
2330 if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2331 !lnet_peer_discovery_disabled) {
2332 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2333 libcfs_nid2str(lp->lp_primary_nid));
2334 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2336 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2337 libcfs_nid2str(lp->lp_primary_nid));
2338 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2342 * Update the MULTI_RAIL flag based on the reply. If the peer
2343 * was configured with DLC then the setting should match what
2346 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2347 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2348 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2349 libcfs_nid2str(lp->lp_primary_nid), lp);
2350 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2351 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2352 libcfs_nid2str(lp->lp_primary_nid));
2353 } else if (lnet_peer_discovery_disabled) {
2355 "peer %s(%p) not MR: DD disabled locally\n",
2356 libcfs_nid2str(lp->lp_primary_nid), lp);
2357 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2359 "peer %s(%p) not MR: DD disabled remotely\n",
2360 libcfs_nid2str(lp->lp_primary_nid), lp);
2362 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2363 libcfs_nid2str(lp->lp_primary_nid), lp);
2364 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2365 lnet_peer_clr_non_mr_pref_nids(lp);
2367 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2368 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2369 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2370 libcfs_nid2str(lp->lp_primary_nid));
2371 } else {
2372 CERROR("Multi-Rail state vanished from %s\n",
2373 libcfs_nid2str(lp->lp_primary_nid));
2374 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2379 * Make sure we'll allocate the correct size ping buffer when
2380 * pinging the peer.
2382 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2383 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2386 * Check for truncation of the Reply. Clear PING_SENT and set
2387 * PING_FAILED to trigger a retry.
2389 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2390 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2391 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2392 lp->lp_state |= LNET_PEER_PING_FAILED;
2393 lp->lp_ping_error = 0;
2394 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2395 libcfs_nid2str(lp->lp_primary_nid),
2396 pbuf->pb_info.pi_nnis);
2401 * Check the sequence numbers in the reply. These are only
2402 * available if the reply came from a Multi-Rail peer.
2404 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2405 pbuf->pb_info.pi_nnis > 1 &&
2406 lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2407 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2408 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2409 libcfs_nid2str(lp->lp_primary_nid),
2410 LNET_PING_BUFFER_SEQNO(pbuf),
2411 lp->lp_peer_seqno);
2412 else
2413 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2416 /* We're happy with the state of the data in the buffer. */
2417 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2418 libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2419 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2420 lnet_ping_buffer_decref(lp->lp_data);
2422 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2423 lnet_ping_buffer_addref(pbuf);
2426 lp->lp_state &= ~LNET_PEER_PING_SENT;
2427 spin_unlock(&lp->lp_lock);
2429 lnet_net_lock(LNET_LOCK_EX);
2431 * If this peer is a gateway, call the routing callback to
2432 * handle the ping reply
2434 if (lp->lp_rtr_refcount > 0)
2435 lnet_router_discovery_ping_reply(lp);
2436 lnet_net_unlock(LNET_LOCK_EX);
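/*
 * An illustrative, standalone sketch of the checking order used by
 * lnet_discovery_event_reply() above: transport status first, then
 * magic/endianness, structural validation, feature bits, truncation,
 * and finally the multi-rail sequence-number staleness test. Every
 * demo_* name is invented for illustration; this is not LNet API.
 */
#if 0	/* sketch only; not compiled into this file */
#include <stdint.h>

struct demo_reply {
	int	 status;	/* transport status, 0 on success */
	uint32_t magic;		/* protocol magic, possibly byte-swapped */
	uint32_t features;	/* feature bits advertised by the peer */
	int	 nnis;		/* NIDs carried in the reply */
	int	 buf_nnis;	/* NIDs the local buffer can hold */
	uint32_t seqno;		/* sender's ping sequence number */
};

#define DEMO_MAGIC	0x70696e67u
#define DEMO_FEAT_MR	0x1u

enum demo_verdict { DEMO_OK, DEMO_RETRY, DEMO_STALE };

static enum demo_verdict
demo_check_reply(struct demo_reply *r, uint32_t have_seqno)
{
	if (r->status)			/* transport error: retry */
		return DEMO_RETRY;
	if (r->magic == __builtin_bswap32(DEMO_MAGIC))
		r->magic = DEMO_MAGIC;	/* peer has opposite endianness */
	if (r->magic != DEMO_MAGIC)	/* corrupted contents: retry */
		return DEMO_RETRY;
	if (r->buf_nnis < r->nnis)	/* truncated: grow buffer, retry */
		return DEMO_RETRY;
	/* Sequence numbers are only meaningful for multi-rail peers. */
	if ((r->features & DEMO_FEAT_MR) && r->seqno < have_seqno)
		return DEMO_STALE;	/* peer may have rebooted */
	return DEMO_OK;
}
#endif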
2440 * Send event handling. Only matters for error cases, where we clean
2441 * up state on the peer and peer_ni that would otherwise be updated in
2442 * the REPLY event handler for a successful Ping, and the ACK event
2443 * handler for a successful Push.
2446 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2453 spin_lock(&lp->lp_lock);
2454 if (ev->msg_type == LNET_MSG_GET) {
2455 lp->lp_state &= ~LNET_PEER_PING_SENT;
2456 lp->lp_state |= LNET_PEER_PING_FAILED;
2457 lp->lp_ping_error = ev->status;
2458 } else { /* ev->msg_type == LNET_MSG_PUT */
2459 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2460 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2461 lp->lp_push_error = ev->status;
2463 spin_unlock(&lp->lp_lock);
2464 rc = LNET_REDISCOVER_PEER;
2466 CDEBUG(D_NET, "%s Send to %s: %d\n",
2467 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2468 libcfs_nid2str(ev->target.nid), rc);
2473 * Unlink event handling. This event is only seen if a call to
2474 * LNetMDUnlink() caused the event to be unlinked. If this call was
2475 * made after the event was set up in LNetGet() or LNetPut() then we
2476 * assume the Ping or Push timed out.
2479 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2481 spin_lock(&lp->lp_lock);
2482 /* We've passed through LNetGet() */
2483 if (lp->lp_state & LNET_PEER_PING_SENT) {
2484 lp->lp_state &= ~LNET_PEER_PING_SENT;
2485 lp->lp_state |= LNET_PEER_PING_FAILED;
2486 lp->lp_ping_error = -ETIMEDOUT;
2487 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2488 libcfs_nid2str(lp->lp_primary_nid));
2490 /* We've passed through LNetPut() */
2491 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2492 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2493 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2494 lp->lp_push_error = -ETIMEDOUT;
2495 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2496 libcfs_nid2str(lp->lp_primary_nid));
2498 spin_unlock(&lp->lp_lock);
2502 * Event handler for the discovery EQ.
2504 * Called with lnet_res_lock(cpt) held. The cpt is the
2505 * lnet_cpt_of_cookie() of the md handle cookie.
2507 static void lnet_discovery_event_handler(struct lnet_event *event)
2509 struct lnet_peer *lp = event->md_user_ptr;
2510 struct lnet_ping_buffer *pbuf;
2513 /* discovery needs to take another look */
2514 rc = LNET_REDISCOVER_PEER;
2516 CDEBUG(D_NET, "Received event: %d\n", event->type);
2518 switch (event->type) {
2519 case LNET_EVENT_ACK:
2520 lnet_discovery_event_ack(lp, event);
2522 case LNET_EVENT_REPLY:
2523 lnet_discovery_event_reply(lp, event);
2525 case LNET_EVENT_SEND:
2526 /* Only send failure triggers a retry. */
2527 rc = lnet_discovery_event_send(lp, event);
2529 case LNET_EVENT_UNLINK:
2530 /* LNetMDUnlink() was called */
2531 lnet_discovery_event_unlink(lp, event);
2534 /* Invalid events. */
2537 lnet_net_lock(LNET_LOCK_EX);
2538 if (event->unlinked) {
2539 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2540 lnet_ping_buffer_decref(pbuf);
2541 lnet_peer_decref_locked(lp);
2544 /* put peer back at end of request queue, if discovery not already
2545 * complete */
2546 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2547 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2548 wake_up(&the_lnet.ln_dc_waitq);
2550 lnet_net_unlock(LNET_LOCK_EX);
2554 * Build a peer from incoming data.
2556 * The NIDs in the incoming data are supposed to be structured as follows:
2557 * - loopback
2558 * - primary NID
2559 * - other NIDs in same net
2560 * - NIDs in second net
2561 * - NIDs in third net
2562 * - ...
2563 * This is due to the way the list of NIDs in the data is created.
2565 * Note that this function will mark the peer uptodate unless an
2566 * ENOMEM is encountered. All other errors are due to a conflict
2567 * between the DLC configuration and what discovery sees. We treat DLC
2568 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2569 * peer from becoming stuck in discovery. (An illustrative sketch of
2570 * the add/del NID diff computed below follows this function.)
2571 static int lnet_peer_merge_data(struct lnet_peer *lp,
2572 struct lnet_ping_buffer *pbuf)
2574 struct lnet_peer_ni *lpni;
2575 lnet_nid_t *curnis = NULL;
2576 struct lnet_ni_status *addnis = NULL;
2577 lnet_nid_t *delnis = NULL;
2587 flags = LNET_PEER_DISCOVERED;
2588 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2589 flags |= LNET_PEER_MULTI_RAIL;
2592 * Cache the routing feature for the peer; whether it is enabled
2593 * or disabled as reported by the remote peer.
2595 spin_lock(&lp->lp_lock);
2596 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2597 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2599 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2600 spin_unlock(&lp->lp_lock);
2602 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2603 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2604 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2605 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2606 if (!curnis || !addnis || !delnis) {
2614 /* Construct the list of NIDs present in peer. */
2616 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2617 curnis[ncurnis++] = lpni->lpni_nid;
2620 * Check for NIDs in pbuf not present in curnis[].
2621 * The loop starts at 1 to skip the loopback NID.
2623 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2624 for (j = 0; j < ncurnis; j++)
2625 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2628 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2631 * Check for NIDs in curnis[] not present in pbuf.
2632 * The nested loop starts at 1 to skip the loopback NID.
2634 * But never add the loopback NID to delnis[]: if it is
2635 * present in curnis[] then this peer is for this node.
2637 for (i = 0; i < ncurnis; i++) {
2638 if (curnis[i] == LNET_NID_LO_0)
2640 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2641 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2643 * update the information we cache for the
2644 * peer with the latest information we
2645 * received
2647 lpni = lnet_find_peer_ni_locked(curnis[i]);
2649 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2650 lnet_peer_ni_decref_locked(lpni);
2655 if (j == pbuf->pb_info.pi_nnis)
2656 delnis[ndelnis++] = curnis[i];
2660 * If we get here and discovery is disabled then we don't want
2661 * to add or delete any NIs. We've just updated the ones we have
2662 * information on; call it a day.
2665 if (lnet_is_discovery_disabled(lp))
2668 for (i = 0; i < naddnis; i++) {
2669 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2671 CERROR("Error adding NID %s to peer %s: %d\n",
2672 libcfs_nid2str(addnis[i].ns_nid),
2673 libcfs_nid2str(lp->lp_primary_nid), rc);
2677 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2679 lpni->lpni_ns_status = addnis[i].ns_status;
2680 lnet_peer_ni_decref_locked(lpni);
2684 for (i = 0; i < ndelnis; i++) {
2686 * for routers it's okay to delete the primary_nid because
2687 * the upper layers don't really rely on it. So if we're
2688 * being told that the router changed its primary_nid
2689 * then it's okay to delete it.
2691 if (lp->lp_rtr_refcount > 0)
2692 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2693 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2695 CERROR("Error deleting NID %s from peer %s: %d\n",
2696 libcfs_nid2str(delnis[i]),
2697 libcfs_nid2str(lp->lp_primary_nid), rc);
2703 * Errors other than -ENOMEM are due to peers having been
2704 * configured with DLC. Ignore these because DLC overrides
2705 * Discovery.
2709 CFS_FREE_PTR_ARRAY(curnis, nnis);
2710 CFS_FREE_PTR_ARRAY(addnis, nnis);
2711 CFS_FREE_PTR_ARRAY(delnis, nnis);
2712 lnet_ping_buffer_decref(pbuf);
2713 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2716 spin_lock(&lp->lp_lock);
2717 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2718 lp->lp_state |= LNET_PEER_FORCE_PING;
2719 spin_unlock(&lp->lp_lock);
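/*
 * A standalone sketch of the add/del diff computed by
 * lnet_peer_merge_data() above: entries of the received list missing
 * from the cached list are additions, cached entries missing from the
 * received list are deletions, index 0 of the received list is skipped
 * as the loopback NID, and the loopback NID is never deleted. All
 * names are invented for illustration; this is not LNet API.
 */
#if 0	/* sketch only; not compiled into this file */
#include <stddef.h>
#include <stdint.h>

#define DEMO_NID_LO	0ULL	/* stand-in for LNET_NID_LO_0 */

static void
demo_nid_diff(const uint64_t *cur, size_t ncur,
	      const uint64_t *rcvd, size_t nrcvd,
	      uint64_t *add, size_t *nadd,
	      uint64_t *del, size_t *ndel)
{
	size_t i, j;

	*nadd = *ndel = 0;
	/* Received but not cached: add. Skip the loopback at index 0. */
	for (i = 1; i < nrcvd; i++) {
		for (j = 0; j < ncur; j++)
			if (rcvd[i] == cur[j])
				break;
		if (j == ncur)
			add[(*nadd)++] = rcvd[i];
	}
	/* Cached but not received: delete, except the loopback NID. */
	for (i = 0; i < ncur; i++) {
		if (cur[i] == DEMO_NID_LO)
			continue;
		for (j = 1; j < nrcvd; j++)
			if (cur[i] == rcvd[j])
				break;
		if (j == nrcvd)
			del[(*ndel)++] = cur[i];
	}
}
#endif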
2725 * The data in pbuf says lp is its primary peer, but the data was
2726 * received by a different peer. Try to update lp with the data.
2729 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2731 struct lnet_handle_md mdh;
2733 /* Queue lp for discovery, and force it on the request queue. */
2734 lnet_net_lock(LNET_LOCK_EX);
2735 if (lnet_peer_queue_for_discovery(lp))
2736 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2737 lnet_net_unlock(LNET_LOCK_EX);
2739 LNetInvalidateMDHandle(&mdh);
2742 * Decide whether we can move the peer to the DATA_PRESENT state.
2744 * We replace stale data for a multi-rail peer, repair PING_FAILED
2745 * status, and preempt FORCE_PING.
2747 * If after that we have DATA_PRESENT, we merge it into this peer.
2749 spin_lock(&lp->lp_lock);
2750 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2751 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2752 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2753 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2754 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2755 lnet_ping_buffer_decref(pbuf);
2760 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2761 lnet_ping_buffer_decref(lp->lp_data);
2763 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2765 if (lp->lp_state & LNET_PEER_PING_FAILED) {
2766 mdh = lp->lp_ping_mdh;
2767 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2768 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2769 lp->lp_ping_error = 0;
2771 if (lp->lp_state & LNET_PEER_FORCE_PING)
2772 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2773 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2774 spin_unlock(&lp->lp_lock);
2776 if (!LNetMDHandleIsInvalid(mdh))
2777 LNetMDUnlink(mdh);
2779 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2780 return lnet_peer_merge_data(lp, pbuf);
2782 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2786 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2790 for (i = 0; i < pinfo->pi_nnis; i++) {
2791 if (pinfo->pi_ni[i].ns_nid == nid)
2799 * Update a peer using the data received.
2801 static int lnet_peer_data_present(struct lnet_peer *lp)
2802 __must_hold(&lp->lp_lock)
2804 struct lnet_ping_buffer *pbuf;
2805 struct lnet_peer_ni *lpni;
2806 lnet_nid_t nid = LNET_NID_ANY;
2812 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2813 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2814 spin_unlock(&lp->lp_lock);
2817 * Modifications of peer structures are done while holding the
2818 * ln_api_mutex. A global lock is required because we may be
2819 * modifying multiple peer structures, and a mutex greatly
2820 * simplifies memory management.
2822 * The actual changes to the data structures must also protect
2823 * against concurrent lookups, for which the lnet_net_lock in
2824 * LNET_LOCK_EX mode is used.
2826 mutex_lock(&the_lnet.ln_api_mutex);
2827 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2833 * If this peer is not on the peer list then it is being torn
2834 * down, and our reference count may be all that is keeping it
2835 * alive. Don't do any work on it.
2837 if (list_empty(&lp->lp_peer_list))
2840 flags = LNET_PEER_DISCOVERED;
2841 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2842 flags |= LNET_PEER_MULTI_RAIL;
2845 * Check whether the primary NID in the message matches the
2846 * primary NID of the peer. If it does, update the peer, if
2847 * it does not, check whether there is already a peer with
2848 * that primary NID. If no such peer exists, try to update
2849 * the primary NID of the current peer (allowed if it was
2850 * created due to message traffic) and complete the update.
2851 * If the peer did exist, hand off the data to it.
2853 * The peer for the loopback interface is a special case: this
2854 * is the peer for the local node, and we want to set its
2855 * primary NID to the correct value here. Moreover, this peer
2856 * can show up with only the loopback NID in the ping buffer.
2858 if (pbuf->pb_info.pi_nnis <= 1)
2860 nid = pbuf->pb_info.pi_ni[1].ns_nid;
2861 if (lp->lp_primary_nid == LNET_NID_LO_0) {
2862 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2864 rc = lnet_peer_merge_data(lp, pbuf);
2866 * If the primary NID of the peer is present in the ping info returned
2867 * from the peer, but it's not the primary NID we have cached locally,
2868 * and discovery is disabled, then we don't want to update our local
2869 * peer info by adding or removing NIDs; we just want to update the
2870 * status of the NIDs that we currently have recorded in that peer.
2873 } else if (lp->lp_primary_nid == nid ||
2874 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
2875 lnet_is_discovery_disabled(lp))) {
2876 rc = lnet_peer_merge_data(lp, pbuf);
2878 lpni = lnet_find_peer_ni_locked(nid);
2880 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2882 CERROR("Primary NID error %s versus %s: %d\n",
2883 libcfs_nid2str(lp->lp_primary_nid),
2884 libcfs_nid2str(nid), rc);
2886 rc = lnet_peer_merge_data(lp, pbuf);
2889 struct lnet_peer *new_lp;
2890 new_lp = lpni->lpni_peer_net->lpn_peer;
2892 * if lp has discovery/MR enabled that means new_lp
2893 * should have discovery/MR enabled as well, since
2894 * it's the same peer, which we're about to merge
2896 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
2897 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2898 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2899 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
2901 rc = lnet_peer_set_primary_data(new_lp, pbuf);
2902 lnet_consolidate_routes_locked(lp, new_lp);
2903 lnet_peer_ni_decref_locked(lpni);
2907 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
2908 libcfs_nid2str(lp->lp_primary_nid), lp, rc, lp->lp_state);
2909 mutex_unlock(&the_lnet.ln_api_mutex);
2911 spin_lock(&lp->lp_lock);
2912 /* Tell discovery to re-check the peer immediately. */
2914 rc = LNET_REDISCOVER_PEER;
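/*
 * The primary-NID handling above picks one of three outcomes. A
 * condensed sketch of that decision as a pure function; the enum and
 * parameter names are invented for illustration, not LNet API.
 */
#if 0	/* sketch only; not compiled into this file */
#include <stdbool.h>
#include <stdint.h>

enum demo_action {
	DEMO_SET_PRIMARY_AND_MERGE,	/* loopback peer, or NID unknown */
	DEMO_MERGE_ONLY,		/* primary matches, or DD + NID cached */
	DEMO_HAND_OFF,			/* another peer owns the primary NID */
};

static enum demo_action
demo_primary_action(uint64_t cached_primary, uint64_t reply_primary,
		    bool reply_lists_cached_primary, bool discovery_disabled,
		    bool other_peer_owns_reply_primary, uint64_t nid_lo)
{
	if (cached_primary == nid_lo)
		return DEMO_SET_PRIMARY_AND_MERGE;
	if (cached_primary == reply_primary ||
	    (reply_lists_cached_primary && discovery_disabled))
		return DEMO_MERGE_ONLY;
	if (!other_peer_owns_reply_primary)
		return DEMO_SET_PRIMARY_AND_MERGE;
	return DEMO_HAND_OFF;
}
#endif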
2919 * A ping failed. Clear the PING_FAILED state and set the
2920 * FORCE_PING state, to ensure a retry even if discovery is
2921 * disabled. This avoids being left with incorrect state.
2923 static int lnet_peer_ping_failed(struct lnet_peer *lp)
2924 __must_hold(&lp->lp_lock)
2926 struct lnet_handle_md mdh;
2929 mdh = lp->lp_ping_mdh;
2930 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2931 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2932 lp->lp_state |= LNET_PEER_FORCE_PING;
2933 rc = lp->lp_ping_error;
2934 lp->lp_ping_error = 0;
2935 spin_unlock(&lp->lp_lock);
2937 if (!LNetMDHandleIsInvalid(mdh))
2938 LNetMDUnlink(mdh);
2940 CDEBUG(D_NET, "peer %s:%d\n",
2941 libcfs_nid2str(lp->lp_primary_nid), rc);
2943 spin_lock(&lp->lp_lock);
2944 return rc ? rc : LNET_REDISCOVER_PEER;
2948 * Select NID to send a Ping or Push to.
2950 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
2952 struct lnet_peer_ni *lpni;
2954 /* Look for a direct-connected NID for this peer. */
2956 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2957 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
2962 return lpni->lpni_nid;
2964 /* Look for a routed-connected NID for this peer. */
2966 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2967 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
2972 return lpni->lpni_nid;
2974 return LNET_NID_ANY;
2977 /* Active side of ping. */
2978 static int lnet_peer_send_ping(struct lnet_peer *lp)
2979 __must_hold(&lp->lp_lock)
2986 lp->lp_state |= LNET_PEER_PING_SENT;
2987 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2988 spin_unlock(&lp->lp_lock);
2990 cpt = lnet_net_lock_current();
2991 /* Refcount for MD. */
2992 lnet_peer_addref_locked(lp);
2993 pnid = lnet_peer_select_nid(lp);
2994 lnet_net_unlock(cpt);
2996 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
2998 rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
2999 the_lnet.ln_dc_handler, false);
3002 * if LNetMDBind in lnet_send_ping fails we need to decrement the
3003 * refcount on the peer, otherwise LNetMDUnlink will be called
3004 * which will eventually do that.
3008 lnet_peer_decref_locked(lp);
3009 lnet_net_unlock(cpt);
3010 rc = -rc; /* change rc to a negative value */
3012 } else if (rc < 0) {
3016 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3018 spin_lock(&lp->lp_lock);
3022 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3024 * The errors that get us here are considered hard errors and
3025 * cause Discovery to terminate. So we clear PING_SENT, but do
3026 * not set either PING_FAILED or FORCE_PING. In fact we need
3027 * to clear PING_FAILED, because the unlink event handler will
3028 * have set it if we called LNetMDUnlink() above.
3030 spin_lock(&lp->lp_lock);
3031 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3036 * This function exists because you cannot call LNetMDUnlink() from an
3037 * event handler.
3039 static int lnet_peer_push_failed(struct lnet_peer *lp)
3040 __must_hold(&lp->lp_lock)
3042 struct lnet_handle_md mdh;
3045 mdh = lp->lp_push_mdh;
3046 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3047 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3048 rc = lp->lp_push_error;
3049 lp->lp_push_error = 0;
3050 spin_unlock(&lp->lp_lock);
3052 if (!LNetMDHandleIsInvalid(mdh))
3053 LNetMDUnlink(mdh);
3055 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3056 spin_lock(&lp->lp_lock);
3057 return rc ? rc : LNET_REDISCOVER_PEER;
3060 /* Active side of push. */
3061 static int lnet_peer_send_push(struct lnet_peer *lp)
3062 __must_hold(&lp->lp_lock)
3064 struct lnet_ping_buffer *pbuf;
3065 struct lnet_process_id id;
3070 /* Don't push to a non-multi-rail peer. */
3071 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3072 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3076 lp->lp_state |= LNET_PEER_PUSH_SENT;
3077 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3078 spin_unlock(&lp->lp_lock);
3080 cpt = lnet_net_lock_current();
3081 pbuf = the_lnet.ln_ping_target;
3082 lnet_ping_buffer_addref(pbuf);
3083 lnet_net_unlock(cpt);
3085 /* Push source MD */
3086 md.start = &pbuf->pb_info;
3087 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3088 md.threshold = 2; /* Put/Ack */
3091 md.handler = the_lnet.ln_dc_handler;
3094 rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
3096 lnet_ping_buffer_decref(pbuf);
3097 CERROR("Can't bind push source MD: %d\n", rc);
3100 cpt = lnet_net_lock_current();
3101 /* Refcount for MD. */
3102 lnet_peer_addref_locked(lp);
3103 id.pid = LNET_PID_LUSTRE;
3104 id.nid = lnet_peer_select_nid(lp);
3105 lnet_net_unlock(cpt);
3107 if (id.nid == LNET_NID_ANY) {
3112 rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
3113 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3114 LNET_PROTO_PING_MATCHBITS, 0, 0);
3119 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3121 spin_lock(&lp->lp_lock);
3125 LNetMDUnlink(lp->lp_push_mdh);
3126 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3128 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3130 * The errors that get us here are considered hard errors and
3131 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3132 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3133 * because the unlink event handler will have set it if we
3134 * called LNetMDUnlink() above.
3136 spin_lock(&lp->lp_lock);
3137 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3142 * An unrecoverable error was encountered during discovery.
3143 * Set error status in peer and abort discovery.
3145 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3147 CDEBUG(D_NET, "Discovery error %s: %d\n",
3148 libcfs_nid2str(lp->lp_primary_nid), error);
3150 spin_lock(&lp->lp_lock);
3151 lp->lp_dc_error = error;
3152 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3153 lp->lp_state |= LNET_PEER_REDISCOVER;
3154 spin_unlock(&lp->lp_lock);
3158 * Mark the peer as discovered.
3160 static int lnet_peer_discovered(struct lnet_peer *lp)
3161 __must_hold(&lp->lp_lock)
3163 lp->lp_state |= LNET_PEER_DISCOVERED;
3164 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3165 LNET_PEER_REDISCOVER);
3167 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3174 * Discovering this peer is taking too long. Cancel any Ping or Push
3175 * that discovery is waiting on by unlinking the relevant MDs. The
3176 * lnet_discovery_event_handler() will proceed from here and complete
3177 * the discovery.
3179 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3181 struct lnet_handle_md ping_mdh;
3182 struct lnet_handle_md push_mdh;
3184 LNetInvalidateMDHandle(&ping_mdh);
3185 LNetInvalidateMDHandle(&push_mdh);
3187 spin_lock(&lp->lp_lock);
3188 if (lp->lp_state & LNET_PEER_PING_SENT) {
3189 ping_mdh = lp->lp_ping_mdh;
3190 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3192 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3193 push_mdh = lp->lp_push_mdh;
3194 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3196 spin_unlock(&lp->lp_lock);
3198 if (!LNetMDHandleIsInvalid(ping_mdh))
3199 LNetMDUnlink(ping_mdh);
3200 if (!LNetMDHandleIsInvalid(push_mdh))
3201 LNetMDUnlink(push_mdh);
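/*
 * lnet_peer_cancel_discovery() above, like lnet_peer_ping_failed() and
 * lnet_peer_push_failed(), follows one pattern: claim the handle and
 * invalidate the stored copy under the lock, then perform the
 * potentially re-entrant unlink after dropping the lock, because the
 * unlink can fire the event handler. A generic userspace sketch of
 * that pattern; all demo_* names are invented for illustration.
 */
#if 0	/* sketch only; not compiled into this file */
#include <pthread.h>
#include <stddef.h>

struct demo_handle { void *cookie; };

struct demo_obj {
	pthread_mutex_t		lock;
	struct demo_handle	handle;
};

/* Stand-in for LNetMDUnlink(): may block or re-enter callbacks. */
static void demo_release(struct demo_handle h) { (void)h; }

static void demo_cancel(struct demo_obj *obj)
{
	struct demo_handle h;

	/* Claim the handle under the lock so only one thread gets it... */
	pthread_mutex_lock(&obj->lock);
	h = obj->handle;
	obj->handle.cookie = NULL;	/* invalidate the stored copy */
	pthread_mutex_unlock(&obj->lock);

	/* ...and release it outside the lock. */
	if (h.cookie != NULL)
		demo_release(h);
}
#endif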
3205 * Wait for work to be queued or some other change that must be
3206 * attended to. Returns non-zero if the discovery thread should shut
3207 * down.
3209 static int lnet_peer_discovery_wait_for_work(void)
3216 cpt = lnet_net_lock_current();
3218 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3219 TASK_INTERRUPTIBLE);
3220 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3222 if (lnet_push_target_resize_needed() ||
3223 the_lnet.ln_push_target->pb_needs_post)
3225 if (!list_empty(&the_lnet.ln_dc_request))
3227 if (!list_empty(&the_lnet.ln_msg_resend))
3229 lnet_net_unlock(cpt);
3232 * wake up at most every second to check if there are peers that
3233 * have been stuck on the working queue for longer than
3234 * the peer timeout.
3237 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3238 cpt = lnet_net_lock_current();
3240 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3242 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3245 lnet_net_unlock(cpt);
3247 CDEBUG(D_NET, "woken: %d\n", rc);
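/*
 * A userspace analogue of the wait loop above: block until work or a
 * stop request arrives, but wake at least once per second so peers
 * stuck on the working queue can be re-checked. Names are invented for
 * illustration; LNet itself uses prepare_to_wait()/schedule_timeout().
 */
#if 0	/* sketch only; not compiled into this file */
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct demo_queue {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
	bool		stopping;
	int		pending;	/* queued work items */
};

/* Returns true if the worker thread should shut down. */
static bool demo_wait_for_work(struct demo_queue *q)
{
	struct timespec ts;
	bool stop;

	pthread_mutex_lock(&q->lock);
	while (!q->stopping && q->pending == 0) {
		/* Cap the sleep at one second to re-check timeouts. */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		pthread_cond_timedwait(&q->cond, &q->lock, &ts);
	}
	stop = q->stopping;
	pthread_mutex_unlock(&q->lock);
	return stop;
}
#endif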
3253 * Messages that were pending on a destroyed peer will be put on a global
3254 * resend list. The message resend list will be checked by
3255 * the discovery thread when it wakes up, and will resend messages. These
3256 * messages can still be sendable if the lpni which was the initial
3257 * cause of the message re-queue was transferred to another peer.
3259 * It is possible that LNet could be shut down while we're iterating
3260 * through the list. lnet_shutdown_lndnets() will attempt to access the
3261 * resend list, but will have to wait until the spinlock is released, by
3262 * which time there shouldn't be any more messages on the resend list.
3263 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3264 * for the messages so they can be released. The other case is that
3265 * lnet_shutdown_lndnets() can finalize all the messages before this
3266 * function can visit the resend list, in which case this function will be
3267 * a no-op. (An illustrative sketch of the splice-then-drain pattern
3268 * used here follows the function.)
3269 static void lnet_resend_msgs(void)
3271 struct lnet_msg *msg, *tmp;
3275 spin_lock(&the_lnet.ln_msg_resend_lock);
3276 list_splice(&the_lnet.ln_msg_resend, &resend);
3277 spin_unlock(&the_lnet.ln_msg_resend_lock);
3279 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3280 list_del_init(&msg->msg_list);
3281 rc = lnet_send(msg->msg_src_nid_param, msg,
3282 msg->msg_rtr_nid_param);
3284 CNETERR("Error sending %s to %s: %d\n",
3285 lnet_msgtyp2str(msg->msg_type),
3286 libcfs_id2str(msg->msg_target), rc);
3287 lnet_finalize(msg, rc);
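/*
 * lnet_resend_msgs() above grabs the whole pending list in one splice
 * while holding the spinlock, then drains its private copy unlocked,
 * so producers are blocked only for the O(1) splice. A minimal sketch
 * of splice-then-drain with a singly linked list; names are invented
 * for illustration.
 */
#if 0	/* sketch only; not compiled into this file */
#include <pthread.h>
#include <stddef.h>

struct demo_msg {
	struct demo_msg	*next;
	/* payload ... */
};

static pthread_mutex_t demo_resend_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_msg *demo_resend_head;

/* Stand-in for "resend, or finalize on failure". */
static void demo_handle(struct demo_msg *msg) { (void)msg; }

static void demo_resend_msgs(void)
{
	struct demo_msg *list, *msg;

	/* Take the whole list in O(1) while holding the lock... */
	pthread_mutex_lock(&demo_resend_lock);
	list = demo_resend_head;
	demo_resend_head = NULL;
	pthread_mutex_unlock(&demo_resend_lock);

	/* ...and drain the private copy with the lock dropped. */
	while ((msg = list) != NULL) {
		list = msg->next;
		msg->next = NULL;
		demo_handle(msg);
	}
}
#endif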
3292 /* The discovery thread. */
3293 static int lnet_peer_discovery(void *arg)
3295 struct lnet_peer *lp;
3298 wait_for_completion(&the_lnet.ln_started);
3300 CDEBUG(D_NET, "started\n");
3303 if (lnet_peer_discovery_wait_for_work())
3306 if (lnet_push_target_resize_needed())
3307 lnet_push_target_resize();
3308 else if (the_lnet.ln_push_target->pb_needs_post)
3309 lnet_push_target_post(the_lnet.ln_push_target,
3310 &the_lnet.ln_push_target_md);
3314 lnet_net_lock(LNET_LOCK_EX);
3315 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3316 lnet_net_unlock(LNET_LOCK_EX);
3321 * Process all incoming discovery work requests. When
3322 * discovery must wait on a peer to change state, it
3323 * is added to the tail of the ln_dc_working queue. A
3324 * timestamp keeps track of when the peer was added,
3325 * so we can time out discovery requests that take too
3326 * long.
3328 while (!list_empty(&the_lnet.ln_dc_request)) {
3329 lp = list_first_entry(&the_lnet.ln_dc_request,
3330 struct lnet_peer, lp_dc_list);
3331 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3333 * set the time the peer was put on the dc_working
3334 * queue. It shouldn't remain on the queue
3335 * forever, in case the GET message (for ping)
3336 * doesn't get a REPLY or the PUT message (for
3337 * push) doesn't get an ACK.
3339 lp->lp_last_queued = ktime_get_real_seconds();
3340 lnet_net_unlock(LNET_LOCK_EX);
3342 if (lnet_push_target_resize_needed())
3343 lnet_push_target_resize();
3344 else if (the_lnet.ln_push_target->pb_needs_post)
3345 lnet_push_target_post(the_lnet.ln_push_target,
3346 &the_lnet.ln_push_target_md);
3349 * Select an action depending on the state of
3350 * the peer and whether discovery is disabled.
3351 * The check whether discovery is disabled is
3352 * done after the code that handles processing
3353 * for arrived data, cleanup for failures, and
3354 * forcing a Ping or Push.
3356 spin_lock(&lp->lp_lock);
3357 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3358 libcfs_nid2str(lp->lp_primary_nid), lp,
3359 lp->lp_state);
3360 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3361 rc = lnet_peer_data_present(lp);
3362 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3363 rc = lnet_peer_ping_failed(lp);
3364 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3365 rc = lnet_peer_push_failed(lp);
3366 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3367 rc = lnet_peer_send_ping(lp);
3368 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3369 rc = lnet_peer_send_push(lp);
3370 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3371 rc = lnet_peer_send_ping(lp);
3372 else if (lnet_peer_needs_push(lp))
3373 rc = lnet_peer_send_push(lp);
3375 rc = lnet_peer_discovered(lp);
3376 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3377 libcfs_nid2str(lp->lp_primary_nid), lp,
3378 lp->lp_state, rc);
3379 spin_unlock(&lp->lp_lock);
3381 lnet_net_lock(LNET_LOCK_EX);
3382 if (rc == LNET_REDISCOVER_PEER) {
3383 list_move(&lp->lp_dc_list,
3384 &the_lnet.ln_dc_request);
3386 lnet_peer_discovery_error(lp, rc);
3388 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3389 lnet_peer_discovery_complete(lp);
3390 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3393 if (lp->lp_state & LNET_PEER_MARK_DELETION) {
3394 struct list_head rlist;
3395 struct lnet_route *route, *tmp;
3396 int sensitivity = lp->lp_health_sensitivity;
3398 INIT_LIST_HEAD(&rlist);
3401 * remove the peer from the discovery work
3402 * queue if it's on there, in preparation
3403 * for deleting the peer.
3405 if (!list_empty(&lp->lp_dc_list))
3406 list_del(&lp->lp_dc_list);
3408 lnet_net_unlock(LNET_LOCK_EX);
3410 mutex_lock(&the_lnet.ln_api_mutex);
3412 lnet_net_lock(LNET_LOCK_EX);
3413 list_for_each_entry_safe(route, tmp,
3416 lnet_move_route(route, NULL, &rlist);
3417 lnet_net_unlock(LNET_LOCK_EX);
3419 /* delete the peer */
3422 list_for_each_entry_safe(route, tmp,
3424 /* re-add these routes */
3425 lnet_add_route(route->lr_net,
3430 LIBCFS_FREE(route, sizeof(*route));
3432 mutex_unlock(&the_lnet.ln_api_mutex);
3434 lnet_net_lock(LNET_LOCK_EX);
3438 lnet_net_unlock(LNET_LOCK_EX);
3441 CDEBUG(D_NET, "stopping\n");
3443 * Clean up before telling lnet_peer_discovery_stop() that
3444 * we're done. Use wake_up() below to somewhat reduce the
3445 * size of the thundering herd if there are multiple threads
3446 * waiting on discovery of a single peer.
3449 /* Queue cleanup 1: stop all pending pings and pushes. */
3450 lnet_net_lock(LNET_LOCK_EX);
3451 while (!list_empty(&the_lnet.ln_dc_working)) {
3452 lp = list_first_entry(&the_lnet.ln_dc_working,
3453 struct lnet_peer, lp_dc_list);
3454 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3455 lnet_net_unlock(LNET_LOCK_EX);
3456 lnet_peer_cancel_discovery(lp);
3457 lnet_net_lock(LNET_LOCK_EX);
3459 lnet_net_unlock(LNET_LOCK_EX);
3461 /* Queue cleanup 2: wait for the expired queue to clear. */
3462 while (!list_empty(&the_lnet.ln_dc_expired))
3463 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3465 /* Queue cleanup 3: clear the request queue. */
3466 lnet_net_lock(LNET_LOCK_EX);
3467 while (!list_empty(&the_lnet.ln_dc_request)) {
3468 lp = list_first_entry(&the_lnet.ln_dc_request,
3469 struct lnet_peer, lp_dc_list);
3470 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3471 lnet_peer_discovery_complete(lp);
3473 lnet_net_unlock(LNET_LOCK_EX);
3475 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3476 the_lnet.ln_dc_handler = NULL;
3478 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3479 wake_up(&the_lnet.ln_dc_waitq);
3481 CDEBUG(D_NET, "stopped\n");
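/*
 * Each pass of the discovery loop above selects exactly one action per
 * peer, in a fixed priority order over the state bits. A condensed
 * sketch of that dispatch; the flag values and the enum are invented
 * for illustration, not the LNET_PEER_* definitions.
 */
#if 0	/* sketch only; not compiled into this file */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_DATA_PRESENT	0x01u
#define DEMO_PING_FAILED	0x02u
#define DEMO_PUSH_FAILED	0x04u
#define DEMO_FORCE_PING		0x08u
#define DEMO_FORCE_PUSH		0x10u
#define DEMO_NIDS_UPTODATE	0x20u

enum demo_step {
	DEMO_HANDLE_DATA,	/* cf. lnet_peer_data_present() */
	DEMO_PING_CLEANUP,	/* cf. lnet_peer_ping_failed() */
	DEMO_PUSH_CLEANUP,	/* cf. lnet_peer_push_failed() */
	DEMO_SEND_PING,		/* cf. lnet_peer_send_ping() */
	DEMO_SEND_PUSH,		/* cf. lnet_peer_send_push() */
	DEMO_DONE,		/* cf. lnet_peer_discovered() */
};

/* One action per pass; the highest-priority pending state wins. */
static enum demo_step demo_next_step(uint32_t state, bool needs_push)
{
	if (state & DEMO_DATA_PRESENT)
		return DEMO_HANDLE_DATA;
	if (state & DEMO_PING_FAILED)
		return DEMO_PING_CLEANUP;
	if (state & DEMO_PUSH_FAILED)
		return DEMO_PUSH_CLEANUP;
	if (state & DEMO_FORCE_PING)
		return DEMO_SEND_PING;
	if (state & DEMO_FORCE_PUSH)
		return DEMO_SEND_PUSH;
	if (!(state & DEMO_NIDS_UPTODATE))
		return DEMO_SEND_PING;
	if (needs_push)
		return DEMO_SEND_PUSH;
	return DEMO_DONE;
}
#endif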
3486 /* ln_api_mutex is held on entry. */
3487 int lnet_peer_discovery_start(void)
3489 struct task_struct *task;
3492 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3495 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3496 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3497 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3500 CERROR("Can't start peer discovery thread: %d\n", rc);
3502 the_lnet.ln_dc_handler = NULL;
3504 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3507 CDEBUG(D_NET, "discovery start: %d\n", rc);
3512 /* ln_api_mutex is held on entry. */
3513 void lnet_peer_discovery_stop(void)
3515 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3518 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3519 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3521 /* In the LNetNIInit() path we may be stopping discovery before it
3522 * has entered its work loop.
3524 if (!completion_done(&the_lnet.ln_started))
3525 complete(&the_lnet.ln_started);
3527 wake_up(&the_lnet.ln_dc_waitq);
3529 wait_event(the_lnet.ln_dc_waitq,
3530 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3532 LASSERT(list_empty(&the_lnet.ln_dc_request));
3533 LASSERT(list_empty(&the_lnet.ln_dc_working));
3534 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3536 CDEBUG(D_NET, "discovery stopped\n");
3542 lnet_debug_peer(lnet_nid_t nid)
3544 char *aliveness = "NA";
3545 struct lnet_peer_ni *lp;
3548 cpt = lnet_cpt_of_nid(nid, NULL);
3550 lnet_net_lock(cpt);
3551 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3553 lnet_net_unlock(cpt);
3554 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3558 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3559 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3561 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3562 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3563 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3564 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3565 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3567 lnet_peer_ni_decref_locked(lp);
3569 lnet_net_unlock(cpt);
3572 /* Gathering information for userspace. */
3574 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3575 char aliveness[LNET_MAX_STR_LEN],
3576 __u32 *cpt_iter, __u32 *refcount,
3577 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3578 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3579 __u32 *peer_tx_qnob)
3581 struct lnet_peer_table *peer_table;
3582 struct lnet_peer_ni *lp;
3587 /* get the number of CPTs */
3588 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3590 /* if the cpt number to be examined is >= the number of cpts in
3591 * the system then indicate that there are no more cpts to examine
3593 if (*cpt_iter >= lncpt)
3596 /* get the current table */
3597 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3598 /* if the ptable is NULL then there are no more cpts to examine */
3599 if (peer_table == NULL)
3602 lnet_net_lock(*cpt_iter);
3604 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3605 struct list_head *peers = &peer_table->pt_hash[j];
3607 list_for_each_entry(lp, peers, lpni_hashlist) {
3608 if (peer_index-- > 0)
3611 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3612 if (lnet_isrouter(lp) ||
3613 lnet_peer_aliveness_enabled(lp))
3614 snprintf(aliveness, LNET_MAX_STR_LEN,
3615 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3617 *nid = lp->lpni_nid;
3618 *refcount = atomic_read(&lp->lpni_refcount);
3619 *ni_peer_tx_credits =
3620 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3621 *peer_tx_credits = lp->lpni_txcredits;
3622 *peer_rtr_credits = lp->lpni_rtrcredits;
3623 *peer_min_rtr_credits = lp->lpni_mintxcredits;
3624 *peer_tx_qnob = lp->lpni_txqnob;
3630 lnet_net_unlock(*cpt_iter);
3634 return found ? 0 : -ENOENT;
3637 /* ln_api_mutex is held, which keeps the peer list stable */
3638 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3640 struct lnet_ioctl_element_stats *lpni_stats;
3641 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3642 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3643 struct lnet_peer_ni_credit_info *lpni_info;
3644 struct lnet_peer_ni *lpni;
3645 struct lnet_peer *lp;
3650 lp = lnet_find_peer(cfg->prcfg_prim_nid);
3657 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3658 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3659 size *= lp->lp_nnis;
3660 if (size > cfg->prcfg_size) {
3661 cfg->prcfg_size = size;
3666 cfg->prcfg_prim_nid = lp->lp_primary_nid;
3667 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3668 cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3669 cfg->prcfg_count = lp->lp_nnis;
3670 cfg->prcfg_size = size;
3671 cfg->prcfg_state = lp->lp_state;
3673 /* Allocate helper buffers. */
3675 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3678 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3681 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3682 if (!lpni_msg_stats)
3683 goto out_free_stats;
3684 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3686 goto out_free_msg_stats;
3691 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3692 nid = lpni->lpni_nid;
3693 if (copy_to_user(bulk, &nid, sizeof(nid)))
3694 goto out_free_hstats;
3695 bulk += sizeof(nid);
3697 memset(lpni_info, 0, sizeof(*lpni_info));
3698 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3699 if (lnet_isrouter(lpni) ||
3700 lnet_peer_aliveness_enabled(lpni))
3701 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3702 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3704 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3705 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3706 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3707 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3708 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3709 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3710 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3711 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3712 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3713 goto out_free_hstats;
3714 bulk += sizeof(*lpni_info);
3716 memset(lpni_stats, 0, sizeof(*lpni_stats));
3717 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3718 LNET_STATS_TYPE_SEND);
3719 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3720 LNET_STATS_TYPE_RECV);
3721 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3722 LNET_STATS_TYPE_DROP);
3723 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3724 goto out_free_hstats;
3725 bulk += sizeof(*lpni_stats);
3726 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3727 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3728 goto out_free_hstats;
3729 bulk += sizeof(*lpni_msg_stats);
3730 lpni_hstats->hlpni_network_timeout =
3731 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3732 lpni_hstats->hlpni_remote_dropped =
3733 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3734 lpni_hstats->hlpni_remote_timeout =
3735 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3736 lpni_hstats->hlpni_remote_error =
3737 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3738 lpni_hstats->hlpni_health_value =
3739 atomic_read(&lpni->lpni_healthv);
3740 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3741 goto out_free_hstats;
3742 bulk += sizeof(*lpni_hstats);
3747 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3749 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3751 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3753 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3755 lnet_peer_decref_locked(lp);
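/*
 * lnet_get_peer_info() above follows the common two-call ioctl
 * protocol: when the caller's buffer is too small, only the required
 * size is written back and the call fails, and the caller retries with
 * a larger buffer. A userspace sketch of the caller side; demo_* names
 * are invented for illustration and demo_get_info() stands in for the
 * ioctl path.
 */
#if 0	/* sketch only; not compiled into this file */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct demo_cfg {
	size_t size;	/* in: bulk buffer size; out: required size */
};

/* Fake kernel side: fail and report the size if the buffer is small. */
static int demo_get_info(struct demo_cfg *cfg, void *bulk)
{
	const size_t needed = 128;	/* pretend 128 bytes are required */

	if (bulk == NULL || cfg->size < needed) {
		cfg->size = needed;	/* report the required size */
		return -1;
	}
	memset(bulk, 0, needed);	/* pretend to fill the records */
	return 0;
}

/* Caller side: probe for the size, allocate, then fetch. */
static void *demo_fetch_info(struct demo_cfg *cfg)
{
	void *bulk;

	cfg->size = 0;
	if (demo_get_info(cfg, NULL) == 0)
		return NULL;		/* nothing to fetch */

	bulk = malloc(cfg->size);
	if (bulk != NULL && demo_get_info(cfg, bulk) != 0) {
		free(bulk);		/* e.g. peer changed between calls */
		bulk = NULL;
	}
	return bulk;
}
#endif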
3761 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3763 /* the monitor thread could've shut down and cleaned up the queues */
3764 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3767 if (list_empty(&lpni->lpni_recovery) &&
3768 atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3769 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3770 libcfs_nid2str(lpni->lpni_nid),
3771 atomic_read(&lpni->lpni_healthv));
3772 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3773 lnet_peer_ni_addref_locked(lpni);
3777 /* Call with the ln_api_mutex held */
3779 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3781 struct lnet_peer_table *ptable;
3782 struct lnet_peer *lp;
3783 struct lnet_peer_net *lpn;
3784 struct lnet_peer_ni *lpni;
3788 if (the_lnet.ln_state != LNET_STATE_RUNNING)
3792 lnet_net_lock(LNET_LOCK_EX);
3793 lpni = lnet_find_peer_ni_locked(nid);
3795 lnet_net_unlock(LNET_LOCK_EX);
3798 atomic_set(&lpni->lpni_healthv, value);
3799 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3800 lnet_peer_ni_decref_locked(lpni);
3801 lnet_net_unlock(LNET_LOCK_EX);
3805 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3808 * Walk all the peers and reset the healthv for each one to the
3809 * specified value.
3811 lnet_net_lock(LNET_LOCK_EX);
3812 for (cpt = 0; cpt < lncpt; cpt++) {
3813 ptable = the_lnet.ln_peer_tables[cpt];
3814 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3815 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3816 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3818 atomic_set(&lpni->lpni_healthv, value);
3819 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3824 lnet_net_unlock(LNET_LOCK_EX);