/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
lnet_peer_net_added(struct lnet_net *net)
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
lnet_peer_tables_destroy(void)
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			continue;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
lnet_peer_tables_create(void)
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = *nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));

	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = *nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * yourself.
	 */
	lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
lnet_destroy_peer_locked(struct lnet_peer *lp)
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * active.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected the lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid));
/* called with lnet_net_lock LNET_LOCK_EX held */
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
void lnet_peer_uninit(void)
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
lnet_peer_del_locked(struct lnet_peer *peer)
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
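/* Editor's note on the pattern above: the MD handles are snapshotted and
 * invalidated while lp_lock is held, but LNetMDUnlink() itself is only
 * called after the lock has been dropped, so the unlink never runs under
 * the peer spinlock.
 */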
lnet_peer_del(struct lnet_peer *peer)
	int rc;

	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	rc = lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * -EPERM: Non-DLC deletion from DLC-configured peer.
 * -ENOENT: No lnet_peer_ni corresponding to the nid.
 * -ECHILD: The lnet_peer_ni isn't connected to the peer.
 * -EBUSY: The lnet_peer_ni is the primary, and not the only peer_ni.
 */
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
	       flags, rc);

	return rc;
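/* Illustrative sketch (not part of the original source): a caller is
 * expected to derive the flags from the peer's current state before
 * delegating here, as lnet_del_peer_ni() further below does:
 *
 *	flags = LNET_PEER_CONFIGURED;
 *	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
 *		flags |= LNET_PEER_MULTI_RAIL;
 *	rc = lnet_peer_del_nid(lp, nid, flags);
 */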
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				      &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	struct lnet_nid gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, &gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
lnet_peer_tables_cleanup(struct lnet_net *net)
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (nid_same(&lpni->lpni_nid, nid))
			return lpni;
	}

	return NULL;
lnet_find_peer(struct lnet_nid *nid)
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
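/* Editor's note: lnet_find_peer() returns the peer with a reference
 * already taken; callers drop it with lnet_peer_decref_locked() when
 * they are done (see lnet_peer_push_event() below for an example).
 */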
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
	struct lnet_peer_net *net;

	/* no net id provided, return the first net */
	if (!prev_lpn_id) {
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);

			return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_first_entry(&peer->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		}
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if we reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
				       struct lnet_peer_net,
				       lpn_peer_nets);
		/* get the ni on it */
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_first_entry(&prev->lpni_peer_nis,
				struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
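/* Illustrative sketch (not part of the original source): passing NULL
 * for both peer_net and prev walks every peer_ni of a peer:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		visit(lpni);	// visit() is a hypothetical callback
 *
 * lnet_peer_clr_non_mr_pref_nids() below uses exactly this idiom.
 */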
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	int i = 0;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -ESHUTDOWN;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;

	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;

			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
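/* Editor's note: the sizing contract described in the comment above is
 * two-step: a caller whose buffer is too small gets E2BIG back along
 * with the desired size in *sizep, and retries with a buffer of at
 * least that size.
 */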
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
lnet_push_update_to_peers(int force)
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     struct lnet_nid *gw_nid)
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(gw_nid));
		if (nid_same(&ne->nl_nid, gw_nid))
			return true;
	}

	return false;
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
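/* Editor's note: lnet_peer_clr_pref_rtrs() above and
 * lnet_peer_clr_pref_nids() below share the same teardown pattern:
 * splice the entries onto a local "zombies" list under the net lock,
 * then free them only after the lock is dropped, so LIBCFS_FREE() is
 * never called while holding the lock.
 */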
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       struct lnet_nid *gw_nid)
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list cannot be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return nid_same(&lpni->lpni_pref.nid, nid);
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (nid_same(&ne->nl_nid, nid))
			return true;
	}

	return false;
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
				 struct lnet_nid *nid)
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);

	return rc;
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
	lpni->lpni_sel_priority = priority;

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	struct lnet_nid *tmp_nid = NULL;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 &&
	    nid_same(&lpni->lpni_pref.nid, nid)) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = &lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne1->nl_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);

			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = *tmp_nid;
		}

		ne1->nl_nid = *nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (!nid_same(&lpni->lpni_pref.nid, nid)) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne->nl_nid, nid))
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1) {
		lpni->lpni_pref.nid = LNET_ANY_NID;
	} else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
	struct lnet_peer_ni *lpni;

	*result = *nid;
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		*result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;

lnet_is_discovery_disabled(struct lnet_peer *lp)
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
	struct lnet_nid pnid = LNET_ANY_NID;
	bool mr;
	int i, rc = 0;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	for (i = 0; i < num_nids; i++) {
		struct lnet_nid nid;

		if (nids[i] == LNET_NID_LO_0)
			continue;

		lnet_nid4_to_nid(nids[i], &nid);
		if (LNET_NID_IS_ANY(&pnid)) {
			lnet_nid4_to_nid(nids[i], &pnid);
			rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, true);
		} else if (lnet_peer_discovery_disabled) {
			lnet_nid4_to_nid(nids[i], &nid);
			rc = lnet_add_peer_ni(&nid, &LNET_ANY_NID, mr, true);
		} else {
			lnet_nid4_to_nid(nids[i], &nid);
			rc = lnet_add_peer_ni(&pnid, &nid, mr, true);
		}

		if (rc && rc != -EEXIST)
			goto unlock;
	}

unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
EXPORT_SYMBOL(LNetAddPeer);
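/* Illustrative sketch (not part of the original source): creating one
 * Multi-Rail peer from an array of nid4 addresses. The first NID in the
 * array becomes the peer's primary NID, and -EEXIST is folded into
 * success, so pre-existing NIDs are not treated as errors:
 *
 *	lnet_nid_t nids[2] = { nid_a, nid_b };	// hypothetical values
 *	int rc = LNetAddPeer(nids, 2);
 */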
void LNetPrimaryNID(struct lnet_nid *nid)
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	struct lnet_nid orig;
	int rc = 0;
	int cpt;

	if (!nid || nid_is_lo0(nid))
		return;

	orig = *nid;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
	while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
		spin_unlock(&lp->lp_lock);

		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;

		/* The lpni (or lp) for this NID may have changed and our ref is
		 * the only thing keeping the old one around. Release the ref
		 * and lookup the lpni again
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_peer_ni_find_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;

		/* If we find that the peer has discovery disabled then we will
		 * not modify whatever primary NID is currently set for this
		 * peer. Thus, we can break out of this loop even if the peer
		 * is not fully up to date.
		 */
		if (lnet_is_discovery_disabled(lp))
			break;
	}

	*nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
	       libcfs_nidstr(nid), rc);
EXPORT_SYMBOL(LNetPrimaryNID);
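/* Illustrative sketch (not part of the original source): LNetPrimaryNID()
 * rewrites the NID in place, so resolving a peer NID to its primary NID
 * looks like:
 *
 *	struct lnet_nid nid = some_peer_nid;	// hypothetical value
 *	LNetPrimaryNID(&nid);	// nid now holds the peer's primary NID
 */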
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);

	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (!nid_same(&lp->lp_primary_nid, nid))
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (!(flags & LNET_PEER_CONFIGURED)) {
			if (nid_same(&lp->lp_primary_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate as a configured peer. */
		rc = lnet_peer_del(lp);
		if (rc)
			goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nidstr(nid), flags, rc);
	return rc;
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * -EPERM: Non-DLC addition to a DLC-configured peer.
 * -EEXIST: The NID was configured by DLC for a different peer.
 * -ENOMEM: Out of memory.
 * -ENOTUNIQ: Adding a second peer NID on a single network on a
 *            non-multi-rail peer.
 */
lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *rtr_lp =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = rtr_lp->lp_rtr_refcount;
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer, so we
			 * must transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(rtr_lp, lp);
			}
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
	       flags, rc);
	return rc;
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
			  unsigned int flags)
	struct lnet_nid old = lp->lp_primary_nid;
	int rc = 0;

	if (nid_same(&lp->lp_primary_nid, nid))
		goto out;

	lp->lp_primary_nid = *nid;

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		lp->lp_primary_nid = old;

out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nidstr(&old), libcfs_nidstr(nid), rc);

	return rc;
/*
 * lpni creation initiated due to traffic either sending or receiving.
 * Callers must hold ln_api_mutex
 * Ref taken on lnet_peer_ni returned by this function
 */
static struct lnet_peer_ni *
lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
__must_hold(&the_lnet.ln_api_mutex)
	struct lnet_peer *lp = NULL;
	struct lnet_peer_net *lpn = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out_err;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out_err;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	/* lnet_peer_attach_peer_ni() always returns 0 */
	rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

	lnet_peer_ni_addref_locked(lpni);
	goto out;

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out_err:
	lpni = ERR_PTR(rc);
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);

	return lpni;
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *	Create a peer with its primary NI if only the prim_nid is provided
 *	Add a NID to a peer identified by the prim_nid. The peer identified
 *	by the prim_nid must already exist.
 *	The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
		 bool temp)
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;

	/* The prim_nid must always be specified */
	if (LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	if (!temp)
		flags = LNET_PEER_CONFIGURED;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (LNET_NID_IS_ANY(nid))
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nidstr(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nidstr(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *	Delete a NI from a peer if both prim_nid and nid are provided.
 *	Delete a peer if only prim_nid is provided.
 *	Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid)
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nidstr(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid))
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
lnet_destroy_peer_ni_locked(struct kref *ref)
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
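/* Editor's note: this is the kref release callback for lpni_kref (see
 * kref_init() in lnet_peer_ni_alloc() above); it runs only when the
 * last reference is dropped, which is why it can assert that the
 * refcount is zero and that the peer_ni is off all lists.
 */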
struct lnet_peer_ni *
lnet_nid2peerni_ex(struct lnet_nid *nid)
__must_hold(&the_lnet.ln_api_mutex)
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(LNET_LOCK_EX);

	lpni = lnet_peer_ni_traffic_add(nid, NULL);

	lnet_net_lock(LNET_LOCK_EX);

	return lpni;
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_peerni_by_nid_locked(struct lnet_nid *nid,
			  struct lnet_nid *pref, int cpt)
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * the_lnet.ln_state is only modified under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state == LNET_STATE_RUNNING)
		lpni = lnet_peer_ni_traffic_add(nid, pref);
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR_OR_NULL(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
lnet_peer_gw_discovery(struct lnet_peer *lp)
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;

lnet_peer_is_uptodate(struct lnet_peer *lp)
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
	}

	return rc;
/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), rc);

	return rc;
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	if (dc_error) {
		lp->lp_dc_error = dc_error;
		lp->lp_state &= ~LNET_PEER_DISCOVERING;
		lp->lp_state |= LNET_PEER_REDISCOVER;
	}
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (dc_error) {
			lnet_finalize(msg, dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_idstr(&msg->msg_target));
		rc = lnet_send(&msg->msg_src_nid_param, msg,
			       &msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_idstr(&msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}

	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
2260 * Handle inbound push.
2261 * Like any event handler, called with lnet_res_lock/CPT held.
2263 void lnet_peer_push_event(struct lnet_event *ev)
2265 struct lnet_ping_buffer *pbuf;
2266 struct lnet_peer *lp;
2268 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2270 /* lnet_find_peer() adds a refcount */
2271 lp = lnet_find_peer(&ev->source.nid);
2273 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2274 libcfs_nidstr(&ev->initiator.nid),
2275 libcfs_nidstr(&ev->source.nid));
2276 pbuf->pb_needs_post = true;
2280 /* Ensure peer state remains consistent while we modify it. */
2281 spin_lock(&lp->lp_lock);
2284 * If some kind of error happened the contents of the message
2285 * cannot be used. Clear the NIDS_UPTODATE and set the
2286 * FORCE_PING flag to trigger a ping.
2289 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2290 lp->lp_state |= LNET_PEER_FORCE_PING;
2291 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2293 libcfs_nidstr(&lp->lp_primary_nid),
2294 libcfs_nidstr(&ev->source.nid));
2299 * A push with invalid or corrupted info. Clear the UPTODATE
2300 * flag to trigger a ping.
2302 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2303 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2304 lp->lp_state |= LNET_PEER_FORCE_PING;
2305 CDEBUG(D_NET, "Corrupted Push from %s\n",
2306 libcfs_nidstr(&lp->lp_primary_nid));
2311 * Make sure we'll allocate the correct size ping buffer when
2314 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2315 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2318 * A non-Multi-Rail peer is not supposed to be capable of
2321 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2322 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2323 libcfs_nidstr(&lp->lp_primary_nid));
2328 * The peer may have discovery disabled at its end. Set
2329 * NO_DISCOVERY as appropriate.
2331 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2332 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2333 libcfs_nidstr(&lp->lp_primary_nid));
2335 * Mark the peer for deletion if we already know about it
2336 * and it's going from discovery set to no discovery set
2338 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2339 LNET_PEER_DISCOVERING)) &&
2340 lp->lp_state & LNET_PEER_DISCOVERED) {
2341 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2342 libcfs_nidstr(&lp->lp_primary_nid),
2344 lp->lp_state |= LNET_PEER_MARK_DELETION;
2346 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2347 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2348 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2349 libcfs_nidstr(&lp->lp_primary_nid));
2350 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2354 * Update the MULTI_RAIL flag based on the push. If the peer
2355 * was configured with DLC then the setting should match what
2357 * NB: We verified above that the MR feature bit is set in pi_features
2359 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2360 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2361 libcfs_nidstr(&lp->lp_primary_nid), lp);
2362 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2363 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2364 libcfs_nidstr(&lp->lp_primary_nid));
2365 } else if (lnet_peer_discovery_disabled) {
2366 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2367 libcfs_nidstr(&lp->lp_primary_nid), lp);
2368 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2369 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2370 libcfs_nidstr(&lp->lp_primary_nid), lp);
2372 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2373 libcfs_nidstr(&lp->lp_primary_nid), lp);
2374 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2375 lnet_peer_clr_non_mr_pref_nids(lp);
2379 * Check for truncation of the Put message. Clear the
2380 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2381 * and tell discovery to allocate a bigger buffer.
2383 if (ev->mlength < ev->rlength) {
2384 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2385 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2386 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2387 lp->lp_state |= LNET_PEER_FORCE_PING;
2388 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2389 libcfs_nidstr(&lp->lp_primary_nid),
2390 pbuf->pb_info.pi_nnis);
2394 /* always assume new data */
2395 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2396 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2399 * If there is data present that hasn't been processed yet,
2400 * we'll replace it if the Put contained newer data and it
2401 * fits. We're racing with a Ping or earlier Push in this
2404 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2405 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2406 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2407 pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2408 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2409 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2410 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2411 libcfs_nidstr(&lp->lp_primary_nid),
2412 LNET_PING_BUFFER_SEQNO(pbuf),
2413 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2419 * Allocate a buffer to copy the data. On a failure we drop
2420 * the Push and set FORCE_PING to force the discovery
2421 * thread to fix the problem by pinging the peer.
2423 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2424 if (!lp->lp_data) {
2425 lp->lp_state |= LNET_PEER_FORCE_PING;
2426 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2427 libcfs_nidstr(&lp->lp_primary_nid),
2428 LNET_PING_BUFFER_SEQNO(pbuf));
2433 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2434 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2435 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2436 CDEBUG(D_NET, "Received Push %s %u\n",
2437 libcfs_nidstr(&lp->lp_primary_nid),
2438 LNET_PING_BUFFER_SEQNO(pbuf));
2441 /* We've processed this buffer. It can be reposted */
2442 pbuf->pb_needs_post = true;
2445 * Queue the peer for discovery if not done, force it on the request
2446 * queue and wake the discovery thread if the peer was already queued,
2447 * because its status changed.
2449 spin_unlock(&lp->lp_lock);
2450 lnet_net_lock(LNET_LOCK_EX);
2451 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2452 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2453 wake_up(&the_lnet.ln_dc_waitq);
2455 /* Drop refcount from lookup */
2456 lnet_peer_decref_locked(lp);
2457 lnet_net_unlock(LNET_LOCK_EX);
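/*
 * Illustrative sketch only, not part of the original file: the
 * Ping/Push race above reduces to "highest sequence number wins,
 * provided the new data fits the buffer we already hold". The helper
 * name is hypothetical.
 */
static bool example_push_supersedes(struct lnet_ping_buffer *newpb,
				    struct lnet_ping_buffer *held)
{
	/* Newer data, and small enough to copy over the held buffer. */
	return LNET_PING_BUFFER_SEQNO(newpb) > LNET_PING_BUFFER_SEQNO(held) &&
	       newpb->pb_info.pi_nnis <= held->pb_nnis;
}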
2461 * Clear the discovery error state, unless we're already discovering
2462 * this peer, in which case the error is current.
2464 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2466 spin_lock(&lp->lp_lock);
2467 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2468 lp->lp_dc_error = 0;
2469 spin_unlock(&lp->lp_lock);
2473 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2474 * dropped/retaken within this function. An lnet_peer_ni is passed in
2475 * because discovery could tear down an lnet_peer.
2478 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2481 struct lnet_peer *lp;
2486 lnet_net_unlock(cpt);
2487 lnet_net_lock(LNET_LOCK_EX);
2488 lp = lpni->lpni_peer_net->lpn_peer;
2489 lnet_peer_clear_discovery_error(lp);
2492 * We're willing to be interrupted. The lpni can become a
2493 * zombie if we race with DLC, so we must check for that.
2496 /* Keep lp alive when the lnet_net_lock is unlocked */
2497 lnet_peer_addref_locked(lp);
2498 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2499 if (signal_pending(current))
2500 break;
2501 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2502 break;
2504 * Don't repeat discovery if discovery is disabled. This is
2505 * done to ensure we can use discovery as a standard ping as
2506 * well for backwards compatibility with routers which do not
2507 * have discovery or have discovery disabled
2509 if (lnet_is_discovery_disabled(lp) && count > 0)
2510 break;
2511 if (lp->lp_dc_error)
2512 break;
2513 if (lnet_peer_is_uptodate(lp))
2514 break;
2515 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2516 break;
2517 lnet_peer_queue_for_discovery(lp);
2519 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2522 * If caller requested a non-blocking operation then
2523 * return immediately. Once discovery is complete any
2524 * pending messages that were stopped due to discovery
2525 * will be transmitted.
2530 lnet_net_unlock(LNET_LOCK_EX);
2532 finish_wait(&lp->lp_dc_waitq, &wait);
2533 lnet_net_lock(LNET_LOCK_EX);
2534 lnet_peer_decref_locked(lp);
2535 /* Peer may have changed */
2536 lp = lpni->lpni_peer_net->lpn_peer;
2538 finish_wait(&lp->lp_dc_waitq, &wait);
2540 lnet_net_unlock(LNET_LOCK_EX);
2542 lnet_peer_decref_locked(lp);
2544 * The peer may have changed, so re-check and rediscover if that turns
2545 * out to have been the case. The reference count on lp ensured that
2546 * even if it was unlinked from lpni the memory could not be recycled.
2547 * Thus the check below is sufficient to determine whether the peer
2548 * changed. If the peer changed, then lp must not be dereferenced.
2550 if (lp != lpni->lpni_peer_net->lpn_peer)
2553 if (signal_pending(current))
2555 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2557 else if (lp->lp_dc_error)
2558 rc = lp->lp_dc_error;
2560 CDEBUG(D_NET, "non-blocking discovery\n");
2561 else if (!lnet_peer_is_uptodate(lp) &&
2562 !(lnet_is_discovery_disabled(lp) ||
2563 (lp->lp_state & LNET_PEER_MARK_DELETED)))
2566 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2567 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2568 libcfs_nidstr(&lpni->lpni_nid), rc,
2569 (!block) ? "pending discovery" : "discovery complete");
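/*
 * Illustrative sketch only: the interruptible wait used above, in
 * minimal form. Hypothetical helper; locking around the state checks
 * is elided for brevity.
 */
static int example_wait_for_discovery(struct lnet_peer *lp)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		/* Re-check the condition after every wakeup. */
		if (lnet_peer_is_uptodate(lp) || lp->lp_dc_error ||
		    signal_pending(current))
			break;
		schedule();
	}
	finish_wait(&lp->lp_dc_waitq, &wait);

	if (signal_pending(current))
		return -EINTR;
	return lp->lp_dc_error;
}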
2574 /* Handle an incoming ack for a push. */
2576 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2578 struct lnet_ping_buffer *pbuf;
2580 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2581 spin_lock(&lp->lp_lock);
2582 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2583 lp->lp_push_error = ev->status;
2584 if (ev->status)
2585 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2586 else
2587 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2588 spin_unlock(&lp->lp_lock);
2590 CDEBUG(D_NET, "peer %s ev->status %d\n",
2591 libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2594 /* Handle a Reply message. This is the reply to a Ping message. */
2596 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2598 struct lnet_ping_buffer *pbuf;
2601 spin_lock(&lp->lp_lock);
2603 lp->lp_disc_src_nid = ev->target.nid;
2604 lp->lp_disc_dst_nid = ev->source.nid;
2607 * If some kind of error happened the contents of message
2608 * cannot be used. Set PING_FAILED to trigger a retry.
2610 if (ev->status) {
2611 lp->lp_state |= LNET_PEER_PING_FAILED;
2612 lp->lp_ping_error = ev->status;
2613 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2615 libcfs_nidstr(&lp->lp_primary_nid),
2616 libcfs_nidstr(&ev->source.nid));
2620 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2621 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2622 lnet_swap_pinginfo(pbuf);
2625 * A reply with invalid or corrupted info. Set PING_FAILED to
2626 * trigger a retry.
2628 rc = lnet_ping_info_validate(&pbuf->pb_info);
2629 if (rc) {
2630 lp->lp_state |= LNET_PEER_PING_FAILED;
2631 lp->lp_ping_error = 0;
2632 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2633 libcfs_nidstr(&lp->lp_primary_nid), rc);
2638 * The peer may have discovery disabled at its end. Set
2639 * NO_DISCOVERY as appropriate.
2641 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2642 lnet_peer_discovery_disabled) {
2643 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2644 libcfs_nidstr(&lp->lp_primary_nid));
2646 /* Detect whether this peer has toggled discovery from on to
2647 * off and whether we can delete and re-create the peer. Peers
2648 * that were manually configured cannot be deleted by discovery.
2649 * We need to delete this peer and re-create it if the peer was
2650 * not configured manually, is currently considered DD capable,
2651 * and either:
2652 * 1. We've already discovered the peer (the peer has toggled
2653 * the discovery feature from on to off), or
2654 * 2. The peer is considered MR, but it was not user configured
2655 * (this was a "temporary" peer created via the kernel APIs
2656 * that we're discovering for the first time)
2658 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2659 LNET_PEER_NO_DISCOVERY)) &&
2660 (lp->lp_state & (LNET_PEER_DISCOVERED |
2661 LNET_PEER_MULTI_RAIL))) {
2662 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2663 libcfs_nidstr(&lp->lp_primary_nid),
2665 lp->lp_state |= LNET_PEER_MARK_DELETION;
2667 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2669 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2670 libcfs_nidstr(&lp->lp_primary_nid));
2671 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2675 * Update the MULTI_RAIL flag based on the reply. If the peer
2676 * was configured with DLC then the setting should match what
2677 * DLC has.
2679 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2680 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2681 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2682 libcfs_nidstr(&lp->lp_primary_nid), lp);
2683 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2684 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2685 libcfs_nidstr(&lp->lp_primary_nid));
2686 } else if (lnet_peer_discovery_disabled) {
2688 "peer %s(%p) not MR: DD disabled locally\n",
2689 libcfs_nidstr(&lp->lp_primary_nid), lp);
2690 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2692 "peer %s(%p) not MR: DD disabled remotely\n",
2693 libcfs_nidstr(&lp->lp_primary_nid), lp);
2695 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2696 libcfs_nidstr(&lp->lp_primary_nid), lp);
2697 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2698 lnet_peer_clr_non_mr_pref_nids(lp);
2700 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2701 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2702 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2703 libcfs_nidstr(&lp->lp_primary_nid));
2705 CERROR("Multi-Rail state vanished from %s\n",
2706 libcfs_nidstr(&lp->lp_primary_nid));
2707 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2712 * Make sure we'll allocate the correct size ping buffer when
2713 * pinging the peer.
2715 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2716 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2719 * Check for truncation of the Reply. Clear PING_SENT and set
2720 * PING_FAILED to trigger a retry.
2722 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2723 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2724 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2725 lp->lp_state |= LNET_PEER_PING_FAILED;
2726 lp->lp_ping_error = 0;
2727 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2728 libcfs_nidstr(&lp->lp_primary_nid),
2729 pbuf->pb_info.pi_nnis);
2734 * Check the sequence numbers in the reply. These are only
2735 * available if the reply came from a Multi-Rail peer.
2737 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2738 pbuf->pb_info.pi_nnis > 1 &&
2739 lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2740 pbuf->pb_info.pi_ni[1].ns_nid) {
2741 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2742 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2743 libcfs_nidstr(&lp->lp_primary_nid),
2744 LNET_PING_BUFFER_SEQNO(pbuf),
2745 lp->lp_peer_seqno);
2746 else
2747 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2750 /* We're happy with the state of the data in the buffer. */
2751 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2752 libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2753 lp->lp_state);
2754 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2755 lnet_ping_buffer_decref(lp->lp_data);
2756 lp->lp_data = pbuf;
2757 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2758 lnet_ping_buffer_addref(pbuf);
2761 lp->lp_state &= ~LNET_PEER_PING_SENT;
2762 spin_unlock(&lp->lp_lock);
2766 * Send event handling. Only matters for error cases, where we clean
2767 * up state on the peer and peer_ni that would otherwise be updated in
2768 * the REPLY event handler for a successful Ping, and the ACK event
2769 * handler for a successful Push.
2772 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2779 spin_lock(&lp->lp_lock);
2780 if (ev->msg_type == LNET_MSG_GET) {
2781 lp->lp_state &= ~LNET_PEER_PING_SENT;
2782 lp->lp_state |= LNET_PEER_PING_FAILED;
2783 lp->lp_ping_error = ev->status;
2784 } else { /* ev->msg_type == LNET_MSG_PUT */
2785 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2786 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2787 lp->lp_push_error = ev->status;
2789 spin_unlock(&lp->lp_lock);
2790 rc = LNET_REDISCOVER_PEER;
2792 CDEBUG(D_NET, "%s Send to %s: %d\n",
2793 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2794 libcfs_nidstr(&ev->target.nid), rc);
2799 * Unlink event handling. This event is only seen if a call to
2800 * LNetMDUnlink() caused the event to be unlinked. If this call was
2801 * made after the event was set up in LNetGet() or LNetPut() then we
2802 * assume the Ping or Push timed out.
2805 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2807 spin_lock(&lp->lp_lock);
2808 /* We've passed through LNetGet() */
2809 if (lp->lp_state & LNET_PEER_PING_SENT) {
2810 lp->lp_state &= ~LNET_PEER_PING_SENT;
2811 lp->lp_state |= LNET_PEER_PING_FAILED;
2812 lp->lp_ping_error = -ETIMEDOUT;
2813 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2814 libcfs_nidstr(&lp->lp_primary_nid));
2816 /* We've passed through LNetPut() */
2817 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2818 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2819 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2820 lp->lp_push_error = -ETIMEDOUT;
2821 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2822 libcfs_nidstr(&lp->lp_primary_nid));
2824 spin_unlock(&lp->lp_lock);
2828 * Event handler for the discovery EQ.
2830 * Called with lnet_res_lock(cpt) held. The cpt is the
2831 * lnet_cpt_of_cookie() of the md handle cookie.
2833 static void lnet_discovery_event_handler(struct lnet_event *event)
2835 struct lnet_peer *lp = event->md_user_ptr;
2836 struct lnet_ping_buffer *pbuf;
2839 /* discovery needs to take another look */
2840 rc = LNET_REDISCOVER_PEER;
2842 CDEBUG(D_NET, "Received event: %d\n", event->type);
2844 switch (event->type) {
2845 case LNET_EVENT_ACK:
2846 lnet_discovery_event_ack(lp, event);
2848 case LNET_EVENT_REPLY:
2849 lnet_discovery_event_reply(lp, event);
2851 case LNET_EVENT_SEND:
2852 /* Only send failure triggers a retry. */
2853 rc = lnet_discovery_event_send(lp, event);
2855 case LNET_EVENT_UNLINK:
2856 /* LNetMDUnlink() was called */
2857 lnet_discovery_event_unlink(lp, event);
2860 /* Invalid events. */
2863 lnet_net_lock(LNET_LOCK_EX);
2864 if (event->unlinked) {
2865 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2866 lnet_ping_buffer_decref(pbuf);
2867 lnet_peer_decref_locked(lp);
2870 /* put peer back at end of request queue, if discovery not already
2872 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2873 lnet_peer_queue_for_discovery(lp)) {
2874 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2875 wake_up(&the_lnet.ln_dc_waitq);
2877 lnet_net_unlock(LNET_LOCK_EX);
2881 * Build a peer from incoming data.
2883 * The NIDs in the incoming data are supposed to be structured as follows:
2884 * - loopback
2885 * - primary NID
2886 * - other NIDs in same net
2887 * - NIDs in second net
2888 * - NIDs in third net
2889 * - etc.
2890 * This is due to the way the list of NIDs in the data is created.
2892 * Note that this function will mark the peer uptodate unless an
2893 * ENOMEM is encountered. All other errors are due to a conflict
2894 * between the DLC configuration and what discovery sees. We treat DLC
2895 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2896 * peer from becoming stuck in discovery.
2898 static int lnet_peer_merge_data(struct lnet_peer *lp,
2899 struct lnet_ping_buffer *pbuf)
2901 struct lnet_peer_net *lpn;
2902 struct lnet_peer_ni *lpni;
2903 lnet_nid_t *curnis = NULL;
2904 struct lnet_ni_status *addnis = NULL;
2905 lnet_nid_t *delnis = NULL;
2906 struct lnet_nid nid;
2916 flags = LNET_PEER_DISCOVERED;
2917 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2918 flags |= LNET_PEER_MULTI_RAIL;
2921 * Cache the routing feature for the peer; whether it is enabled
2922 * or disabled as reported by the remote peer.
2924 spin_lock(&lp->lp_lock);
2925 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2926 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2928 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2929 spin_unlock(&lp->lp_lock);
2931 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2932 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2933 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2934 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2935 if (!curnis || !addnis || !delnis) {
2943 /* Construct the list of NIDs present in peer. */
2945 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2946 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
2949 * Check for NIDs in pbuf not present in curnis[].
2950 * The loop starts at 1 to skip the loopback NID.
2952 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2953 for (j = 0; j < ncurnis; j++)
2954 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2955 break;
2956 if (j == ncurnis)
2957 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2960 * Check for NIDs in curnis[] not present in pbuf.
2961 * The nested loop starts at 1 to skip the loopback NID.
2963 * But never add the loopback NID to delnis[]: if it is
2964 * present in curnis[] then this peer is for this node.
2966 for (i = 0; i < ncurnis; i++) {
2967 if (curnis[i] == LNET_NID_LO_0)
2969 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2970 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2972 * update the information we cache for the
2973 * peer with the latest information we
2976 lnet_nid4_to_nid(curnis[i], &nid);
2977 lpni = lnet_peer_ni_find_locked(&nid);
2979 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2980 lnet_peer_ni_decref_locked(lpni);
2985 if (j == pbuf->pb_info.pi_nnis)
2986 delnis[ndelnis++] = curnis[i];
2990 * If we get here and the discovery is disabled then we don't want
2991 * to add or delete any NIs. We just updated the ones we have some
2992 * information on, and call it a day
2995 if (lnet_is_discovery_disabled(lp))
2998 for (i = 0; i < naddnis; i++) {
2999 lnet_nid4_to_nid(addnis[i].ns_nid, &nid);
3000 rc = lnet_peer_add_nid(lp, &nid, flags);
3002 CERROR("Error adding NID %s to peer %s: %d\n",
3003 libcfs_nid2str(addnis[i].ns_nid),
3004 libcfs_nidstr(&lp->lp_primary_nid), rc);
3008 lpni = lnet_peer_ni_find_locked(&nid);
3010 lpni->lpni_ns_status = addnis[i].ns_status;
3011 lnet_peer_ni_decref_locked(lpni);
3015 for (i = 0; i < ndelnis; i++) {
3017 * for routers it's okay to delete the primary_nid because
3018 * the upper layers don't really rely on it. So if we're
3019 * being told that the router changed its primary_nid
3020 * then it's okay to delete it.
3022 lnet_nid4_to_nid(delnis[i], &nid);
3023 if (lp->lp_rtr_refcount > 0)
3024 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3025 rc = lnet_peer_del_nid(lp, &nid, flags);
3027 CERROR("Error deleting NID %s from peer %s: %d\n",
3028 libcfs_nid2str(delnis[i]),
3029 libcfs_nidstr(&lp->lp_primary_nid), rc);
3035 /* The peer net for the primary NID should be the first entry in the
3036 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3037 * be the first entry in its peer net's lpn_peer_nis list.
3039 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, &nid);
3040 lpni = lnet_peer_ni_find_locked(&nid);
3042 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3043 libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3047 lnet_peer_ni_decref_locked(lpni);
3049 lpn = lpni->lpni_peer_net;
3050 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3051 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3053 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3054 list_move(&lpni->lpni_peer_nis,
3055 &lpni->lpni_peer_net->lpn_peer_nis);
3058 * Errors other than -ENOMEM are due to peers having been
3059 * configured with DLC. Ignore these because DLC overrides
3060 * Discovery.
3064 /* If this peer is a gateway, invoke the routing callback to update
3065 * the associated route status
3067 if (lp->lp_rtr_refcount > 0)
3068 lnet_router_discovery_ping_reply(lp, pbuf);
3070 CFS_FREE_PTR_ARRAY(curnis, nnis);
3071 CFS_FREE_PTR_ARRAY(addnis, nnis);
3072 CFS_FREE_PTR_ARRAY(delnis, nnis);
3073 lnet_ping_buffer_decref(pbuf);
3074 CDEBUG(D_NET, "peer %s (%p): %d\n",
3075 libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3078 spin_lock(&lp->lp_lock);
3079 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3080 lp->lp_state |= LNET_PEER_FORCE_PING;
3081 spin_unlock(&lp->lp_lock);
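/*
 * Illustrative sketch only: the add/delete computation above is a
 * plain two-sided set difference over flat NID arrays. Hypothetical
 * helper; the real code also skips the loopback NID and updates
 * cached status for NIDs present on both sides.
 */
static void example_nid_diff(const lnet_nid_t *cur, int ncur,
			     const lnet_nid_t *latest, int nlatest,
			     lnet_nid_t *add, int *nadd,
			     lnet_nid_t *del, int *ndel)
{
	int i, j;

	*nadd = *ndel = 0;
	for (i = 0; i < nlatest; i++) {		/* in latest, not in cur */
		for (j = 0; j < ncur; j++)
			if (latest[i] == cur[j])
				break;
		if (j == ncur)
			add[(*nadd)++] = latest[i];
	}
	for (i = 0; i < ncur; i++) {		/* in cur, not in latest */
		for (j = 0; j < nlatest; j++)
			if (cur[i] == latest[j])
				break;
		if (j == nlatest)
			del[(*ndel)++] = cur[i];
	}
}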
3087 * The data in pbuf says lp is its primary peer, but the data was
3088 * received by a different peer. Try to update lp with the data.
3091 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3093 struct lnet_handle_md mdh;
3095 /* Queue lp for discovery, and force it on the request queue. */
3096 lnet_net_lock(LNET_LOCK_EX);
3097 if (lnet_peer_queue_for_discovery(lp))
3098 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3099 lnet_net_unlock(LNET_LOCK_EX);
3101 LNetInvalidateMDHandle(&mdh);
3104 * Decide whether we can move the peer to the DATA_PRESENT state.
3106 * We replace stale data for a multi-rail peer, repair PING_FAILED
3107 * status, and preempt FORCE_PING.
3109 * If after that we have DATA_PRESENT, we merge it into this peer.
3111 spin_lock(&lp->lp_lock);
3112 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3113 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3114 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3115 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3116 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3117 lnet_ping_buffer_decref(pbuf);
3122 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3123 lnet_ping_buffer_decref(lp->lp_data);
3125 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3127 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3128 mdh = lp->lp_ping_mdh;
3129 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3130 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3131 lp->lp_ping_error = 0;
3133 if (lp->lp_state & LNET_PEER_FORCE_PING)
3134 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3135 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3136 spin_unlock(&lp->lp_lock);
3138 if (!LNetMDHandleIsInvalid(mdh))
3142 return lnet_peer_merge_data(lp, pbuf);
3144 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3148 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3149 {
3150 int i;
3152 for (i = 0; i < pinfo->pi_nnis; i++) {
3153 if (pinfo->pi_ni[i].ns_nid == nid)
3154 return true;
3155 }
3157 return false;
3158 }
3160 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3161 * to the discovery queue a reference was taken that will prevent the peer from
3162 * actually being freed by this function. After this function exits the
3163 * discovery thread should call lnet_peer_discovery_complete() which will
3164 * drop that reference as well as wake any waiters that may also be holding a
3165 * ref on the peer.
3167 static int lnet_peer_deletion(struct lnet_peer *lp)
3168 __must_hold(&lp->lp_lock)
3170 struct list_head rlist;
3171 struct lnet_route *route, *tmp;
3172 int sensitivity = lp->lp_health_sensitivity;
3175 INIT_LIST_HEAD(&rlist);
3177 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3178 LNET_PEER_FORCE_PUSH);
3179 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3180 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3182 /* no-op if lnet_peer_del() has already been called on this peer */
3183 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3186 spin_unlock(&lp->lp_lock);
3188 mutex_lock(&the_lnet.ln_api_mutex);
3189 if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3190 the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3191 mutex_unlock(&the_lnet.ln_api_mutex);
3192 spin_lock(&lp->lp_lock);
3196 lnet_net_lock(LNET_LOCK_EX);
3197 /* remove the peer from the discovery work
3198 * queue if it's on there in preparation
3201 if (!list_empty(&lp->lp_dc_list))
3202 list_del_init(&lp->lp_dc_list);
3203 list_for_each_entry_safe(route, tmp,
3204 &lp->lp_routes,
3205 lr_gwlist)
3206 lnet_move_route(route, NULL, &rlist);
3207 lnet_net_unlock(LNET_LOCK_EX);
3209 /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3210 rc = lnet_peer_del(lp);
3212 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3213 libcfs_nidstr(&lp->lp_primary_nid), rc);
3215 list_for_each_entry_safe(route, tmp,
3216 &rlist, lr_list) {
3217 /* re-add these routes */
3218 lnet_add_route(route->lr_net,
3219 route->lr_hops,
3220 &route->lr_nid,
3221 route->lr_priority,
3222 sensitivity);
3223 LIBCFS_FREE(route, sizeof(*route));
3226 mutex_unlock(&the_lnet.ln_api_mutex);
3228 spin_lock(&lp->lp_lock);
3234 * Update a peer using the data received.
3236 static int lnet_peer_data_present(struct lnet_peer *lp)
3237 __must_hold(&lp->lp_lock)
3239 struct lnet_ping_buffer *pbuf;
3240 struct lnet_peer_ni *lpni;
3241 struct lnet_nid nid;
3247 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3248 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3249 spin_unlock(&lp->lp_lock);
3252 * Modifications of peer structures are done while holding the
3253 * ln_api_mutex. A global lock is required because we may be
3254 * modifying multiple peer structures, and a mutex greatly
3255 * simplifies memory management.
3257 * The actual changes to the data structures must also protect
3258 * against concurrent lookups, for which the lnet_net_lock in
3259 * LNET_LOCK_EX mode is used.
3261 mutex_lock(&the_lnet.ln_api_mutex);
3262 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3268 * If this peer is not on the peer list then it is being torn
3269 * down, and our reference count may be all that is keeping it
3270 * alive. Don't do any work on it.
3272 if (list_empty(&lp->lp_peer_list)) {
3273 lnet_ping_buffer_decref(pbuf);
3277 flags = LNET_PEER_DISCOVERED;
3278 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3279 flags |= LNET_PEER_MULTI_RAIL;
3282 * Check whether the primary NID in the message matches the
3283 * primary NID of the peer. If it does, update the peer, if
3284 * it does not, check whether there is already a peer with
3285 * that primary NID. If no such peer exists, try to update
3286 * the primary NID of the current peer (allowed if it was
3287 * created due to message traffic) and complete the update.
3288 * If the peer did exist, hand off the data to it.
3290 * The peer for the loopback interface is a special case: this
3291 * is the peer for the local node, and we want to set its
3292 * primary NID to the correct value here. Moreover, this peer
3293 * can show up with only the loopback NID in the ping buffer.
3295 if (pbuf->pb_info.pi_nnis <= 1) {
3296 lnet_ping_buffer_decref(pbuf);
3299 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, &nid);
3300 if (nid_is_lo0(&lp->lp_primary_nid)) {
3301 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3303 lnet_ping_buffer_decref(pbuf);
3305 rc = lnet_peer_merge_data(lp, pbuf);
3307 * if the primary nid of the peer is present in the ping info returned
3308 * from the peer, but it's not the local primary peer we have
3309 * cached and discovery is disabled, then we don't want to update
3310 * our local peer info, by adding or removing NIDs, we just want
3311 * to update the status of the nids that we currently have
3312 * recorded in that peer.
3314 } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3315 (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3316 &pbuf->pb_info) &&
3317 lnet_is_discovery_disabled(lp))) {
3318 rc = lnet_peer_merge_data(lp, pbuf);
3320 lpni = lnet_peer_ni_find_locked(&nid);
3321 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3322 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3324 CERROR("Primary NID error %s versus %s: %d\n",
3325 libcfs_nidstr(&lp->lp_primary_nid),
3326 libcfs_nidstr(&nid), rc);
3327 lnet_ping_buffer_decref(pbuf);
3329 rc = lnet_peer_merge_data(lp, pbuf);
3332 lnet_peer_ni_decref_locked(lpni);
3334 struct lnet_peer *new_lp;
3335 new_lp = lpni->lpni_peer_net->lpn_peer;
3337 * if lp has discovery/MR enabled that means new_lp
3338 * should have discovery/MR enabled as well, since
3339 * it's the same peer, which we're about to merge
3341 spin_lock(&lp->lp_lock);
3342 spin_lock(&new_lp->lp_lock);
3343 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3344 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3345 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3346 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3347 /* If we're processing a ping reply then we may be
3348 * about to send a push to the peer that we ping'd.
3349 * Since the ping reply that we're processing was
3350 * received by lp, we need to set the discovery source
3351 * NID for new_lp to the NID stored in lp.
3353 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3354 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3355 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3357 spin_unlock(&new_lp->lp_lock);
3358 spin_unlock(&lp->lp_lock);
3360 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3361 lnet_consolidate_routes_locked(lp, new_lp);
3362 lnet_peer_ni_decref_locked(lpni);
3366 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3367 libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3369 mutex_unlock(&the_lnet.ln_api_mutex);
3371 spin_lock(&lp->lp_lock);
3372 /* Tell discovery to re-check the peer immediately. */
3373 if (!rc)
3374 rc = LNET_REDISCOVER_PEER;
3379 * A ping failed. Clear the PING_FAILED state and set the
3380 * FORCE_PING state, to ensure a retry even if discovery is
3381 * disabled. This avoids being left with incorrect state.
3383 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3384 __must_hold(&lp->lp_lock)
3386 struct lnet_handle_md mdh;
3389 mdh = lp->lp_ping_mdh;
3390 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3391 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3392 lp->lp_state |= LNET_PEER_FORCE_PING;
3393 rc = lp->lp_ping_error;
3394 lp->lp_ping_error = 0;
3395 spin_unlock(&lp->lp_lock);
3397 if (!LNetMDHandleIsInvalid(mdh))
3400 CDEBUG(D_NET, "peer %s:%d\n",
3401 libcfs_nidstr(&lp->lp_primary_nid), rc);
3403 spin_lock(&lp->lp_lock);
3404 return rc ? rc : LNET_REDISCOVER_PEER;
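/*
 * Illustrative sketch only: the idiom used by lnet_peer_ping_failed()
 * above and lnet_peer_push_failed() below. Snapshot the MD handle
 * under the spinlock, invalidate the stored copy, and only unlink
 * after dropping the lock, since LNetMDUnlink() may generate events
 * and must not run under lp_lock. Hypothetical helper name.
 */
static void example_cancel_md(struct lnet_peer *lp,
			      struct lnet_handle_md *mdhp)
{
	struct lnet_handle_md mdh;

	spin_lock(&lp->lp_lock);
	mdh = *mdhp;
	LNetInvalidateMDHandle(mdhp);
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);
}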
3407 /* Active side of ping. */
3408 static int lnet_peer_send_ping(struct lnet_peer *lp)
3409 __must_hold(&lp->lp_lock)
3415 lp->lp_state |= LNET_PEER_PING_SENT;
3416 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3417 spin_unlock(&lp->lp_lock);
3419 cpt = lnet_net_lock_current();
3420 /* Refcount for MD. */
3421 lnet_peer_addref_locked(lp);
3422 lnet_net_unlock(cpt);
3424 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3426 rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3427 the_lnet.ln_dc_handler, false);
3430 * if LNetMDBind in lnet_send_ping fails we need to decrement the
3431 * refcount on the peer, otherwise LNetMDUnlink will be called
3432 * which will eventually do that.
3436 lnet_peer_decref_locked(lp);
3437 lnet_net_unlock(cpt);
3438 rc = -rc; /* change the rc to negative value */
3440 } else if (rc < 0) {
3444 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3446 spin_lock(&lp->lp_lock);
3450 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3452 * The errors that get us here are considered hard errors and
3453 * cause Discovery to terminate. So we clear PING_SENT, but do
3454 * not set either PING_FAILED or FORCE_PING. In fact we need
3455 * to clear PING_FAILED, because the unlink event handler will
3456 * have set it if we called LNetMDUnlink() above.
3458 spin_lock(&lp->lp_lock);
3459 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3464 * This function exists because you cannot call LNetMDUnlink() from an
3465 * event handler.
3467 static int lnet_peer_push_failed(struct lnet_peer *lp)
3468 __must_hold(&lp->lp_lock)
3470 struct lnet_handle_md mdh;
3473 mdh = lp->lp_push_mdh;
3474 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3475 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3476 rc = lp->lp_push_error;
3477 lp->lp_push_error = 0;
3478 spin_unlock(&lp->lp_lock);
3480 if (!LNetMDHandleIsInvalid(mdh))
3483 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3484 spin_lock(&lp->lp_lock);
3485 return rc ? rc : LNET_REDISCOVER_PEER;
3489 * Mark the peer as discovered.
3491 static int lnet_peer_discovered(struct lnet_peer *lp)
3492 __must_hold(&lp->lp_lock)
3494 lp->lp_state |= LNET_PEER_DISCOVERED;
3495 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3496 LNET_PEER_REDISCOVER);
3498 lp->lp_dc_error = 0;
3500 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3505 /* Active side of push. */
3506 static int lnet_peer_send_push(struct lnet_peer *lp)
3507 __must_hold(&lp->lp_lock)
3509 struct lnet_ping_buffer *pbuf;
3510 struct lnet_processid id;
3515 /* Don't push to a non-multi-rail peer. */
3516 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3517 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3518 /* if peer's NIDs are uptodate then peer is discovered */
3519 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3520 rc = lnet_peer_discovered(lp);
3527 lp->lp_state |= LNET_PEER_PUSH_SENT;
3528 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3529 spin_unlock(&lp->lp_lock);
3531 cpt = lnet_net_lock_current();
3532 pbuf = the_lnet.ln_ping_target;
3533 lnet_ping_buffer_addref(pbuf);
3534 lnet_net_unlock(cpt);
3536 /* Push source MD */
3537 md.start = &pbuf->pb_info;
3538 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3539 md.threshold = 2; /* Put/Ack */
3541 md.options = LNET_MD_TRACK_RESPONSE;
3542 md.handler = the_lnet.ln_dc_handler;
3545 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3547 lnet_ping_buffer_decref(pbuf);
3548 CERROR("Can't bind push source MD: %d\n", rc);
3552 cpt = lnet_net_lock_current();
3553 /* Refcount for MD. */
3554 lnet_peer_addref_locked(lp);
3555 id.pid = LNET_PID_LUSTRE;
3556 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3557 id.nid = lp->lp_disc_dst_nid;
3559 id.nid = lp->lp_primary_nid;
3560 lnet_net_unlock(cpt);
3562 rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3563 LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3564 LNET_PROTO_PING_MATCHBITS, 0, 0);
3567 * reset the discovery nid. There is no need to restrict sending
3568 * from that source, if we call lnet_push_update_to_peers(). It'll
3569 * get set to a specific NID if we initiate discovery from scratch.
3572 lp->lp_disc_src_nid = LNET_ANY_NID;
3573 lp->lp_disc_dst_nid = LNET_ANY_NID;
3578 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3580 spin_lock(&lp->lp_lock);
3584 LNetMDUnlink(lp->lp_push_mdh);
3585 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3587 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3590 * The errors that get us here are considered hard errors and
3591 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3592 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3593 * because the unlink event handler will have set it if we
3594 * called LNetMDUnlink() above.
3596 spin_lock(&lp->lp_lock);
3597 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
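/*
 * Illustrative sketch only (hypothetical helper; peer refcounting,
 * user_ptr setup and error cleanup elided): a Push is a plain PUT
 * with an ACK requested, so the source MD's threshold must cover two
 * events, the local SEND completion and the remote ACK.
 */
static int example_push_md(struct lnet_ping_buffer *pbuf,
			   struct lnet_nid *src, struct lnet_processid *tgt,
			   struct lnet_handle_md *mdh)
{
	struct lnet_md md = {
		.start	   = &pbuf->pb_info,
		.length	   = LNET_PING_INFO_SIZE(pbuf->pb_nnis),
		.threshold = 2,		/* one SEND plus one ACK */
		.options   = LNET_MD_TRACK_RESPONSE,
		.handler   = the_lnet.ln_dc_handler,
	};
	int rc;

	rc = LNetMDBind(&md, LNET_UNLINK, mdh);
	if (rc)
		return rc;
	return LNetPut(src, *mdh, LNET_ACK_REQ, tgt, LNET_RESERVED_PORTAL,
		       LNET_PROTO_PING_MATCHBITS, 0, 0);
}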
3602 * Wait for work to be queued or some other change that must be
3603 * attended to. Returns non-zero if the discovery thread should shut
3604 * down.
3606 static int lnet_peer_discovery_wait_for_work(void)
3613 cpt = lnet_net_lock_current();
3615 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3616 TASK_INTERRUPTIBLE);
3617 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3618 break;
3619 if (lnet_push_target_resize_needed() ||
3620 the_lnet.ln_push_target->pb_needs_post)
3621 break;
3622 if (!list_empty(&the_lnet.ln_dc_request))
3623 break;
3624 if (!list_empty(&the_lnet.ln_msg_resend))
3625 break;
3626 lnet_net_unlock(cpt);
3629 * wakeup max every second to check if there are peers that
3630 * have been stuck on the working queue for greater than
3631 * the deadline.
3633 schedule_timeout(cfs_time_seconds(1));
3634 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3635 cpt = lnet_net_lock_current();
3637 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3639 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3642 lnet_net_unlock(cpt);
3644 CDEBUG(D_NET, "woken: %d\n", rc);
3650 * Messages that were pending on a destroyed peer will be put on a global
3651 * resend list. The message resend list will be checked by
3652 * the discovery thread when it wakes up, and will resend messages. These
3653 * messages can still be sendable in the case the lpni which was the initial
3654 * cause of the message re-queue was transferred to another peer.
3656 * It is possible that LNet could be shut down while we're iterating
3657 * through the list. lnet_shutdown_lndnets() will attempt to access the
3658 * resend list, but will have to wait until the spinlock is released, by
3659 * which time there shouldn't be any more messages on the resend list.
3660 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3661 * for the messages so they can be released. The other case is that
3662 * lnet_shutdown_lndnets() can finalize all the messages before this
3663 * function can visit the resend list, in which case this function will be
3666 static void lnet_resend_msgs(void)
3668 struct lnet_msg *msg, *tmp;
3672 spin_lock(&the_lnet.ln_msg_resend_lock);
3673 list_splice(&the_lnet.ln_msg_resend, &resend);
3674 spin_unlock(&the_lnet.ln_msg_resend_lock);
3676 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3677 list_del_init(&msg->msg_list);
3678 rc = lnet_send(&msg->msg_src_nid_param, msg,
3679 &msg->msg_rtr_nid_param);
3681 CNETERR("Error sending %s to %s: %d\n",
3682 lnet_msgtyp2str(msg->msg_type),
3683 libcfs_idstr(&msg->msg_target), rc);
3684 lnet_finalize(msg, rc);
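/*
 * Illustrative sketch only: the splice-under-lock idiom used above.
 * Take the whole pending list in one short critical section, then
 * process it without holding the spinlock. Hypothetical helper.
 */
static void example_drain_msgs(spinlock_t *lock, struct list_head *pending,
			       void (*process)(struct lnet_msg *))
{
	LIST_HEAD(local);
	struct lnet_msg *msg, *tmp;

	spin_lock(lock);
	list_splice_init(pending, &local);
	spin_unlock(lock);

	list_for_each_entry_safe(msg, tmp, &local, msg_list) {
		list_del_init(&msg->msg_list);
		process(msg);
	}
}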
3689 /* The discovery thread. */
3690 static int lnet_peer_discovery(void *arg)
3692 struct lnet_peer *lp;
3695 wait_for_completion(&the_lnet.ln_started);
3697 CDEBUG(D_NET, "started\n");
3700 if (lnet_peer_discovery_wait_for_work())
3703 if (lnet_push_target_resize_needed())
3704 lnet_push_target_resize();
3705 else if (the_lnet.ln_push_target->pb_needs_post)
3706 lnet_push_target_post(the_lnet.ln_push_target,
3707 &the_lnet.ln_push_target_md);
3711 lnet_net_lock(LNET_LOCK_EX);
3712 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3713 lnet_net_unlock(LNET_LOCK_EX);
3718 * Process all incoming discovery work requests. When
3719 * discovery must wait on a peer to change state, it
3720 * is added to the tail of the ln_dc_working queue. A
3721 * timestamp keeps track of when the peer was added,
3722 * so we can time out discovery requests that take too
3725 while (!list_empty(&the_lnet.ln_dc_request)) {
3726 lp = list_first_entry(&the_lnet.ln_dc_request,
3727 struct lnet_peer, lp_dc_list);
3728 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3730 * set the time the peer was put on the dc_working
3731 * queue. It shouldn't remain on the queue
3732 * forever, in case the GET message (for ping)
3733 * doesn't get a REPLY or the PUT message (for
3734 * push) doesn't get an ACK.
3736 lp->lp_last_queued = ktime_get_real_seconds();
3737 lnet_net_unlock(LNET_LOCK_EX);
3739 if (lnet_push_target_resize_needed())
3740 lnet_push_target_resize();
3741 else if (the_lnet.ln_push_target->pb_needs_post)
3742 lnet_push_target_post(the_lnet.ln_push_target,
3743 &the_lnet.ln_push_target_md);
3746 * Select an action depending on the state of
3747 * the peer and whether discovery is disabled.
3748 * The check whether discovery is disabled is
3749 * done after the code that handles processing
3750 * for arrived data, cleanup for failures, and
3751 * forcing a Ping or Push.
3753 spin_lock(&lp->lp_lock);
3754 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3755 libcfs_nidstr(&lp->lp_primary_nid), lp,
3757 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3758 LNET_PEER_MARK_DELETED))
3759 rc = lnet_peer_deletion(lp);
3760 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3761 rc = lnet_peer_data_present(lp);
3762 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3763 rc = lnet_peer_ping_failed(lp);
3764 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3765 rc = lnet_peer_push_failed(lp);
3766 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3767 rc = lnet_peer_send_ping(lp);
3768 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3769 rc = lnet_peer_send_push(lp);
3770 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3771 rc = lnet_peer_send_ping(lp);
3772 else if (lnet_peer_needs_push(lp))
3773 rc = lnet_peer_send_push(lp);
3775 rc = lnet_peer_discovered(lp);
3776 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3777 libcfs_nidstr(&lp->lp_primary_nid), lp,
3780 if (rc == LNET_REDISCOVER_PEER) {
3781 spin_unlock(&lp->lp_lock);
3782 lnet_net_lock(LNET_LOCK_EX);
3783 list_move(&lp->lp_dc_list,
3784 &the_lnet.ln_dc_request);
3785 } else if (rc ||
3786 !(lp->lp_state & LNET_PEER_DISCOVERING)) {
3787 spin_unlock(&lp->lp_lock);
3788 lnet_net_lock(LNET_LOCK_EX);
3789 lnet_peer_discovery_complete(lp, rc);
3791 spin_unlock(&lp->lp_lock);
3792 lnet_net_lock(LNET_LOCK_EX);
3795 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3800 lnet_net_unlock(LNET_LOCK_EX);
3803 CDEBUG(D_NET, "stopping\n");
3805 * Clean up before telling lnet_peer_discovery_stop() that
3806 * we're done. Use wake_up() below to somewhat reduce the
3807 * size of the thundering herd if there are multiple threads
3808 * waiting on discovery of a single peer.
3811 /* Queue cleanup 1: stop all pending pings and pushes. */
3812 lnet_net_lock(LNET_LOCK_EX);
3813 while (!list_empty(&the_lnet.ln_dc_working)) {
3814 lp = list_first_entry(&the_lnet.ln_dc_working,
3815 struct lnet_peer, lp_dc_list);
3816 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3817 lnet_net_unlock(LNET_LOCK_EX);
3818 lnet_peer_cancel_discovery(lp);
3819 lnet_net_lock(LNET_LOCK_EX);
3821 lnet_net_unlock(LNET_LOCK_EX);
3823 /* Queue cleanup 2: wait for the expired queue to clear. */
3824 while (!list_empty(&the_lnet.ln_dc_expired))
3825 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3827 /* Queue cleanup 3: clear the request queue. */
3828 lnet_net_lock(LNET_LOCK_EX);
3829 while (!list_empty(&the_lnet.ln_dc_request)) {
3830 lp = list_first_entry(&the_lnet.ln_dc_request,
3831 struct lnet_peer, lp_dc_list);
3832 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
3834 lnet_net_unlock(LNET_LOCK_EX);
3836 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3837 the_lnet.ln_dc_handler = NULL;
3839 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3840 wake_up(&the_lnet.ln_dc_waitq);
3842 CDEBUG(D_NET, "stopped\n");
3847 /* ln_api_mutex is held on entry. */
3848 int lnet_peer_discovery_start(void)
3850 struct task_struct *task;
3853 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3856 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3857 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3858 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3861 CERROR("Can't start peer discovery thread: %d\n", rc);
3863 the_lnet.ln_dc_handler = NULL;
3865 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3868 CDEBUG(D_NET, "discovery start: %d\n", rc);
3873 /* ln_api_mutex is held on entry. */
3874 void lnet_peer_discovery_stop(void)
3876 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3879 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3880 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3882 /* In the LNetNIInit() path we may be stopping discovery before it
3883 * entered its work loop
3885 if (!completion_done(&the_lnet.ln_started))
3886 complete(&the_lnet.ln_started);
3888 wake_up(&the_lnet.ln_dc_waitq);
3890 mutex_unlock(&the_lnet.ln_api_mutex);
3891 wait_event(the_lnet.ln_dc_waitq,
3892 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3893 mutex_lock(&the_lnet.ln_api_mutex);
3895 LASSERT(list_empty(&the_lnet.ln_dc_request));
3896 LASSERT(list_empty(&the_lnet.ln_dc_working));
3897 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3899 CDEBUG(D_NET, "discovery stopped\n");
3905 lnet_debug_peer(lnet_nid_t nid4)
3907 char *aliveness = "NA";
3908 struct lnet_peer_ni *lp;
3910 struct lnet_nid nid;
3912 lnet_nid4_to_nid(nid4, &nid);
3913 cpt = lnet_nid2cpt(&nid, NULL);
3916 lp = lnet_peerni_by_nid_locked(&nid, NULL, cpt);
3918 lnet_net_unlock(cpt);
3919 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(&nid));
3923 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3924 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3926 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3927 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3928 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3929 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3930 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3932 lnet_peer_ni_decref_locked(lp);
3934 lnet_net_unlock(cpt);
3937 /* Gathering information for userspace. */
3939 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3940 char aliveness[LNET_MAX_STR_LEN],
3941 __u32 *cpt_iter, __u32 *refcount,
3942 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3943 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3944 __u32 *peer_tx_qnob)
3946 struct lnet_peer_table *peer_table;
3947 struct lnet_peer_ni *lp;
3952 /* get the number of CPTs */
3953 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3955 /* if the cpt number to be examined is >= the number of cpts in
3956 * the system then indicate that there are no more cpts to examine
3958 if (*cpt_iter >= lncpt)
3961 /* get the current table */
3962 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3963 /* if the ptable is NULL then there are no more cpts to examine */
3964 if (peer_table == NULL)
3967 lnet_net_lock(*cpt_iter);
3969 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3970 struct list_head *peers = &peer_table->pt_hash[j];
3972 list_for_each_entry(lp, peers, lpni_hashlist) {
3973 if (!nid_is_nid4(&lp->lpni_nid))
3974 continue;
3975 if (peer_index-- > 0)
3976 continue;
3978 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3979 if (lnet_isrouter(lp) ||
3980 lnet_peer_aliveness_enabled(lp))
3981 snprintf(aliveness, LNET_MAX_STR_LEN,
3982 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3984 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
3985 *refcount = kref_read(&lp->lpni_kref);
3986 *ni_peer_tx_credits =
3987 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3988 *peer_tx_credits = lp->lpni_txcredits;
3989 *peer_rtr_credits = lp->lpni_rtrcredits;
3990 *peer_min_rtr_credits = lp->lpni_mintxcredits;
3991 *peer_tx_qnob = lp->lpni_txqnob;
3993 found = true;
3994 break;
3997 lnet_net_unlock(*cpt_iter);
4001 return found ? 0 : -ENOENT;
4004 /* ln_api_mutex is held, which keeps the peer list stable */
4005 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4007 struct lnet_ioctl_element_stats *lpni_stats;
4008 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4009 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4010 struct lnet_peer_ni_credit_info *lpni_info;
4011 struct lnet_peer_ni *lpni;
4012 struct lnet_peer *lp;
4013 lnet_nid_t nid4;
4014 struct lnet_nid nid;
4018 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4019 lp = lnet_find_peer(&nid);
4026 size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4027 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4028 size *= lp->lp_nnis;
4029 if (size > cfg->prcfg_size) {
4030 cfg->prcfg_size = size;
4035 cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4036 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4037 cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4038 cfg->prcfg_count = lp->lp_nnis;
4039 cfg->prcfg_size = size;
4040 cfg->prcfg_state = lp->lp_state;
4042 /* Allocate helper buffers. */
4044 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4047 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4050 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4051 if (!lpni_msg_stats)
4052 goto out_free_stats;
4053 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4055 goto out_free_msg_stats;
4060 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4061 if (!nid_is_nid4(&lpni->lpni_nid))
4062 continue;
4063 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4064 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4065 goto out_free_hstats;
4066 bulk += sizeof(nid4);
4068 memset(lpni_info, 0, sizeof(*lpni_info));
4069 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4070 if (lnet_isrouter(lpni) ||
4071 lnet_peer_aliveness_enabled(lpni))
4072 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4073 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4075 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4076 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4077 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4078 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4079 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4080 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4081 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4082 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4083 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4084 goto out_free_hstats;
4085 bulk += sizeof(*lpni_info);
4087 memset(lpni_stats, 0, sizeof(*lpni_stats));
4088 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4089 LNET_STATS_TYPE_SEND);
4090 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4091 LNET_STATS_TYPE_RECV);
4092 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4093 LNET_STATS_TYPE_DROP);
4094 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4095 goto out_free_hstats;
4096 bulk += sizeof(*lpni_stats);
4097 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4098 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4099 goto out_free_hstats;
4100 bulk += sizeof(*lpni_msg_stats);
4101 lpni_hstats->hlpni_network_timeout =
4102 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4103 lpni_hstats->hlpni_remote_dropped =
4104 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4105 lpni_hstats->hlpni_remote_timeout =
4106 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4107 lpni_hstats->hlpni_remote_error =
4108 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4109 lpni_hstats->hlpni_health_value =
4110 atomic_read(&lpni->lpni_healthv);
4111 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4112 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4113 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4114 goto out_free_hstats;
4115 bulk += sizeof(*lpni_hstats);
4120 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4122 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4124 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4126 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4128 lnet_peer_decref_locked(lp);
4133 /* must hold net_lock/0 */
4135 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4136 struct list_head *recovery_queue,
4139 /* the monitor thread could've shut down and cleaned up the queues */
4140 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4141 return;
4143 if (!list_empty(&lpni->lpni_recovery))
4144 return;
4146 if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4147 return;
4149 if (!lpni->lpni_last_alive) {
4151 "lpni %s(%p) not eligible for recovery last alive %lld\n",
4152 libcfs_nidstr(&lpni->lpni_nid), lpni,
4153 lpni->lpni_last_alive);
4154 return;
4157 if (lnet_recovery_limit &&
4158 now > lpni->lpni_last_alive + lnet_recovery_limit) {
4159 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4160 libcfs_nidstr(&lpni->lpni_nid),
4161 lpni->lpni_last_alive);
4162 /* Reset the ping count so that if this peer NI is added back to
4163 * the recovery queue we will send the first ping right away.
4165 lpni->lpni_ping_count = 0;
4166 return;
4169 /* This peer NI is going on the recovery queue, so take a ref on it */
4170 lnet_peer_ni_addref_locked(lpni);
4172 lnet_peer_ni_set_next_ping(lpni, now);
4174 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4175 libcfs_nidstr(&lpni->lpni_nid),
4176 lpni->lpni_ping_count,
4177 lpni->lpni_next_ping,
4178 lpni->lpni_last_alive,
4179 atomic_read(&lpni->lpni_healthv));
4181 list_add_tail(&lpni->lpni_recovery, recovery_queue);
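/*
 * Illustrative sketch only: the eligibility rules above collapsed
 * into a single predicate. Hypothetical helper; caller must hold
 * net_lock/0 as for the real function.
 */
static bool example_lpni_needs_recovery(struct lnet_peer_ni *lpni,
					time64_t now)
{
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return false;	/* monitor thread queues are gone */
	if (!list_empty(&lpni->lpni_recovery))
		return false;	/* already queued */
	if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
		return false;	/* fully healthy */
	if (!lpni->lpni_last_alive)
		return false;	/* never seen alive, nothing to recover */
	if (lnet_recovery_limit &&
	    now > lpni->lpni_last_alive + lnet_recovery_limit)
		return false;	/* aged out of the recovery window */
	return true;
}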
4184 /* Call with the ln_api_mutex held */
4186 lnet_peer_ni_set_healthv(lnet_nid_t nid4, int value, bool all)
4188 struct lnet_peer_table *ptable;
4189 struct lnet_peer *lp;
4190 struct lnet_peer_net *lpn;
4191 struct lnet_peer_ni *lpni;
4192 struct lnet_nid nid;
4197 if (the_lnet.ln_state != LNET_STATE_RUNNING)
4200 lnet_nid4_to_nid(nid4, &nid);
4201 now = ktime_get_seconds();
4204 lnet_net_lock(LNET_LOCK_EX);
4205 lpni = lnet_peer_ni_find_locked(&nid);
4206 if (!lpni) {
4207 lnet_net_unlock(LNET_LOCK_EX);
4208 return;
4209 }
4210 lnet_set_lpni_healthv_locked(lpni, value);
4211 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4212 &the_lnet.ln_mt_peerNIRecovq, now);
4213 lnet_peer_ni_decref_locked(lpni);
4214 lnet_net_unlock(LNET_LOCK_EX);
4218 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4221 * Walk all the peers and reset the health value for each one to the
4224 lnet_net_lock(LNET_LOCK_EX);
4225 for (cpt = 0; cpt < lncpt; cpt++) {
4226 ptable = the_lnet.ln_peer_tables[cpt];
4227 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4228 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4229 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4231 lnet_set_lpni_healthv_locked(lpni,
4233 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4234 &the_lnet.ln_mt_peerNIRecovq, now);
4239 lnet_net_unlock(LNET_LOCK_EX);