/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER (1)

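/*
 * A hypothetical consumer sketch for the sentinel above. The helper
 * name and loop are illustrative only (they are not the actual
 * discovery thread), but they show the intended contract: a positive
 * LNET_REDISCOVER_PEER return is not an error, it asks for an
 * immediate re-check of the peer.
 *
 *	rc = process_peer(lp);		// hypothetical helper
 *	if (rc == LNET_REDISCOVER_PEER)
 *		rc = process_peer(lp);	// look at this peer again now
 *	else if (rc < 0)
 *		handle_error(rc);	// hypothetical error path
 */
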
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}

void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}

void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}

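/*
 * A minimal init/teardown pairing sketch (hypothetical caller; error
 * handling beyond the create failure is elided):
 *
 *	rc = lnet_peer_tables_create();
 *	if (rc != 0)
 *		return rc;
 *	... use the_lnet.ln_peer_tables ...
 *	lnet_peer_uninit();	// deletes peer_nis, then destroys tables
 *
 * Note that lnet_peer_tables_destroy() asserts the zombie lists are
 * empty, so peers must be cleaned up before teardown.
 */
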
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (lpni == NULL)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}

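/*
 * The remote-peer-ni list built above is consumed by
 * lnet_peer_net_added(): when a matching local net appears later, the
 * lpni's credits are initialized from the new net's tunables and the
 * lpni is removed from the remote list (see that function above).
 */
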
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (lpn == NULL)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (lp == NULL)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_NID_ANY;
	lp->lp_disc_dst_nid = LNET_NID_ANY;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * All peers created on a router should have health checking
	 * enabled if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for the loopback peer. If you're creating
	 * a peer for the loopback interface then that was initiated
	 * when we attempted to send a message over the loopback. There
	 * is no need to ever use a different interface when sending
	 * messages to the loopback peer.
	 */
	if (nid == LNET_NID_LO_0)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on it.
	 * We cannot resend the messages now because we're holding the
	 * cpt lock. Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held.
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}

/* Called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* Don't remove a peer_ni if it's also a gateway. */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* Remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * Indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* Decrement the ref count on the peer table. */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of this peer table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* No need to keep this peer_ni on the hierarchy anymore. */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* Remove hashlist reference on peer_ni. */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}

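/*
 * The zombie accounting above pairs with lnet_peer_ni_finalize_wait()
 * further down, which blocks until pt_zombies drops to zero. A rough
 * lifetime sketch:
 *
 *	lnet_peer_ni_del_locked(lpni, ...);	// lpni moves to zombie list
 *	...					// last user drops its ref;
 *						// pt_zombies is decremented
 *	lnet_peer_ni_finalize_wait(ptable);	// returns once pt_zombies == 0
 */
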
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}

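/*
 * Editorial note: the MD handles above are snapshot and invalidated
 * under lp_lock, while LNetMDUnlink() runs only after the lock is
 * dropped. A plausible reason (not stated in the original comments)
 * is that unlinking takes LNet resource locks of its own and so must
 * not be called under the peer spinlock.
 */
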
static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it.
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    primary_nid == nid) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}

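/*
 * A hypothetical caller sketch mapping the error codes documented
 * above (the switch body is illustrative):
 *
 *	rc = lnet_peer_del_nid(lp, nid, flags);
 *	switch (rc) {
 *	case 0:		break;			// NID deleted
 *	case -EPERM:	... non-DLC delete, or primary NID is locked ...
 *	case -ENOENT:	... no such peer_ni ...
 *	case -ECHILD:	... lpni belongs to another peer ...
 *	case -EBUSY:	... primary NID with other NIDs remaining ...
 *	}
 */
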
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/*
	 * If just deleting the peers for a NI, get rid of any routes
	 * these peers are gateways for.
	 */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}

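/*
 * Editorial note on ordering: routes that use these peers as gateways
 * are removed first, then the peer entries themselves, and only then
 * does the caller block on the per-table zombie counts. Reversing the
 * first two steps could leave routes pointing at deleted gateways.
 */
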
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}

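/*
 * A minimal lookup/use/release sketch for lnet_find_peer()
 * (hypothetical caller; the refcount taken above must be dropped
 * under lnet_net_lock, as lnet_peer_push_event() below does):
 *
 *	struct lnet_peer *lp = lnet_find_peer(nid);
 *
 *	if (lp) {
 *		... read lp->lp_primary_nid, lp->lp_state, etc ...
 *		lnet_net_lock(LNET_LOCK_EX);
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(LNET_LOCK_EX);
 *	}
 */
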
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided, return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}

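/*
 * Typical iteration sketch over all peer_nis of a peer, exactly as
 * lnet_peer_clr_non_mr_pref_nids() below uses it (pass a peer_net to
 * restrict the walk to one net; NULL walks every net):
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		... visit lpni ...
 */
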
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	u32 i = 0;
	int lncpt;
	int cpt;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;

done:
	*countp = count;
	*sizep = size;
	return rc;
}

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}

/*
 * Find the NID in the preferred gateways for the remote peer.
 * Return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nid2str(lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/*
	 * iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}

	return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/*
	 * This function is called with api_mutex held. When the api_mutex
	 * is held the list cannot be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/*
	 * Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}

	return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);

	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);

	return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				LIBCFS_FREE(ne1, sizeof(*ne1));
				rc = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}

		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);

	return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1) {
		lpni->lpni_pref.nid = LNET_NID_ANY;
	} else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);

	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
	lnet_nid_t pnid = 0;
	bool mr;
	int i, rc = 0;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	for (i = 0; i < num_nids; i++) {
		if (nids[i] == LNET_NID_LO_0)
			continue;

		if (!pnid) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
		} else {
			rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
		}

		if (rc && rc != -EEXIST)
			goto unlock;
	}

unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

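/*
 * A minimal caller sketch for LNetAddPeer() (hypothetical NID array;
 * the first usable NID acts as the primary unless discovery is
 * disabled, in which case each NID becomes its own peer):
 *
 *	lnet_nid_t nids[2] = { nid_a, nid_b };	// hypothetical NIDs
 *	int rc = LNetAddPeer(nids, 2);
 *
 *	if (rc)
 *		CERROR("failed to add peer: %d\n", rc);
 */
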
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/*
	 * If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
	if (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
				LNET_PEER_LOCK_PRIMARY;
		spin_unlock(&lp->lp_lock);

		/*
		 * start discovery in the background. Messages to that
		 * peer will not go through until the discovery is
		 * complete
		 */
		rc = lnet_discover_peer_locked(lpni, cpt, false);
		if (rc)
			goto out_decref;

		/*
		 * The lpni (or lp) for this NID may have changed and our ref is
		 * the only thing keeping the old one around. Release the ref
		 * and lookup the lpni again
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);

	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);

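/*
 * A minimal caller sketch for LNetPrimaryNID(). Note that it may kick
 * off discovery in the background and return the currently-known
 * primary; on any failure it simply returns the NID it was given:
 *
 *	lnet_nid_t primary = LNetPrimaryNID(nid);
 *
 *	if (primary == nid)
 *		... nid is (or is assumed to be) the primary ...
 */
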
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (lp->lp_primary_nid == lpni->lpni_nid)
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (lp->lp_primary_nid == lpni->lpni_nid)
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY)
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nid2str(lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (!(flags & LNET_PEER_CONFIGURED)) {
			if (lp->lp_primary_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *lp2 =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = lp2->lp_rtr_refcount;

			/*
			 * If the new peer that this NID belongs to is
			 * a primary NID for another peer which we're
			 * supposed to preserve the Primary for then we
			 * don't want to mess with it. But the
			 * configuration is wrong at this point, so we
			 * should flag both of these peers as in a bad
			 * state.
			 */
			if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
				spin_lock(&lp->lp_lock);
				lp->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp->lp_lock);
				spin_lock(&lp2->lp_lock);
				lp2->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp2->lp_lock);
				rc = -EPERM;
				goto out_free_lpni;
			}
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(lp2, lp);
			}
			lnet_peer_del(lp2);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out_free_lpni;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}

/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;

	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
		lp->lp_primary_nid = nid;

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc) {
		lp->lp_primary_nid = old;
		goto out;
	}
out:
	/*
	 * If this is a configured peer or the primary for that peer has
	 * been locked, then we don't want to flag this scenario as
	 * a failure.
	 */
	if (lp->lp_state & LNET_PEER_CONFIGURED ||
	    lp->lp_state & LNET_PEER_LOCK_PRIMARY)
		rc = 0;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);

	return rc;
}

/*
 * lpni creation initiated due to traffic, either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}

/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	if (!temp)
		flags = LNET_PEER_CONFIGURED;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}

/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}

void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}

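/*
 * This function is the kref release callback for lpni_kref (see
 * kref_init() in lnet_peer_ni_alloc() above). A sketch of how the
 * last reference is expected to be dropped, assuming the decref
 * helper is a thin kref_put() wrapper:
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 */
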
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}

/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}

bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}

/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/*
	 * The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}

/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}

/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}

2284 * Handle inbound push.
2285 * Like any event handler, called with lnet_res_lock/CPT held.
2287 void lnet_peer_push_event(struct lnet_event *ev)
2289 struct lnet_ping_buffer *pbuf;
2290 struct lnet_peer *lp;
2292 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2294 /* lnet_find_peer() adds a refcount */
2295 lp = lnet_find_peer(ev->source.nid);
2297 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2298 libcfs_nid2str(ev->initiator.nid),
2299 libcfs_nid2str(ev->source.nid));
2300 pbuf->pb_needs_post = true;
2304 /* Ensure peer state remains consistent while we modify it. */
2305 spin_lock(&lp->lp_lock);
2308 * If some kind of error happened the contents of the message
2309 * cannot be used. Clear the NIDS_UPTODATE and set the
2310 * FORCE_PING flag to trigger a ping.
2313 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2314 lp->lp_state |= LNET_PEER_FORCE_PING;
2315 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2317 libcfs_nid2str(lp->lp_primary_nid),
2318 libcfs_nid2str(ev->source.nid));
2323 * A push with invalid or corrupted info. Clear the UPTODATE
2324 * flag to trigger a ping.
2326 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2327 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2328 lp->lp_state |= LNET_PEER_FORCE_PING;
2329 CDEBUG(D_NET, "Corrupted Push from %s\n",
2330 libcfs_nid2str(lp->lp_primary_nid));
2335 * Make sure we'll allocate the correct size ping buffer when
2338 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2339 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2342 * A non-Multi-Rail peer is not supposed to be capable of
2345 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2346 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2347 libcfs_nid2str(lp->lp_primary_nid));
2352 * The peer may have discovery disabled at its end. Set
2353 * NO_DISCOVERY as appropriate.
2355 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2356 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2357 libcfs_nid2str(lp->lp_primary_nid));
2359 * Mark the peer for deletion if we already know about it
2360 * and it's going from discovery set to no discovery set
2362 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2363 LNET_PEER_DISCOVERING)) &&
2364 lp->lp_state & LNET_PEER_DISCOVERED) {
2365 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2366 libcfs_nid2str(lp->lp_primary_nid),
2368 lp->lp_state |= LNET_PEER_MARK_DELETION;
2370 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2371 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2372 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2373 libcfs_nid2str(lp->lp_primary_nid));
2374 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2378 * Update the MULTI_RAIL flag based on the push. If the peer
2379 * was configured with DLC then the setting should match what
2381 * NB: We verified above that the MR feature bit is set in pi_features
2383 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2384 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2385 libcfs_nid2str(lp->lp_primary_nid), lp);
2386 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2387 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2388 libcfs_nid2str(lp->lp_primary_nid));
2389 } else if (lnet_peer_discovery_disabled) {
2390 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2391 libcfs_nid2str(lp->lp_primary_nid), lp);
2392 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2393 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2394 libcfs_nid2str(lp->lp_primary_nid), lp);
2396 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2397 libcfs_nid2str(lp->lp_primary_nid), lp);
2398 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2399 lnet_peer_clr_non_mr_pref_nids(lp);
2403 * Check for truncation of the Put message. Clear the
2404 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2405 * and tell discovery to allocate a bigger buffer.
2407 if (ev->mlength < ev->rlength) {
2408 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2409 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2410 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2411 lp->lp_state |= LNET_PEER_FORCE_PING;
2412 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2413 libcfs_nid2str(lp->lp_primary_nid),
2414 pbuf->pb_info.pi_nnis);
2418 /* always assume new data */
2419 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2420 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2423 * If there is data present that hasn't been processed yet,
2424 * we'll replace it if the Put contained newer data and it
2425 * fits. We're racing with a Ping or earlier Push in this
2426 * case.
2428 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2429 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2430 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2431 pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2432 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2433 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2434 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2435 libcfs_nid2str(lp->lp_primary_nid),
2436 LNET_PING_BUFFER_SEQNO(pbuf),
2437 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2438 }
2439 goto out;
2440 }
2443 * Allocate a buffer to copy the data. On a failure we drop
2444 * the Push and set FORCE_PING to force the discovery
2445 * thread to fix the problem by pinging the peer.
2447 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2448 if (!lp->lp_data) {
2449 lp->lp_state |= LNET_PEER_FORCE_PING;
2450 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2451 libcfs_nid2str(lp->lp_primary_nid),
2452 LNET_PING_BUFFER_SEQNO(pbuf));
2453 goto out;
2454 }
2457 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2458 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2459 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2460 CDEBUG(D_NET, "Received Push %s %u\n",
2461 libcfs_nid2str(lp->lp_primary_nid),
2462 LNET_PING_BUFFER_SEQNO(pbuf));
2464 out:
2465 /* We've processed this buffer. It can be reposted */
2466 pbuf->pb_needs_post = true;
2469 * Queue the peer for discovery if not done, force it on the request
2470 * queue and wake the discovery thread if the peer was already queued,
2471 * because its status changed.
2473 spin_unlock(&lp->lp_lock);
2474 lnet_net_lock(LNET_LOCK_EX);
2475 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2476 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2477 wake_up(&the_lnet.ln_dc_waitq);
2479 /* Drop refcount from lookup */
2480 lnet_peer_decref_locked(lp);
2481 lnet_net_unlock(LNET_LOCK_EX);
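/*
 * A quick summary of the Push handler's outcomes above, kept as a
 * sketch for reference rather than additional logic:
 *
 *	send error, corrupt info, or truncation -> clear NIDS_UPTODATE,
 *		set FORCE_PING (discovery falls back to a Ping)
 *	Push from a non-Multi-Rail peer         -> dropped
 *	valid Push                              -> buffer saved,
 *		DATA_PRESENT set, peer (re)queued for discovery
 */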
2485 * Clear the discovery error state, unless we're already discovering
2486 * this peer, in which case the error is current.
2488 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2490 spin_lock(&lp->lp_lock);
2491 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2492 lp->lp_dc_error = 0;
2493 spin_unlock(&lp->lp_lock);
2497 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2498 * dropped/retaken within this function. An lnet_peer_ni is passed in
2499 * because discovery could tear down an lnet_peer.
2501 int
2502 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2504 DEFINE_WAIT(wait);
2505 struct lnet_peer *lp;
2506 int rc = 0;
2507 int count = 0;
2509 again:
2510 lnet_net_unlock(cpt);
2511 lnet_net_lock(LNET_LOCK_EX);
2512 lp = lpni->lpni_peer_net->lpn_peer;
2513 lnet_peer_clear_discovery_error(lp);
2516 * We're willing to be interrupted. The lpni can become a
2517 * zombie if we race with DLC, so we must check for that.
2519 for (;;) {
2520 /* Keep lp alive when the lnet_net_lock is unlocked */
2521 lnet_peer_addref_locked(lp);
2522 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2523 if (signal_pending(current))
2524 break;
2525 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2526 break;
2528 * Don't repeat discovery if discovery is disabled. This is
2529 * done to ensure we can use discovery as a standard ping as
2530 * well for backwards compatibility with routers which do not
2531 * have discovery or have discovery disabled
2533 if (lnet_is_discovery_disabled(lp) && count > 0)
2534 break;
2535 if (lp->lp_dc_error)
2536 break;
2537 if (lnet_peer_is_uptodate(lp))
2538 break;
2539 lnet_peer_queue_for_discovery(lp);
2540 count++;
2541 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2544 * If caller requested a non-blocking operation then
2545 * return immediately. Once discovery is complete any
2546 * pending messages that were stopped due to discovery
2547 * will be transmitted.
2549 if (!block)
2550 break;
2552 lnet_net_unlock(LNET_LOCK_EX);
2553 schedule();
2554 finish_wait(&lp->lp_dc_waitq, &wait);
2555 lnet_net_lock(LNET_LOCK_EX);
2556 lnet_peer_decref_locked(lp);
2557 /* Peer may have changed */
2558 lp = lpni->lpni_peer_net->lpn_peer;
2559 }
2560 finish_wait(&lp->lp_dc_waitq, &wait);
2562 lnet_net_unlock(LNET_LOCK_EX);
2563 lnet_net_lock(cpt);
2564 lnet_peer_decref_locked(lp);
2566 * The peer may have changed, so re-check and rediscover if that turns
2567 * out to have been the case. The reference count on lp ensured that
2568 * even if it was unlinked from lpni the memory could not be recycled.
2569 * Thus the check below is sufficient to determine whether the peer
2570 * changed. If the peer changed, then lp must not be dereferenced.
2572 if (lp != lpni->lpni_peer_net->lpn_peer)
2573 goto again;
2575 if (signal_pending(current))
2576 rc = -EINTR;
2577 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2578 rc = -ESHUTDOWN;
2579 else if (lp->lp_dc_error)
2580 rc = lp->lp_dc_error;
2581 else if (!block)
2582 CDEBUG(D_NET, "non-blocking discovery\n");
2583 else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2584 goto again;
2586 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2587 (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2588 libcfs_nid2str(lpni->lpni_nid), rc,
2589 (!block) ? "pending discovery" : "discovery complete");
2591 return rc;
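/*
 * A minimal sketch of the calling convention described above, assuming
 * a caller that holds the ln_api_mutex and has looked up a peer NI
 * under the cpt lock (helper names mirror those used in this file):
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	lnet_net_lock(cpt);
 *	lpni = lnet_find_peer_ni_locked(nid);
 *	if (lpni) {
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *		lnet_peer_ni_decref_locked(lpni);
 *	}
 *	lnet_net_unlock(cpt);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */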
2594 /* Handle an incoming ack for a push. */
2595 static void
2596 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2598 struct lnet_ping_buffer *pbuf;
2600 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2601 spin_lock(&lp->lp_lock);
2602 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2603 lp->lp_push_error = ev->status;
2604 if (ev->status)
2605 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2606 else
2607 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2608 spin_unlock(&lp->lp_lock);
2610 CDEBUG(D_NET, "peer %s ev->status %d\n",
2611 libcfs_nid2str(lp->lp_primary_nid), ev->status);
2614 /* Handle a Reply message. This is the reply to a Ping message. */
2615 static void
2616 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2618 struct lnet_ping_buffer *pbuf;
2619 int rc;
2621 spin_lock(&lp->lp_lock);
2623 lp->lp_disc_src_nid = ev->target.nid;
2624 lp->lp_disc_dst_nid = ev->source.nid;
2627 * If some kind of error happened the contents of the message
2628 * cannot be used. Set PING_FAILED to trigger a retry.
2630 if (ev->status) {
2631 lp->lp_state |= LNET_PEER_PING_FAILED;
2632 lp->lp_ping_error = ev->status;
2633 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2635 libcfs_nid2str(lp->lp_primary_nid),
2636 libcfs_nid2str(ev->source.nid));
2637 goto out;
2638 }
2640 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2641 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2642 lnet_swap_pinginfo(pbuf);
2645 * A reply with invalid or corrupted info. Set PING_FAILED to
2646 * trigger a retry.
2648 rc = lnet_ping_info_validate(&pbuf->pb_info);
2649 if (rc) {
2650 lp->lp_state |= LNET_PEER_PING_FAILED;
2651 lp->lp_ping_error = 0;
2652 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2653 libcfs_nid2str(lp->lp_primary_nid), rc);
2654 goto out;
2655 }
2658 * The peer may have discovery disabled at its end. Set
2659 * NO_DISCOVERY as appropriate.
2661 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2662 lnet_peer_discovery_disabled) {
2663 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2664 libcfs_nid2str(lp->lp_primary_nid));
2666 /* Detect whether this peer has toggled discovery from on to
2667 * off and whether we can delete and re-create the peer. Peers
2668 * that were manually configured cannot be deleted by discovery.
2669 * We need to delete this peer and re-create it if the peer was
2670 * not configured manually, is currently considered DD capable,
2671 * and either:
2672 * 1. We've already discovered the peer (the peer has toggled
2673 * the discovery feature from on to off), or
2674 * 2. The peer is considered MR, but it was not user configured
2675 * (this was a "temporary" peer created via the kernel APIs
2676 * that we're discovering for the first time)
2678 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2679 LNET_PEER_NO_DISCOVERY)) &&
2680 (lp->lp_state & (LNET_PEER_DISCOVERED |
2681 LNET_PEER_MULTI_RAIL))) {
2682 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2683 libcfs_nid2str(lp->lp_primary_nid),
2684 lp->lp_state);
2685 lp->lp_state |= LNET_PEER_MARK_DELETION;
2686 }
2687 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2688 } else {
2689 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2690 libcfs_nid2str(lp->lp_primary_nid));
2691 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2695 * Update the MULTI_RAIL flag based on the reply. If the peer
2696 * was configured with DLC then the setting should match what
2697 * DLC has.
2699 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2700 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2701 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2702 libcfs_nid2str(lp->lp_primary_nid), lp);
2703 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2704 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2705 libcfs_nid2str(lp->lp_primary_nid));
2706 } else if (lnet_peer_discovery_disabled) {
2708 "peer %s(%p) not MR: DD disabled locally\n",
2709 libcfs_nid2str(lp->lp_primary_nid), lp);
2710 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2712 "peer %s(%p) not MR: DD disabled remotely\n",
2713 libcfs_nid2str(lp->lp_primary_nid), lp);
2715 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2716 libcfs_nid2str(lp->lp_primary_nid), lp);
2717 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2718 lnet_peer_clr_non_mr_pref_nids(lp);
2720 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2721 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2722 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2723 libcfs_nid2str(lp->lp_primary_nid));
2725 CERROR("Multi-Rail state vanished from %s\n",
2726 libcfs_nid2str(lp->lp_primary_nid));
2727 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2732 * Make sure we'll allocate the correct size ping buffer when
2733 * pinging the peer.
2735 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2736 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2739 * Check for truncation of the Reply. Clear PING_SENT and set
2740 * PING_FAILED to trigger a retry.
2742 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2743 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2744 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2745 lp->lp_state |= LNET_PEER_PING_FAILED;
2746 lp->lp_ping_error = 0;
2747 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2748 libcfs_nid2str(lp->lp_primary_nid),
2749 pbuf->pb_info.pi_nnis);
2750 goto out;
2751 }
2754 * Check the sequence numbers in the reply. These are only
2755 * available if the reply came from a Multi-Rail peer.
2757 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2758 pbuf->pb_info.pi_nnis > 1 &&
2759 lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2760 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2761 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2762 libcfs_nid2str(lp->lp_primary_nid),
2763 LNET_PING_BUFFER_SEQNO(pbuf),
2764 lp->lp_peer_seqno);
2766 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2767 }
2769 /* We're happy with the state of the data in the buffer. */
2770 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2771 libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2772 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2773 lnet_ping_buffer_decref(lp->lp_data);
2774 lp->lp_data = pbuf;
2775 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2776 lnet_ping_buffer_addref(pbuf);
2778 out:
2779 lp->lp_state &= ~LNET_PEER_PING_SENT;
2780 spin_unlock(&lp->lp_lock);
2782 lnet_net_lock(LNET_LOCK_EX);
2784 * If this peer is a gateway, call the routing callback to
2785 * handle the ping reply
2787 if (lp->lp_rtr_refcount > 0)
2788 lnet_router_discovery_ping_reply(lp);
2789 lnet_net_unlock(LNET_LOCK_EX);
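/*
 * In outline, the Reply handler above proceeds through these checks,
 * summarized here as a sketch of the flow:
 *	1. ev->status set          -> PING_FAILED, retry
 *	2. bad magic/corrupt info  -> PING_FAILED, retry
 *	3. feature bits            -> update NO_DISCOVERY / MULTI_RAIL
 *	4. truncated Reply         -> PING_FAILED, grow buffer, retry
 *	5. valid Reply             -> record seqno, attach buffer as
 *	                              lp_data and set DATA_PRESENT
 */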
2793 * Send event handling. Only matters for error cases, where we clean
2794 * up state on the peer and peer_ni that would otherwise be updated in
2795 * the REPLY event handler for a successful Ping, and the ACK event
2796 * handler for a successful Push.
2798 static int
2799 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2801 int rc = 0;
2803 if (!ev->status)
2804 goto out;
2806 spin_lock(&lp->lp_lock);
2807 if (ev->msg_type == LNET_MSG_GET) {
2808 lp->lp_state &= ~LNET_PEER_PING_SENT;
2809 lp->lp_state |= LNET_PEER_PING_FAILED;
2810 lp->lp_ping_error = ev->status;
2811 } else { /* ev->msg_type == LNET_MSG_PUT */
2812 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2813 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2814 lp->lp_push_error = ev->status;
2816 spin_unlock(&lp->lp_lock);
2817 rc = LNET_REDISCOVER_PEER;
2818 out:
2819 CDEBUG(D_NET, "%s Send to %s: %d\n",
2820 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2821 libcfs_nid2str(ev->target.nid), rc);
2823 return rc;
2826 * Unlink event handling. This event is only seen if a call to
2827 * LNetMDUnlink() caused the event to be unlinked. If this call was
2828 * made after the event was set up in LNetGet() or LNetPut() then we
2829 * assume the Ping or Push timed out.
2831 static void
2832 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2834 spin_lock(&lp->lp_lock);
2835 /* We've passed through LNetGet() */
2836 if (lp->lp_state & LNET_PEER_PING_SENT) {
2837 lp->lp_state &= ~LNET_PEER_PING_SENT;
2838 lp->lp_state |= LNET_PEER_PING_FAILED;
2839 lp->lp_ping_error = -ETIMEDOUT;
2840 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2841 libcfs_nid2str(lp->lp_primary_nid));
2843 /* We've passed through LNetPut() */
2844 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2845 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2846 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2847 lp->lp_push_error = -ETIMEDOUT;
2848 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2849 libcfs_nid2str(lp->lp_primary_nid));
2851 spin_unlock(&lp->lp_lock);
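/*
 * For example, if a Ping (LNetGet) is sent and the MD is unlinked
 * before a REPLY arrives, PING_SENT is still set, so the handler
 * above converts the unlink into lp_ping_error = -ETIMEDOUT and the
 * discovery thread retries; a Push (LNetPut) that loses its ACK is
 * handled symmetrically via PUSH_SENT.
 */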
2855 * Event handler for the discovery EQ.
2857 * Called with lnet_res_lock(cpt) held. The cpt is the
2858 * lnet_cpt_of_cookie() of the md handle cookie.
2860 static void lnet_discovery_event_handler(struct lnet_event *event)
2862 struct lnet_peer *lp = event->md_user_ptr;
2863 struct lnet_ping_buffer *pbuf;
2864 int rc;
2866 /* discovery needs to take another look */
2867 rc = LNET_REDISCOVER_PEER;
2869 CDEBUG(D_NET, "Received event: %d\n", event->type);
2871 switch (event->type) {
2872 case LNET_EVENT_ACK:
2873 lnet_discovery_event_ack(lp, event);
2874 break;
2875 case LNET_EVENT_REPLY:
2876 lnet_discovery_event_reply(lp, event);
2877 break;
2878 case LNET_EVENT_SEND:
2879 /* Only send failure triggers a retry. */
2880 rc = lnet_discovery_event_send(lp, event);
2881 break;
2882 case LNET_EVENT_UNLINK:
2883 /* LNetMDUnlink() was called */
2884 lnet_discovery_event_unlink(lp, event);
2885 break;
2886 default:
2887 /* Invalid events. */
2888 LBUG();
2890 lnet_net_lock(LNET_LOCK_EX);
2891 if (event->unlinked) {
2892 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2893 lnet_ping_buffer_decref(pbuf);
2894 lnet_peer_decref_locked(lp);
2897 /* put peer back at end of request queue, if discovery not already
2898 * complete */
2899 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2900 lnet_peer_queue_for_discovery(lp)) {
2901 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2902 wake_up(&the_lnet.ln_dc_waitq);
2904 lnet_net_unlock(LNET_LOCK_EX);
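/*
 * Event-to-action mapping implemented by the switch above, for
 * reference:
 *
 *	LNET_EVENT_ACK    -> lnet_discovery_event_ack()    (Push done)
 *	LNET_EVENT_REPLY  -> lnet_discovery_event_reply()  (Ping done)
 *	LNET_EVENT_SEND   -> lnet_discovery_event_send()   (error only)
 *	LNET_EVENT_UNLINK -> lnet_discovery_event_unlink() (timeout)
 *
 * Any event with unlinked set also drops the buffer and peer
 * references taken when the MD was attached.
 */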
2908 * Build a peer from incoming data.
2910 * The NIDs in the incoming data are supposed to be structured as follows:
2911 * - loopback
2912 * - primary NID
2913 * - other NIDs in same net
2914 * - NIDs in second net
2915 * - NIDs in third net
2916 * - ...
2917 * This is due to the way the list of NIDs in the data is created.
2919 * Note that this function will mark the peer uptodate unless an
2920 * ENOMEM is encountered. All other errors are due to a conflict
2921 * between the DLC configuration and what discovery sees. We treat DLC
2922 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2923 * peer from becoming stuck in discovery.
2924 */
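/*
 * For example, a Multi-Rail peer with primary NID 10.1.1.2@tcp and a
 * second interface on an o2ib net would produce pi_ni[] entries in
 * this order (addresses are illustrative only):
 *
 *	pi_ni[0]: 0@lo          (loopback)
 *	pi_ni[1]: 10.1.1.2@tcp  (primary NID, first net)
 *	pi_ni[2]: 10.2.2.2@o2ib (second net)
 */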
2925 static int lnet_peer_merge_data(struct lnet_peer *lp,
2926 struct lnet_ping_buffer *pbuf)
2928 struct lnet_peer_net *lpn;
2929 struct lnet_peer_ni *lpni;
2930 lnet_nid_t *curnis = NULL;
2931 struct lnet_ni_status *addnis = NULL;
2932 lnet_nid_t *delnis = NULL;
2933 unsigned int flags;
2934 int ncurnis;
2935 int naddnis;
2936 int ndelnis;
2937 int nnis = 0;
2938 int i;
2939 int j;
2940 int rc;
2942 flags = LNET_PEER_DISCOVERED;
2943 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2944 flags |= LNET_PEER_MULTI_RAIL;
2947 * Cache the routing feature for the peer; whether it is enabled
2948 * or disabled as reported by the remote peer.
2950 spin_lock(&lp->lp_lock);
2951 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2952 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2953 else
2954 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2955 spin_unlock(&lp->lp_lock);
2957 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2958 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2959 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2960 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2961 if (!curnis || !addnis || !delnis) {
2962 rc = -ENOMEM;
2963 goto out;
2964 }
2966 ncurnis = 0;
2967 naddnis = 0;
2968 ndelnis = 0;
2969 /* Construct the list of NIDs present in peer. */
2970 lpni = NULL;
2971 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2972 curnis[ncurnis++] = lpni->lpni_nid;
2975 * Check for NIDs in pbuf not present in curnis[].
2976 * The loop starts at 1 to skip the loopback NID.
2978 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2979 for (j = 0; j < ncurnis; j++)
2980 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2981 break;
2982 if (j == ncurnis)
2983 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2984 }
2986 * Check for NIDs in curnis[] not present in pbuf.
2987 * The nested loop starts at 1 to skip the loopback NID.
2989 * But never add the loopback NID to delnis[]: if it is
2990 * present in curnis[] then this peer is for this node.
2992 for (i = 0; i < ncurnis; i++) {
2993 if (curnis[i] == LNET_NID_LO_0)
2994 continue;
2995 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2996 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2998 * update the information we cache for the
2999 * peer with the latest information we
3000 * have
3002 lpni = lnet_find_peer_ni_locked(curnis[i]);
3003 if (lpni) {
3004 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3005 lnet_peer_ni_decref_locked(lpni);
3006 }
3007 break;
3008 }
3009 }
3010 if (j == pbuf->pb_info.pi_nnis)
3011 delnis[ndelnis++] = curnis[i];
3015 * If we get here and the discovery is disabled then we don't want
3016 * to add or delete any NIs. We just updated the ones we have some
3017 * information on, and call it a day
3020 if (lnet_is_discovery_disabled(lp))
3021 goto out;
3023 for (i = 0; i < naddnis; i++) {
3024 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3025 if (rc) {
3026 CERROR("Error adding NID %s to peer %s: %d\n",
3027 libcfs_nid2str(addnis[i].ns_nid),
3028 libcfs_nid2str(lp->lp_primary_nid), rc);
3029 if (rc == -ENOMEM)
3030 goto out;
3031 }
3032 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3033 if (lpni) {
3034 lpni->lpni_ns_status = addnis[i].ns_status;
3035 lnet_peer_ni_decref_locked(lpni);
3036 }
3037 }
3039 for (i = 0; i < ndelnis; i++) {
3041 * for routers it's okay to delete the primary_nid because
3042 * the upper layers don't really rely on it. So if we're
3043 * being told that the router changed its primary_nid
3044 * then it's okay to delete it.
3046 if (lp->lp_rtr_refcount > 0)
3047 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3048 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3049 if (rc) {
3050 CERROR("Error deleting NID %s from peer %s: %d\n",
3051 libcfs_nid2str(delnis[i]),
3052 libcfs_nid2str(lp->lp_primary_nid), rc);
3053 if (rc == -ENOMEM)
3054 goto out;
3055 }
3056 }
3058 /* The peer net for the primary NID should be the first entry in the
3059 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3060 * be the first entry in its peer net's lpn_peer_nis list.
3062 lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3063 if (!lpni) {
3064 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3065 libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3066 rc = -EFAULT;
3067 goto out;
3068 }
3069 lnet_peer_ni_decref_locked(lpni);
3071 lpn = lpni->lpni_peer_net;
3072 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3073 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3075 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3076 list_move(&lpni->lpni_peer_nis,
3077 &lpni->lpni_peer_net->lpn_peer_nis);
3080 * Errors other than -ENOMEM are due to peers having been
3081 * configured with DLC. Ignore these because DLC overrides
3082 * Discovery.
3084 rc = 0;
3085 out:
3086 CFS_FREE_PTR_ARRAY(curnis, nnis);
3087 CFS_FREE_PTR_ARRAY(addnis, nnis);
3088 CFS_FREE_PTR_ARRAY(delnis, nnis);
3089 lnet_ping_buffer_decref(pbuf);
3090 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3093 spin_lock(&lp->lp_lock);
3094 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3095 lp->lp_state |= LNET_PEER_FORCE_PING;
3096 spin_unlock(&lp->lp_lock);
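/*
 * A worked example of the add/delete computation above, with
 * illustrative NIDs: if curnis[] holds {10.1.1.2@tcp, 10.2.2.2@o2ib}
 * and the ping buffer lists {0@lo, 10.1.1.2@tcp, 10.3.3.2@o2ib2},
 * then addnis[] gets 10.3.3.2@o2ib2, delnis[] gets 10.2.2.2@o2ib,
 * and 10.1.1.2@tcp is left alone apart from its ns_status update.
 * With discovery disabled, only the ns_status updates happen.
 */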
3102 * The data in pbuf says lp is its primary peer, but the data was
3103 * received by a different peer. Try to update lp with the data.
3105 static int
3106 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3108 struct lnet_handle_md mdh;
3110 /* Queue lp for discovery, and force it on the request queue. */
3111 lnet_net_lock(LNET_LOCK_EX);
3112 if (lnet_peer_queue_for_discovery(lp))
3113 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3114 lnet_net_unlock(LNET_LOCK_EX);
3116 LNetInvalidateMDHandle(&mdh);
3119 * Decide whether we can move the peer to the DATA_PRESENT state.
3121 * We replace stale data for a multi-rail peer, repair PING_FAILED
3122 * status, and preempt FORCE_PING.
3124 * If after that we have DATA_PRESENT, we merge it into this peer.
3126 spin_lock(&lp->lp_lock);
3127 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3128 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3129 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3130 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3131 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3132 lnet_ping_buffer_decref(pbuf);
3133 pbuf = lp->lp_data;
3134 lp->lp_data = NULL;
3135 }
3136 }
3137 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3138 lnet_ping_buffer_decref(lp->lp_data);
3139 lp->lp_data = NULL;
3140 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3141 }
3142 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3143 mdh = lp->lp_ping_mdh;
3144 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3145 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3146 lp->lp_ping_error = 0;
3148 if (lp->lp_state & LNET_PEER_FORCE_PING)
3149 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3150 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3151 spin_unlock(&lp->lp_lock);
3153 if (!LNetMDHandleIsInvalid(mdh))
3154 LNetMDUnlink(mdh);
3156 if (pbuf)
3157 return lnet_peer_merge_data(lp, pbuf);
3159 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3160 return 0;
3163 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3165 int i;
3167 for (i = 0; i < pinfo->pi_nnis; i++) {
3168 if (pinfo->pi_ni[i].ns_nid == nid)
3169 return true;
3170 }
3172 return false;
3175 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3176 * to the discovery queue a reference was taken that will prevent the peer from
3177 * actually being freed by this function. After this function exits the
3178 * discovery thread should call lnet_peer_discovery_complete() which will
3179 * drop that reference as well as wake any waiters that may also be holding a
3180 * ref on the peer.
3181 */
3182 static int lnet_peer_deletion(struct lnet_peer *lp)
3183 __must_hold(&lp->lp_lock)
3185 struct list_head rlist;
3186 struct lnet_route *route, *tmp;
3187 int sensitivity = lp->lp_health_sensitivity;
3189 INIT_LIST_HEAD(&rlist);
3191 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3192 LNET_PEER_FORCE_PUSH);
3193 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3194 libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3196 /* no-op if lnet_peer_del() has already been called on this peer */
3197 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3198 return 0;
3200 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3201 return -ESHUTDOWN;
3203 spin_unlock(&lp->lp_lock);
3205 mutex_lock(&the_lnet.ln_api_mutex);
3207 lnet_net_lock(LNET_LOCK_EX);
3208 /* remove the peer from the discovery work
3209 * queue if it's on there in preparation
3210 * of deleting it
3211 */
3212 if (!list_empty(&lp->lp_dc_list))
3213 list_del_init(&lp->lp_dc_list);
3214 list_for_each_entry_safe(route, tmp,
3215 &lp->lp_routes,
3216 lr_gwlist)
3217 lnet_move_route(route, NULL, &rlist);
3218 lnet_net_unlock(LNET_LOCK_EX);
3220 /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3221 lnet_peer_del(lp);
3223 list_for_each_entry_safe(route, tmp,
3224 &rlist, lr_list) {
3225 /* re-add these routes */
3226 lnet_add_route(route->lr_net,
3227 route->lr_hops,
3228 route->lr_nid,
3229 route->lr_priority,
3230 sensitivity);
3231 LIBCFS_FREE(route, sizeof(*route));
3232 }
3234 mutex_unlock(&the_lnet.ln_api_mutex);
3236 spin_lock(&lp->lp_lock);
3238 return 0;
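/*
 * The route handling above is effectively a save/restore dance,
 * sketched here with the same helpers:
 *
 *	lnet_move_route(route, NULL, &rlist);	park routes aside
 *	lnet_peer_del(lp);			drop peer and peer NIs
 *	lnet_add_route(net, hops, gw, ...);	re-create each route,
 *						re-allocating the peer
 *
 * so a gateway peer that toggles discovery is rebuilt without losing
 * its routing configuration.
 */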
3242 * Update a peer using the data received.
3244 static int lnet_peer_data_present(struct lnet_peer *lp)
3245 __must_hold(&lp->lp_lock)
3247 struct lnet_ping_buffer *pbuf;
3248 struct lnet_peer_ni *lpni;
3249 lnet_nid_t nid = LNET_NID_ANY;
3250 unsigned int flags;
3251 int rc = 0;
3253 pbuf = lp->lp_data;
3254 lp->lp_data = NULL;
3255 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3256 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3257 spin_unlock(&lp->lp_lock);
3260 * Modifications of peer structures are done while holding the
3261 * ln_api_mutex. A global lock is required because we may be
3262 * modifying multiple peer structures, and a mutex greatly
3263 * simplifies memory management.
3265 * The actual changes to the data structures must also protect
3266 * against concurrent lookups, for which the lnet_net_lock in
3267 * LNET_LOCK_EX mode is used.
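/*
 * Sketched as a lock-ordering recipe, the discipline described above
 * is (illustrative; in this function the LNET_LOCK_EX acquisitions
 * happen inside the helpers that actually touch the data structures):
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);	serialize peer modifiers
 *	lnet_net_lock(LNET_LOCK_EX);		exclude concurrent lookups
 *	... modify peer structures ...
 *	lnet_net_unlock(LNET_LOCK_EX);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */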
3269 mutex_lock(&the_lnet.ln_api_mutex);
3270 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3271 rc = -ESHUTDOWN;
3272 goto out;
3273 }
3276 * If this peer is not on the peer list then it is being torn
3277 * down, and our reference count may be all that is keeping it
3278 * alive. Don't do any work on it.
3280 if (list_empty(&lp->lp_peer_list))
3281 goto out;
3283 flags = LNET_PEER_DISCOVERED;
3284 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3285 flags |= LNET_PEER_MULTI_RAIL;
3288 * Check whether the primary NID in the message matches the
3289 * primary NID of the peer. If it does, update the peer, if
3290 * it does not, check whether there is already a peer with
3291 * that primary NID. If no such peer exists, try to update
3292 * the primary NID of the current peer (allowed if it was
3293 * created due to message traffic) and complete the update.
3294 * If the peer did exist, hand off the data to it.
3296 * The peer for the loopback interface is a special case: this
3297 * is the peer for the local node, and we want to set its
3298 * primary NID to the correct value here. Moreover, this peer
3299 * can show up with only the loopback NID in the ping buffer.
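/*
 * As a decision tree, the cases handled below are, in order:
 *
 *	pi_nnis <= 1                    -> nothing to do
 *	lp is the loopback peer         -> adopt nid as primary, merge
 *	primary matches (or discovery
 *	disabled and the primary is in
 *	the ping info)                  -> merge into lp
 *	nid unknown, or owned by lp     -> re-set primary, merge
 *	nid owned by another peer       -> hand the data to that peer
 *	                                   via lnet_peer_set_primary_data()
 */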
3301 if (pbuf->pb_info.pi_nnis <= 1)
3302 goto out;
3303 nid = pbuf->pb_info.pi_ni[1].ns_nid;
3304 if (lp->lp_primary_nid == LNET_NID_LO_0) {
3305 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3306 if (!rc)
3307 rc = lnet_peer_merge_data(lp, pbuf);
3309 * if the primary nid of the peer is present in the ping info returned
3310 * from the peer, but it's not the local primary peer we have
3311 * cached and discovery is disabled, then we don't want to update
3312 * our local peer info, by adding or removing NIDs, we just want
3313 * to update the status of the nids that we currently have
3314 * recorded in that peer.
3316 } else if (lp->lp_primary_nid == nid ||
3317 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3318 lnet_is_discovery_disabled(lp))) {
3319 rc = lnet_peer_merge_data(lp, pbuf);
3320 } else {
3321 lpni = lnet_find_peer_ni_locked(nid);
3322 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3323 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3324 if (rc) {
3325 CERROR("Primary NID error %s versus %s: %d\n",
3326 libcfs_nid2str(lp->lp_primary_nid),
3327 libcfs_nid2str(nid), rc);
3328 } else {
3329 rc = lnet_peer_merge_data(lp, pbuf);
3330 }
3331 if (lpni)
3332 lnet_peer_ni_decref_locked(lpni);
3333 } else {
3334 struct lnet_peer *new_lp;
3335 new_lp = lpni->lpni_peer_net->lpn_peer;
3337 * if lp has discovery/MR enabled that means new_lp
3338 * should have discovery/MR enabled as well, since
3339 * it's the same peer, which we're about to merge
3341 spin_lock(&lp->lp_lock);
3342 spin_lock(&new_lp->lp_lock);
3343 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3344 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3345 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3346 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3347 /* If we're processing a ping reply then we may be
3348 * about to send a push to the peer that we ping'd.
3349 * Since the ping reply that we're processing was
3350 * received by lp, we need to set the discovery source
3351 * NID for new_lp to the NID stored in lp.
3353 if (lp->lp_disc_src_nid != LNET_NID_ANY) {
3354 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3355 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3357 spin_unlock(&new_lp->lp_lock);
3358 spin_unlock(&lp->lp_lock);
3360 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3361 lnet_consolidate_routes_locked(lp, new_lp);
3362 lnet_peer_ni_decref_locked(lpni);
3363 }
3364 }
3365 out:
3366 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3367 lp->lp_state);
3368 mutex_unlock(&the_lnet.ln_api_mutex);
3370 spin_lock(&lp->lp_lock);
3371 /* Tell discovery to re-check the peer immediately. */
3372 if (!rc)
3373 rc = LNET_REDISCOVER_PEER;
3375 return rc;
3378 * A ping failed. Clear the PING_FAILED state and set the