/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
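/*
 * Note: lnet_peer_ni_alloc() below parks any peer_ni whose network is
 * not (yet) local on ln_remote_peer_ni_list. lnet_peer_net_added() is
 * the matching half: once the network does become local, it assigns
 * the net and credits and drops the peer_ni from the remote list.
 */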
void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
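/*
 * The peer tables are allocated per CPT (CPU partition): each partition
 * owns its own hash and zombie list, so a lookup in
 * lnet_find_peer_ni_locked() only touches the partition the NID hashes
 * to instead of contending on one global structure.
 */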
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid4)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	struct lnet_nid nid;
	int cpt;

	lnet_nid4_to_nid(nid4, &nid);
	cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(&nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid4)
{
	struct lnet_peer *lp;
	struct lnet_nid nid;

	lnet_nid4_to_nid(nid4, &nid);
	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to the
	 * loopback peer.
	 */
	if (nid_is_lo0(&nid))
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
}
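/*
 * The three allocators above build LNet's peer hierarchy:
 *
 *	lnet_peer	(lp_*)	 one per remote node, identified by
 *				 its primary NID
 *	lnet_peer_net	(lpn_*)	 one per network the node is reachable on
 *	lnet_peer_ni	(lpni_*) one per remote interface (NID)
 *
 * lnet_peer_attach_peer_ni() below links the three levels together;
 * lnet_peer_detach_peer_ni_locked() unwinds them in the reverse order.
 */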
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We can not resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Can not delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
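/*
 * Zombie bookkeeping: pt_zombies is incremented here and decremented in
 * lnet_destroy_peer_ni_locked() once the last reference is dropped.
 * lnet_peer_ni_finalize_wait() below blocks until the counter drains to
 * zero, so table teardown never frees a peer_ni that still has users.
 */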
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}
/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}
static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
{
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	struct lnet_nid nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	lnet_nid4_to_nid(nid4, &nid);
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    nid_same(&primary_nid, &nid)) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(&nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid),
	       flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				      &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			/* FIXME handle large-addr nid */
			gw_nid = lnet_nid_to_nid4(
				&lp->lpni_peer_net->lpn_peer->lp_primary_nid);

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for.
	 */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid4)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;
	struct lnet_nid nid;

	lnet_nid4_to_nid(nid4, &nid);

	cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, &nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
			return lpni;
	}

	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
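/*
 * Canonical iteration pattern over all NIs of a peer (used by e.g.
 * lnet_peer_clr_non_mr_pref_nids() below): pass NULL for the net to
 * walk every net, and feed the previous return value back in:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)))
 *		handle(lpni);	// handle() is illustrative only
 *
 * The caller must hold lnet_net_lock across the walk.
 */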
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;
			if (i >= count)
				goto done;
			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
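/*
 * Callers typically invoke this twice: a first call with a too-small
 * buffer fails with -E2BIG but still reports the peer count and the
 * needed buffer size through *countp and *sizep, and the retry passes
 * a buffer of at least that size.
 */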
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}

	return false;
}
void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;

	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}

	return false;
}
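/*
 * Storage note: lpni_pref is a union. With a single preferred NID
 * (lpni_pref_nnids == 1) the NID is stored inline in lpni_pref.nid;
 * with more than one, lpni_pref.nids holds a list of lnet_nid_list
 * entries. lnet_peer_add_pref_nid() below migrates the inline NID onto
 * the list when the second NID is added.
 */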
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);

	return rc;
}
void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
static int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				LIBCFS_FREE(ne1, alloc_size);
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}

		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
static int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	/* FIXME handle large-addr nid */
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lnet_nid_to_nid4(
			&lpni->lpni_peer_net->lpn_peer->lp_primary_nid);
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
	lnet_nid_t pnid = 0;
	bool mr;
	int i, rc;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	rc = 0;
	for (i = 0; i < num_nids; i++) {
		if (nids[i] == LNET_NID_LO_0)
			continue;

		if (!pnid) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
		} else {
			rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
		}

		if (rc && rc != -EEXIST)
			goto unlock;
	}

unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);
/* FIXME support large-addr nid */
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
	if (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
				LNET_PEER_LOCK_PRIMARY;
		spin_unlock(&lp->lp_lock);

		/* start discovery in the background. Messages to that
		 * peer will not go through until the discovery is
		 * complete
		 */
		rc = lnet_discover_peer_locked(lpni, cpt, false);
		if (rc)
			goto out_decref;

		/* The lpni (or lp) for this NID may have changed and our ref is
		 * the only thing keeping the old one around. Release the ref
		 * and lookup the lpni again
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);

	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and Always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY)
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (!(flags & LNET_PEER_CONFIGURED)) {
			if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *lp2 =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = lp2->lp_rtr_refcount;

			/* If the new peer that this NID belongs to is
			 * a primary NID for another peer which we're
			 * supposed to preserve the Primary for then we
			 * don't want to mess with it. But the
			 * configuration is wrong at this point, so we
			 * should flag both of these peers as in a bad
			 * state
			 */
			if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
				spin_lock(&lp->lp_lock);
				lp->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp->lp_lock);
				spin_lock(&lp2->lp_lock);
				lp2->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp2->lp_lock);
				goto out_free_lpni;
			}
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(lp2, lp);
			}
			lnet_peer_del(lp2);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out_free_lpni;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
			  unsigned int flags)
{
	struct lnet_nid old = lp->lp_primary_nid;
	int rc = 0;

	if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
		goto out;

	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
		lnet_nid4_to_nid(nid, &lp->lp_primary_nid);

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc) {
		lp->lp_primary_nid = old;
		goto out;
	}
out:
	/* if this is a configured peer or the primary for that peer has
	 * been locked, then we don't want to flag this scenario as
	 * a failure
	 */
	if (lp->lp_state & LNET_PEER_CONFIGURED ||
	    lp->lp_state & LNET_PEER_LOCK_PRIMARY)
		return 0;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nidstr(&old), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	if (!temp)
		flags = LNET_PEER_CONFIGURED;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
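/*
 * Example (hypothetical NIDs): "lnetctl peer add --prim_nid 10.0.0.2@tcp
 * --nid 10.0.0.3@tcp" reaches this function via IOC_LIBCFS_ADD_PEER_NI
 * with mr = true, creating the peer on the first invocation and
 * attaching the additional NID on the next.
 */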
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Can not be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
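/*
 * The matching removal path: "lnetctl peer del --prim_nid <nid>" deletes
 * the whole peer, while supplying "--nid <nid>" as well prunes a single
 * NI. A peer that is still acting as a router is refused above with
 * -EBUSY and its routes must be deleted first.
 */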
void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}
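/*
 * This is the kref release callback paired with the kref_init() in
 * lnet_peer_ni_alloc(): it runs when the last lnet_peer_ni_decref_locked()
 * (presumably a kref_put() wrapper defined in the headers) drops the
 * count to zero, which is also where pt_zombies is finally decremented.
 */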
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}
bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}
/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), rc);

	return rc;
}
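/*
 * Note how the -EALREADY return is used: lnet_peer_push_event() below
 * treats any non-zero return as "already queued" and then forcibly
 * moves the peer back onto ln_dc_request, so a state change observed
 * in a push still gets the discovery thread's prompt attention.
 */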
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
2318 * Handle inbound push.
2319 * Like any event handler, called with lnet_res_lock/CPT held.
2321 void lnet_peer_push_event(struct lnet_event *ev)
2323 struct lnet_ping_buffer *pbuf;
2324 struct lnet_peer *lp;
2326 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2328 /* lnet_find_peer() adds a refcount */
2329 lp = lnet_find_peer(ev->source.nid);
2331 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2332 libcfs_nid2str(ev->initiator.nid),
2333 libcfs_nid2str(ev->source.nid));
2334 pbuf->pb_needs_post = true;
2338 /* Ensure peer state remains consistent while we modify it. */
2339 spin_lock(&lp->lp_lock);
2342 * If some kind of error happened the contents of the message
2343 * cannot be used. Clear the NIDS_UPTODATE and set the
2344 * FORCE_PING flag to trigger a ping.
2347 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2348 lp->lp_state |= LNET_PEER_FORCE_PING;
2349 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2351 libcfs_nidstr(&lp->lp_primary_nid),
2352 libcfs_nid2str(ev->source.nid));
2357 * A push with invalid or corrupted info. Clear the UPTODATE
2358 * flag to trigger a ping.
2360 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2361 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2362 lp->lp_state |= LNET_PEER_FORCE_PING;
2363 CDEBUG(D_NET, "Corrupted Push from %s\n",
2364 libcfs_nidstr(&lp->lp_primary_nid));
2369 * Make sure we'll allocate the correct size ping buffer when
2372 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2373 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2376 * A non-Multi-Rail peer is not supposed to be capable of
2379 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2380 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2381 libcfs_nidstr(&lp->lp_primary_nid));
2386 * The peer may have discovery disabled at its end. Set
2387 * NO_DISCOVERY as appropriate.
2389 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2390 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2391 libcfs_nidstr(&lp->lp_primary_nid));
2393 * Mark the peer for deletion if we already know about it
2394 * and it's going from discovery set to no discovery set
2396 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2397 LNET_PEER_DISCOVERING)) &&
2398 lp->lp_state & LNET_PEER_DISCOVERED) {
2399 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2400 libcfs_nidstr(&lp->lp_primary_nid),
2402 lp->lp_state |= LNET_PEER_MARK_DELETION;
2404 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2405 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2406 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2407 libcfs_nidstr(&lp->lp_primary_nid));
2408 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2412 * Update the MULTI_RAIL flag based on the push. If the peer
2413 * was configured with DLC then the setting should match what
2415 * NB: We verified above that the MR feature bit is set in pi_features
2417 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2418 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2419 libcfs_nidstr(&lp->lp_primary_nid), lp);
2420 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2421 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2422 libcfs_nidstr(&lp->lp_primary_nid));
2423 } else if (lnet_peer_discovery_disabled) {
2424 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2425 libcfs_nidstr(&lp->lp_primary_nid), lp);
2426 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2427 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2428 libcfs_nidstr(&lp->lp_primary_nid), lp);
2430 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2431 libcfs_nidstr(&lp->lp_primary_nid), lp);
2432 lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
2441 if (ev->mlength < ev->rlength) {
2442 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2443 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2444 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2445 lp->lp_state |= LNET_PEER_FORCE_PING;
2446 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2447 libcfs_nidstr(&lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}
2452 /* always assume new data */
2453 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2454 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
2462 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2463 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2464 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2465 pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2466 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2467 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2468 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2469 libcfs_nidstr(&lp->lp_primary_nid),
2470 LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}
	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nidstr(&lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}
2491 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2492 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2493 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2494 CDEBUG(D_NET, "Received Push %s %u\n",
2495 libcfs_nidstr(&lp->lp_primary_nid),
2496 LNET_PING_BUFFER_SEQNO(pbuf));
out:
	/* We've processed this buffer. It can be reposted */
	pbuf->pb_needs_post = true;
	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
2507 spin_unlock(&lp->lp_lock);
2508 lnet_net_lock(LNET_LOCK_EX);
2509 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2510 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
2514 lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
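/*
 * Illustrative sketch (hypothetical helper, not called anywhere): the
 * freshness rule the push handler applies above when a Push races with
 * a Ping or an earlier Push. Cached data is only overwritten when the
 * incoming buffer carries a newer sequence number and fits within the
 * existing allocation.
 */
#if 0
static bool lnet_push_data_is_newer(struct lnet_peer *lp,
				    struct lnet_ping_buffer *pbuf)
{
	return LNET_PING_BUFFER_SEQNO(pbuf) >
	       LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
	       pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis;
}
#endif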
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
2522 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
/*
 * Peer discovery slow path. The ln_api_mutex is held on entry, and
 * dropped/retaken within this function. An lnet_peer_ni is passed in
 * because discovery could tear down an lnet_peer.
 */
static int
lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
{
	DEFINE_WAIT(wait);
	struct lnet_peer *lp;
	int rc = 0;
	int count = 0;

again:
	lnet_net_unlock(cpt);
2545 lnet_net_lock(LNET_LOCK_EX);
2546 lp = lpni->lpni_peer_net->lpn_peer;
2547 lnet_peer_clear_discovery_error(lp);
	/*
	 * We're willing to be interrupted. The lpni can become a
	 * zombie if we race with DLC, so we must check for that.
	 */
	for (;;) {
		/* Keep lp alive when the lnet_net_lock is unlocked */
2555 lnet_peer_addref_locked(lp);
2556 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
			break;
		/*
		 * Don't repeat discovery if discovery is disabled. This is
		 * done to ensure we can use discovery as a standard ping as
		 * well for backwards compatibility with routers which do not
		 * have discovery or have discovery disabled.
		 */
		if (lnet_is_discovery_disabled(lp) && count > 0)
			break;
		if (lp->lp_dc_error)
			break;
		if (lnet_peer_is_uptodate(lp))
			break;
		lnet_peer_queue_for_discovery(lp);
		count++;
		CDEBUG(D_NET, "Discovery attempt # %d\n", count);
		/*
		 * If caller requested a non-blocking operation then
		 * return immediately. Once discovery is complete any
		 * pending messages that were stopped due to discovery
		 * will be transmitted.
		 */
		if (!block)
			break;

		lnet_net_unlock(LNET_LOCK_EX);
		schedule();
		finish_wait(&lp->lp_dc_waitq, &wait);
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_decref_locked(lp);
		/* Peer may have changed */
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	finish_wait(&lp->lp_dc_waitq, &wait);
	lnet_net_unlock(LNET_LOCK_EX);
	lnet_net_lock(cpt);
	lnet_peer_decref_locked(lp);
	/*
	 * The peer may have changed, so re-check and rediscover if that turns
	 * out to have been the case. The reference count on lp ensured that
	 * even if it was unlinked from lpni the memory could not be recycled.
	 * Thus the check below is sufficient to determine whether the peer
	 * changed. If the peer changed, then lp must not be dereferenced.
	 */
	if (lp != lpni->lpni_peer_net->lpn_peer)
		goto again;
	if (signal_pending(current))
		rc = -EINTR;
	else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		rc = -ESHUTDOWN;
	else if (lp->lp_dc_error)
		rc = lp->lp_dc_error;
	else if (!block)
		CDEBUG(D_NET, "non-blocking discovery\n");
	else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
		goto again;
2620 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2621 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2622 libcfs_nidstr(&lpni->lpni_nid), rc,
	       (!block) ? "pending discovery" : "discovery complete");

	return rc;
}
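/*
 * Usage sketch (hypothetical caller, not part of this file): a path
 * that must wait for discovery passes block=true and sleeps on
 * lp_dc_waitq; the send path passes block=false and relies on the
 * discovery thread to transmit pending messages once the peer state
 * settles.
 */
#if 0
	rc = lnet_discover_peer_locked(lpni, cpt, true);
	if (rc < 0)
		CERROR("Discovery of %s failed: %d\n",
		       libcfs_nidstr(&lpni->lpni_nid), rc);
#endif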
2628 /* Handle an incoming ack for a push. */
static void
lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
2634 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2635 spin_lock(&lp->lp_lock);
2636 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
	lp->lp_push_error = ev->status;
	if (ev->status)
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
	else
		lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2642 spin_unlock(&lp->lp_lock);
2644 CDEBUG(D_NET, "peer %s ev->status %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), ev->status);
}
2648 /* Handle a Reply message. This is the reply to a Ping message. */
static void
lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	int rc;
2655 spin_lock(&lp->lp_lock);
2657 lnet_nid4_to_nid(ev->target.nid, &lp->lp_disc_src_nid);
2658 lnet_nid4_to_nid(ev->source.nid, &lp->lp_disc_dst_nid);
	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Set PING_FAILED to trigger a retry.
	 */
	if (ev->status) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
		CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nidstr(&lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}
2674 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2675 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2676 lnet_swap_pinginfo(pbuf);
	/*
	 * A reply with invalid or corrupted info. Set PING_FAILED to
	 * trigger a retry.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
		       libcfs_nidstr(&lp->lp_primary_nid), rc);
		goto out;
	}
	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2696 lnet_peer_discovery_disabled) {
2697 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2698 libcfs_nidstr(&lp->lp_primary_nid));
2700 /* Detect whether this peer has toggled discovery from on to
2701 * off and whether we can delete and re-create the peer. Peers
2702 * that were manually configured cannot be deleted by discovery.
2703 * We need to delete this peer and re-create it if the peer was
		 * not configured manually, is currently considered DD capable,
		 * and either:
2706 * 1. We've already discovered the peer (the peer has toggled
2707 * the discovery feature from on to off), or
2708 * 2. The peer is considered MR, but it was not user configured
2709 * (this was a "temporary" peer created via the kernel APIs
2710 * that we're discovering for the first time)
2712 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2713 LNET_PEER_NO_DISCOVERY)) &&
2714 (lp->lp_state & (LNET_PEER_DISCOVERED |
2715 LNET_PEER_MULTI_RAIL))) {
2716 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
			       libcfs_nidstr(&lp->lp_primary_nid),
			       lp->lp_state);
			lp->lp_state |= LNET_PEER_MARK_DELETION;
		}
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2724 libcfs_nidstr(&lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}
	/*
	 * Update the MULTI_RAIL flag based on the reply. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC puts in.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2734 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2735 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2736 libcfs_nidstr(&lp->lp_primary_nid), lp);
2737 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2738 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2739 libcfs_nidstr(&lp->lp_primary_nid));
		} else if (lnet_peer_discovery_disabled) {
			CDEBUG(D_NET,
			       "peer %s(%p) not MR: DD disabled locally\n",
			       libcfs_nidstr(&lp->lp_primary_nid), lp);
		} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
			CDEBUG(D_NET,
			       "peer %s(%p) not MR: DD disabled remotely\n",
			       libcfs_nidstr(&lp->lp_primary_nid), lp);
		} else {
			CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2750 libcfs_nidstr(&lp->lp_primary_nid), lp);
2751 lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2755 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2756 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
			      libcfs_nidstr(&lp->lp_primary_nid));
		} else {
			CERROR("Multi-Rail state vanished from %s\n",
2760 libcfs_nidstr(&lp->lp_primary_nid));
			lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
		}
	}
	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
	/*
	 * Check for truncation of the Reply. Clear PING_SENT and set
	 * PING_FAILED to trigger a retry.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2777 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2778 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2779 lp->lp_state |= LNET_PEER_PING_FAILED;
2780 lp->lp_ping_error = 0;
2781 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2782 libcfs_nidstr(&lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}
	/*
	 * Check the sequence numbers in the reply. These are only
	 * available if the reply came from a Multi-Rail peer.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2792 pbuf->pb_info.pi_nnis > 1 &&
2793 lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2794 pbuf->pb_info.pi_ni[1].ns_nid) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
			CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
			       libcfs_nidstr(&lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       lp->lp_peer_seqno);
		else
			lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	}
2804 /* We're happy with the state of the data in the buffer. */
2805 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
	       libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
	       lp->lp_state);
	if (lp->lp_state & LNET_PEER_DATA_PRESENT)
		lnet_ping_buffer_decref(lp->lp_data);
	else
		lp->lp_state |= LNET_PEER_DATA_PRESENT;
	lnet_ping_buffer_addref(pbuf);
	lp->lp_data = pbuf;
out:
	lp->lp_state &= ~LNET_PEER_PING_SENT;
2816 spin_unlock(&lp->lp_lock);
2818 lnet_net_lock(LNET_LOCK_EX);
	/*
	 * If this peer is a gateway, call the routing callback to
	 * handle the ping reply
	 */
	if (lp->lp_rtr_refcount > 0)
2824 lnet_router_discovery_ping_reply(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Send event handling. Only matters for error cases, where we clean
 * up state on the peer and peer_ni that would otherwise be updated in
 * the REPLY event handler for a successful Ping, and the ACK event
 * handler for a successful Push.
 */
static int
lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
{
	int rc = 0;

	if (!ev->status)
		goto out;

	spin_lock(&lp->lp_lock);
2843 if (ev->msg_type == LNET_MSG_GET) {
2844 lp->lp_state &= ~LNET_PEER_PING_SENT;
2845 lp->lp_state |= LNET_PEER_PING_FAILED;
2846 lp->lp_ping_error = ev->status;
2847 } else { /* ev->msg_type == LNET_MSG_PUT */
2848 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2849 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2850 lp->lp_push_error = ev->status;
2852 spin_unlock(&lp->lp_lock);
2853 rc = LNET_REDISCOVER_PEER;
out:
	CDEBUG(D_NET, "%s Send to %s: %d\n",
2856 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
	       libcfs_nid2str(ev->target.nid), rc);

	return rc;
}
/*
 * Unlink event handling. This event is only seen if a call to
 * LNetMDUnlink() caused the event to be unlinked. If this call was
 * made after the event was set up in LNetGet() or LNetPut() then we
 * assume the Ping or Push timed out.
 */
static void
lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
{
	spin_lock(&lp->lp_lock);
2871 /* We've passed through LNetGet() */
2872 if (lp->lp_state & LNET_PEER_PING_SENT) {
2873 lp->lp_state &= ~LNET_PEER_PING_SENT;
2874 lp->lp_state |= LNET_PEER_PING_FAILED;
2875 lp->lp_ping_error = -ETIMEDOUT;
2876 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
		       libcfs_nidstr(&lp->lp_primary_nid));
	}
	/* We've passed through LNetPut() */
2880 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2881 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2882 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2883 lp->lp_push_error = -ETIMEDOUT;
2884 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
		       libcfs_nidstr(&lp->lp_primary_nid));
	}
	spin_unlock(&lp->lp_lock);
}
/*
 * Event handler for the discovery EQ.
 *
 * Called with lnet_res_lock(cpt) held. The cpt is the
 * lnet_cpt_of_cookie() of the md handle cookie.
 */
2896 static void lnet_discovery_event_handler(struct lnet_event *event)
{
	struct lnet_peer *lp = event->md_user_ptr;
	struct lnet_ping_buffer *pbuf;
	int rc;

	/* discovery needs to take another look */
	rc = LNET_REDISCOVER_PEER;
2905 CDEBUG(D_NET, "Received event: %d\n", event->type);
	switch (event->type) {
	case LNET_EVENT_ACK:
		lnet_discovery_event_ack(lp, event);
		break;
	case LNET_EVENT_REPLY:
		lnet_discovery_event_reply(lp, event);
		break;
	case LNET_EVENT_SEND:
		/* Only send failure triggers a retry. */
		rc = lnet_discovery_event_send(lp, event);
		break;
	case LNET_EVENT_UNLINK:
		/* LNetMDUnlink() was called */
		lnet_discovery_event_unlink(lp, event);
		break;
	default:
		/* Invalid events. */
		LBUG();
	}
2926 lnet_net_lock(LNET_LOCK_EX);
2927 if (event->unlinked) {
2928 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2929 lnet_ping_buffer_decref(pbuf);
		lnet_peer_decref_locked(lp);
	}
	/* put peer back at end of request queue, if discovery not already
	 * complete
	 */
2935 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2936 lnet_peer_queue_for_discovery(lp)) {
2937 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
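/*
 * Descriptive summary of the dispatch above:
 *   LNET_EVENT_ACK    - a Push we sent was acknowledged.
 *   LNET_EVENT_REPLY  - a Ping we sent was answered.
 *   LNET_EVENT_SEND   - only a failed send requests rediscovery.
 *   LNET_EVENT_UNLINK - a timed-out Ping/Push was unlinked.
 * Whenever the MD is unlinked the handler drops both the ping buffer
 * reference and the peer reference that were taken when the MD was
 * created.
 */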
/*
 * Build a peer from incoming data.
 *
 * The NIDs in the incoming data are supposed to be structured as follows:
 *  - loopback
 *  - primary NID
 *  - other NIDs in same net
 *  - NIDs in second net
 *  - NIDs in third net
 *  - ...
 *
 * This is due to the way the list of NIDs in the data is created.
 *
 * Note that this function will mark the peer uptodate unless an
 * ENOMEM is encountered. All other errors are due to a conflict
 * between the DLC configuration and what discovery sees. We treat DLC
 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
 * peer from becoming stuck in discovery.
 */
2961 static int lnet_peer_merge_data(struct lnet_peer *lp,
2962 struct lnet_ping_buffer *pbuf)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	lnet_nid_t *curnis = NULL;
	struct lnet_ni_status *addnis = NULL;
	lnet_nid_t *delnis = NULL;
	unsigned int flags;
	int ncurnis = 0;
	int naddnis = 0;
	int ndelnis = 0;
	int nnis = 0;
	int i;
	int j;
	int rc;
2978 flags = LNET_PEER_DISCOVERED;
2979 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2980 flags |= LNET_PEER_MULTI_RAIL;
	/*
	 * Cache the routing feature for the peer; whether it is enabled
	 * or disabled as reported by the remote peer.
	 */
2986 spin_lock(&lp->lp_lock);
2987 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2988 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2990 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2991 spin_unlock(&lp->lp_lock);
2993 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2994 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2995 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2996 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
	if (!curnis || !addnis || !delnis) {
		rc = -ENOMEM;
		goto out;
	}

	/* Construct the list of NIDs present in peer. */
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
	/*
	 * Check for NIDs in pbuf not present in curnis[].
	 * The loop starts at 1 to skip the loopback NID.
	 */
	for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
		for (j = 0; j < ncurnis; j++)
			if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
				break;
		if (j == ncurnis)
			addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
	}
	/*
	 * Check for NIDs in curnis[] not present in pbuf.
	 * The nested loop starts at 1 to skip the loopback NID.
	 *
	 * But never add the loopback NID to delnis[]: if it is
	 * present in curnis[] then this peer is for this node.
	 */
	for (i = 0; i < ncurnis; i++) {
		if (curnis[i] == LNET_NID_LO_0)
			continue;
		for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
			if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
				/*
				 * update the information we cache for the
				 * peer with the latest information we
				 * received
				 */
				lpni = lnet_find_peer_ni_locked(curnis[i]);
				if (lpni) {
					lpni->lpni_ns_status =
						pbuf->pb_info.pi_ni[j].ns_status;
					lnet_peer_ni_decref_locked(lpni);
				}
				break;
			}
		}
		if (j == pbuf->pb_info.pi_nnis)
			delnis[ndelnis++] = curnis[i];
	}
	/*
	 * If we get here and the discovery is disabled then we don't want
	 * to add or delete any NIs. We just updated the ones we have some
	 * information on, and call it a day.
	 */
	if (lnet_is_discovery_disabled(lp))
		goto out;
	for (i = 0; i < naddnis; i++) {
		rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
		if (rc) {
			CERROR("Error adding NID %s to peer %s: %d\n",
			       libcfs_nid2str(addnis[i].ns_nid),
			       libcfs_nidstr(&lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
		lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
		if (lpni) {
			lpni->lpni_ns_status = addnis[i].ns_status;
			lnet_peer_ni_decref_locked(lpni);
		}
	}
	for (i = 0; i < ndelnis; i++) {
		/*
		 * for routers it's okay to delete the primary_nid because
		 * the upper layers don't really rely on it. So if we're
		 * being told that the router changed its primary_nid
		 * then it's okay to delete it.
		 */
		if (lp->lp_rtr_refcount > 0)
			flags |= LNET_PEER_RTR_NI_FORCE_DEL;
		rc = lnet_peer_del_nid(lp, delnis[i], flags);
		if (rc) {
			CERROR("Error deleting NID %s from peer %s: %d\n",
			       libcfs_nid2str(delnis[i]),
			       libcfs_nidstr(&lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
	}
3094 /* The peer net for the primary NID should be the first entry in the
3095 * peer's lp_peer_nets list, and the peer NI for the primary NID should
	 * be the first entry in its peer net's lpn_peer_nis list.
	 */
	lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
	if (!lpni) {
		CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
		       libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
		goto out;
	}

	lnet_peer_ni_decref_locked(lpni);
3107 lpn = lpni->lpni_peer_net;
3108 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3109 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3111 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3112 list_move(&lpni->lpni_peer_nis,
3113 &lpni->lpni_peer_net->lpn_peer_nis);
	/*
	 * Errors other than -ENOMEM are due to peers having been
	 * configured with DLC. Ignore these because DLC overrides
	 * Discovery.
	 */
	rc = 0;
out:
3122 CFS_FREE_PTR_ARRAY(curnis, nnis);
3123 CFS_FREE_PTR_ARRAY(addnis, nnis);
3124 CFS_FREE_PTR_ARRAY(delnis, nnis);
3125 lnet_ping_buffer_decref(pbuf);
3126 CDEBUG(D_NET, "peer %s (%p): %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), lp, rc);

	if (rc) {
		spin_lock(&lp->lp_lock);
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		spin_unlock(&lp->lp_lock);
	}
	return rc;
}
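/*
 * Illustrative sketch (hypothetical helper): the add/delete sets built
 * above are plain O(n*m) set differences over NID arrays; a NID from
 * the ping buffer is added when it is absent from curnis[], and a
 * cached NID is deleted when it is absent from the buffer.
 */
#if 0
static bool nid_in_set(lnet_nid_t nid, const lnet_nid_t *set, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (set[i] == nid)
			return true;
	return false;
}
#endif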
/*
 * The data in pbuf says lp is its primary peer, but the data was
 * received by a different peer. Try to update lp with the data.
 */
static int
lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
{
	struct lnet_handle_md mdh;
3147 /* Queue lp for discovery, and force it on the request queue. */
3148 lnet_net_lock(LNET_LOCK_EX);
3149 if (lnet_peer_queue_for_discovery(lp))
3150 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3151 lnet_net_unlock(LNET_LOCK_EX);
3153 LNetInvalidateMDHandle(&mdh);
	/*
	 * Decide whether we can move the peer to the DATA_PRESENT state.
	 *
	 * We replace stale data for a multi-rail peer, repair PING_FAILED
	 * status, and preempt FORCE_PING.
	 *
	 * If after that we have DATA_PRESENT, we merge it into this peer.
	 */
3163 spin_lock(&lp->lp_lock);
3164 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3165 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3166 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3167 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3168 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3169 lnet_ping_buffer_decref(pbuf);
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		lnet_ping_buffer_decref(lp->lp_data);
		lp->lp_data = NULL;
		lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
	}
3179 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3180 mdh = lp->lp_ping_mdh;
3181 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3182 lp->lp_state &= ~LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
	}
	if (lp->lp_state & LNET_PEER_FORCE_PING)
3186 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3187 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3188 spin_unlock(&lp->lp_lock);
	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);

	if (lp->lp_state & LNET_PEER_DATA_PRESENT)
		return lnet_peer_merge_data(lp, pbuf);

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
	return 0;
}
static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
{
	int i;

	for (i = 0; i < pinfo->pi_nnis; i++) {
		if (pinfo->pi_ni[i].ns_nid == nid)
			return true;
	}

	return false;
}
/* Delete a peer that has been marked for deletion. NB: when this peer was added
 * to the discovery queue a reference was taken that will prevent the peer from
 * actually being freed by this function. After this function exits the
 * discovery thread should call lnet_peer_discovery_complete() which will
 * drop that reference as well as wake any waiters that may also be holding a
 * ref on the peer.
 */
3219 static int lnet_peer_deletion(struct lnet_peer *lp)
3220 __must_hold(&lp->lp_lock)
{
	struct list_head rlist;
	struct lnet_route *route, *tmp;
	int sensitivity = lp->lp_health_sensitivity;
	int rc = 0;
3226 INIT_LIST_HEAD(&rlist);
3228 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3229 LNET_PEER_FORCE_PUSH);
3230 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3231 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
	/* no-op if lnet_peer_del() has already been called on this peer */
	if (lp->lp_state & LNET_PEER_MARK_DELETED)
		return 0;

	if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		return -ESHUTDOWN;
3240 spin_unlock(&lp->lp_lock);
3242 mutex_lock(&the_lnet.ln_api_mutex);
3244 lnet_net_lock(LNET_LOCK_EX);
	/* remove the peer from the discovery work
	 * queue if it's on there in preparation
	 * for the deletion of the peer
	 */
3249 if (!list_empty(&lp->lp_dc_list))
3250 list_del_init(&lp->lp_dc_list);
	list_for_each_entry_safe(route, tmp,
				 &lp->lp_routes,
				 lr_gwlist)
3255 lnet_net_unlock(LNET_LOCK_EX);
	/* lnet_peer_del() deletes all the peer NIs owned by this peer */
	rc = lnet_peer_del(lp);

	list_for_each_entry_safe(route, tmp,
				 &rlist, lr_list) {
		/* re-add these routes */
		lnet_add_route(route->lr_net,
			       route->lr_hops,
			       &route->lr_nid,
			       route->lr_priority,
			       sensitivity);
		LIBCFS_FREE(route, sizeof(*route));
	}
3271 mutex_unlock(&the_lnet.ln_api_mutex);
	spin_lock(&lp->lp_lock);

	return rc;
}
/*
 * Update a peer using the data received.
 */
static int lnet_peer_data_present(struct lnet_peer *lp)
3282 __must_hold(&lp->lp_lock)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_peer_ni *lpni;
	lnet_nid_t nid = LNET_NID_ANY;
	unsigned int flags;
	int rc = 0;

	pbuf = lp->lp_data;
	lp->lp_data = NULL;
	lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3293 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3294 spin_unlock(&lp->lp_lock);
	/*
	 * Modifications of peer structures are done while holding the
	 * ln_api_mutex. A global lock is required because we may be
	 * modifying multiple peer structures, and a mutex greatly
	 * simplifies memory management.
	 *
	 * The actual changes to the data structures must also protect
	 * against concurrent lookups, for which the lnet_net_lock in
	 * LNET_LOCK_EX mode is used.
	 */
3306 mutex_lock(&the_lnet.ln_api_mutex);
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		rc = -ESHUTDOWN;
		goto out;
	}
	/*
	 * If this peer is not on the peer list then it is being torn
	 * down, and our reference count may be all that is keeping it
	 * alive. Don't do any work on it.
	 */
	if (list_empty(&lp->lp_peer_list))
		goto out;
3320 flags = LNET_PEER_DISCOVERED;
3321 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3322 flags |= LNET_PEER_MULTI_RAIL;
	/*
	 * Check whether the primary NID in the message matches the
	 * primary NID of the peer. If it does, update the peer, if
	 * it does not, check whether there is already a peer with
	 * that primary NID. If no such peer exists, try to update
	 * the primary NID of the current peer (allowed if it was
	 * created due to message traffic) and complete the update.
	 * If the peer did exist, hand off the data to it.
	 *
	 * The peer for the loopback interface is a special case: this
	 * is the peer for the local node, and we want to set its
	 * primary NID to the correct value here. Moreover, this peer
	 * can show up with only the loopback NID in the ping buffer.
	 */
	if (pbuf->pb_info.pi_nnis <= 1)
		goto out;
	nid = pbuf->pb_info.pi_ni[1].ns_nid;
	if (nid_is_lo0(&lp->lp_primary_nid)) {
		rc = lnet_peer_set_primary_nid(lp, nid, flags);
		if (!rc)
			rc = lnet_peer_merge_data(lp, pbuf);
	/*
	 * if the primary nid of the peer is present in the ping info returned
	 * from the peer, but it's not the local primary peer we have
	 * cached and discovery is disabled, then we don't want to update
	 * our local peer info, by adding or removing NIDs, we just want
	 * to update the status of the nids that we currently have
	 * recorded in that peer.
	 */
	} else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
		   (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
					     &pbuf->pb_info) &&
		    lnet_is_discovery_disabled(lp))) {
		rc = lnet_peer_merge_data(lp, pbuf);
	} else {
		lpni = lnet_find_peer_ni_locked(nid);
3360 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
			rc = lnet_peer_set_primary_nid(lp, nid, flags);
			if (rc) {
				CERROR("Primary NID error %s versus %s: %d\n",
				       libcfs_nidstr(&lp->lp_primary_nid),
				       libcfs_nid2str(nid), rc);
			} else {
				rc = lnet_peer_merge_data(lp, pbuf);
			}
			if (lpni)
				lnet_peer_ni_decref_locked(lpni);
		} else {
			struct lnet_peer *new_lp;
3373 new_lp = lpni->lpni_peer_net->lpn_peer;
			/*
			 * if lp has discovery/MR enabled that means new_lp
			 * should have discovery/MR enabled as well, since
			 * it's the same peer, which we're about to merge
			 */
3379 spin_lock(&lp->lp_lock);
3380 spin_lock(&new_lp->lp_lock);
3381 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3382 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3383 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3384 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3385 /* If we're processing a ping reply then we may be
3386 * about to send a push to the peer that we ping'd.
3387 * Since the ping reply that we're processing was
3388 * received by lp, we need to set the discovery source
			 * NID for new_lp to the NID stored in lp.
			 */
3391 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3392 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
				new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
			}
3395 spin_unlock(&new_lp->lp_lock);
3396 spin_unlock(&lp->lp_lock);
3398 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3399 lnet_consolidate_routes_locked(lp, new_lp);
			lnet_peer_ni_decref_locked(lpni);
		}
	}
out:
	CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
	       libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
	       lp->lp_state);
	mutex_unlock(&the_lnet.ln_api_mutex);
	spin_lock(&lp->lp_lock);
	/* Tell discovery to re-check the peer immediately. */
	if (!rc)
		rc = LNET_REDISCOVER_PEER;

	return rc;
}
/*
 * A ping failed. Clear the PING_FAILED state and set the
 * FORCE_PING state, to ensure a retry even if discovery is
 * disabled. This avoids being left with incorrect state.
 */
3421 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3422 __must_hold(&lp->lp_lock)
{
	struct lnet_handle_md mdh;
	int rc;
3427 mdh = lp->lp_ping_mdh;
3428 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3429 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3430 lp->lp_state |= LNET_PEER_FORCE_PING;
3431 rc = lp->lp_ping_error;
3432 lp->lp_ping_error = 0;
3433 spin_unlock(&lp->lp_lock);
	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);
3438 CDEBUG(D_NET, "peer %s:%d\n",
3439 libcfs_nidstr(&lp->lp_primary_nid), rc);
3441 spin_lock(&lp->lp_lock);
	return rc ? rc : LNET_REDISCOVER_PEER;
}
3445 /* Active side of ping. */
3446 static int lnet_peer_send_ping(struct lnet_peer *lp)
3447 __must_hold(&lp->lp_lock)
{
	int nnis;
	int rc;
	int cpt;

	lp->lp_state |= LNET_PEER_PING_SENT;
3454 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3455 spin_unlock(&lp->lp_lock);
3457 cpt = lnet_net_lock_current();
3458 /* Refcount for MD. */
3459 lnet_peer_addref_locked(lp);
3460 lnet_net_unlock(cpt);
3462 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3464 rc = lnet_send_ping(lnet_nid_to_nid4(&lp->lp_primary_nid),
3465 &lp->lp_ping_mdh, nnis, lp,
3466 the_lnet.ln_dc_handler, false);
	/*
	 * if LNetMDBind in lnet_send_ping fails we need to decrement the
	 * refcount on the peer, otherwise LNetMDUnlink will be called
	 * which will eventually do that.
	 */
	if (rc > 0) {
		lnet_net_lock(cpt);
		lnet_peer_decref_locked(lp);
		lnet_net_unlock(cpt);
		rc = -rc; /* change the rc to negative value */
		goto fail_error;
	} else if (rc < 0) {
		goto fail_error;
	}
	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	return 0;

fail_error:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
	/*
	 * The errors that get us here are considered hard errors and
	 * cause Discovery to terminate. So we clear PING_SENT, but do
	 * not set either PING_FAILED or FORCE_PING. In fact we need
	 * to clear PING_FAILED, because the unlink event handler will
	 * have set it if we called LNetMDUnlink() above.
	 */
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
	return rc;
}
/*
 * This function exists because you cannot call LNetMDUnlink() from an
 * event handler.
 */
static int lnet_peer_push_failed(struct lnet_peer *lp)
3507 __must_hold(&lp->lp_lock)
{
	struct lnet_handle_md mdh;
	int rc;
3512 mdh = lp->lp_push_mdh;
3513 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3514 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3515 rc = lp->lp_push_error;
3516 lp->lp_push_error = 0;
3517 spin_unlock(&lp->lp_lock);
	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);
3522 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3523 spin_lock(&lp->lp_lock);
	return rc ? rc : LNET_REDISCOVER_PEER;
}
/*
 * Mark the peer as discovered.
 */
static int lnet_peer_discovered(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	lp->lp_state |= LNET_PEER_DISCOVERED;
3534 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3535 LNET_PEER_REDISCOVER);
3537 lp->lp_dc_error = 0;
	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));

	return 0;
}
3544 /* Active side of push. */
3545 static int lnet_peer_send_push(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_process_id id;
	struct lnet_md md;
	int cpt;
	int rc;
3554 /* Don't push to a non-multi-rail peer. */
3555 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3556 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3557 /* if peer's NIDs are uptodate then peer is discovered */
3558 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
			rc = lnet_peer_discovered(lp);
			return rc;
		}

		return 0;
	}
3566 lp->lp_state |= LNET_PEER_PUSH_SENT;
3567 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3568 spin_unlock(&lp->lp_lock);
3570 cpt = lnet_net_lock_current();
3571 pbuf = the_lnet.ln_ping_target;
3572 lnet_ping_buffer_addref(pbuf);
3573 lnet_net_unlock(cpt);
3575 /* Push source MD */
	md.start     = &pbuf->pb_info;
	md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
	md.threshold = 2; /* Put/Ack */
	md.max_size  = 0;
	md.options   = LNET_MD_TRACK_RESPONSE;
	md.handler   = the_lnet.ln_dc_handler;
	md.user_ptr  = lp;
	rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
	if (rc) {
		lnet_ping_buffer_decref(pbuf);
		CERROR("Can't bind push source MD: %d\n", rc);
		goto fail_error;
	}
3591 cpt = lnet_net_lock_current();
3592 /* Refcount for MD. */
3593 lnet_peer_addref_locked(lp);
3594 id.pid = LNET_PID_LUSTRE;
3595 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3596 id.nid = lnet_nid_to_nid4(&lp->lp_disc_dst_nid);
3598 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
3599 lnet_net_unlock(cpt);
3601 rc = LNetPut(lnet_nid_to_nid4(&lp->lp_disc_src_nid), lp->lp_push_mdh,
3602 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3603 LNET_PROTO_PING_MATCHBITS, 0, 0);
	/*
	 * reset the discovery nid. There is no need to restrict sending
	 * from that source, if we call lnet_push_update_to_peers(). It'll
	 * get set to a specific NID, if we initiate discovery from
	 * scratch.
	 */
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;

	if (rc)
		goto fail_unlink;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	return 0;

fail_unlink:
3623 LNetMDUnlink(lp->lp_push_mdh);
3624 LNetInvalidateMDHandle(&lp->lp_push_mdh);
fail_error:
	CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
	       lp, rc);
	/*
	 * The errors that get us here are considered hard errors and
	 * cause Discovery to terminate. So we clear PUSH_SENT, but do
	 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
	 * because the unlink event handler will have set it if we
	 * called LNetMDUnlink() above.
	 */
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
	return rc;
}
/*
 * An unrecoverable error was encountered during discovery.
 * Set error status in peer and abort discovery.
 */
static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
{
	CDEBUG(D_NET, "Discovery error %s: %d\n",
3647 libcfs_nidstr(&lp->lp_primary_nid), error);
3649 spin_lock(&lp->lp_lock);
3650 lp->lp_dc_error = error;
3651 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3652 lp->lp_state |= LNET_PEER_REDISCOVER;
	spin_unlock(&lp->lp_lock);
}
/*
 * Wait for work to be queued or some other change that must be
 * attended to. Returns non-zero if the discovery thread should shut
 * down.
 */
static int lnet_peer_discovery_wait_for_work(void)
{
	DEFINE_WAIT(wait);
	int cpt;
	int rc = 0;

	cpt = lnet_net_lock_current();
	for (;;) {
		prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
			break;
		if (lnet_push_target_resize_needed() ||
		    the_lnet.ln_push_target->pb_needs_post)
			break;
		if (!list_empty(&the_lnet.ln_dc_request))
			break;
		if (!list_empty(&the_lnet.ln_msg_resend))
			break;
3681 lnet_net_unlock(cpt);
		/*
		 * wakeup max every second to check if there are peers that
		 * have been stuck on the working queue for greater than
		 * the timeout.
		 */
		schedule_timeout(cfs_time_seconds(1));
3689 finish_wait(&the_lnet.ln_dc_waitq, &wait);
		cpt = lnet_net_lock_current();
	}
3692 finish_wait(&the_lnet.ln_dc_waitq, &wait);
	if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
		rc = -ESHUTDOWN;

	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "woken: %d\n", rc);

	return rc;
}
/*
 * Messages that were pending on a destroyed peer will be put on a global
 * resend list. The message resend list will be checked by
 * the discovery thread when it wakes up, and will resend messages. These
 * messages can still be sendable in the case the lpni which was the initial
 * cause of the message re-queue was transferred to another peer.
 *
 * It is possible that LNet could be shutdown while we're iterating
 * through the list. lnet_shutdown_lndnets() will attempt to access the
 * resend list, but will have to wait until the spinlock is released, by
 * which time there shouldn't be any more messages on the resend list.
 * During shutdown lnet_send() will fail and lnet_finalize() will be called
 * for the messages so they can be released. The other case is that
 * lnet_shutdown_lndnets() can finalize all the messages before this
 * function can visit the resend list, in which case this function will be
 * a no-op.
 */
static void lnet_resend_msgs(void)
{
	struct lnet_msg *msg, *tmp;
	LIST_HEAD(resend);
	int rc;
3727 spin_lock(&the_lnet.ln_msg_resend_lock);
	list_splice_init(&the_lnet.ln_msg_resend, &resend);
3729 spin_unlock(&the_lnet.ln_msg_resend_lock);
3731 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3732 list_del_init(&msg->msg_list);
3733 rc = lnet_send(msg->msg_src_nid_param, msg,
3734 msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
}
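/*
 * Note (descriptive): the resend list is spliced onto a local head
 * under ln_msg_resend_lock and the lock is dropped before any message
 * is sent, so lnet_send(), which may block and take other locks, is
 * never called with the spinlock held.
 */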
3744 /* The discovery thread. */
static int lnet_peer_discovery(void *arg)
{
	struct lnet_peer *lp;
	int rc;
3750 wait_for_completion(&the_lnet.ln_started);
	CDEBUG(D_NET, "started\n");

	for (;;) {
		if (lnet_peer_discovery_wait_for_work())
			break;
3758 if (lnet_push_target_resize_needed())
3759 lnet_push_target_resize();
3760 else if (the_lnet.ln_push_target->pb_needs_post)
3761 lnet_push_target_post(the_lnet.ln_push_target,
					      &the_lnet.ln_push_target_md);

		lnet_resend_msgs();

		lnet_net_lock(LNET_LOCK_EX);
		if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
			lnet_net_unlock(LNET_LOCK_EX);
			break;
		}
		/*
		 * Process all incoming discovery work requests. When
		 * discovery must wait on a peer to change state, it
		 * is added to the tail of the ln_dc_working queue. A
		 * timestamp keeps track of when the peer was added,
		 * so we can time out discovery requests that take too
		 * long.
		 */
3780 while (!list_empty(&the_lnet.ln_dc_request)) {
3781 lp = list_first_entry(&the_lnet.ln_dc_request,
3782 struct lnet_peer, lp_dc_list);
3783 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
			/*
			 * set the time the peer was put on the dc_working
			 * queue. It shouldn't remain on the queue
			 * forever, in case the GET message (for ping)
			 * doesn't get a REPLY or the PUT message (for
			 * push) doesn't get an ACK.
			 */
3791 lp->lp_last_queued = ktime_get_real_seconds();
3792 lnet_net_unlock(LNET_LOCK_EX);
3794 if (lnet_push_target_resize_needed())
3795 lnet_push_target_resize();
3796 else if (the_lnet.ln_push_target->pb_needs_post)
3797 lnet_push_target_post(the_lnet.ln_push_target,
3798 &the_lnet.ln_push_target_md);
			/*
			 * Select an action depending on the state of
			 * the peer and whether discovery is disabled.
			 * The check whether discovery is disabled is
			 * done after the code that handles processing
			 * for arrived data, cleanup for failures, and
			 * forcing a Ping or Push.
			 */
3808 spin_lock(&lp->lp_lock);
3809 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
			       libcfs_nidstr(&lp->lp_primary_nid), lp,
			       lp->lp_state);
			if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3813 LNET_PEER_MARK_DELETED))
3814 rc = lnet_peer_deletion(lp);
3815 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3816 rc = lnet_peer_data_present(lp);
3817 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3818 rc = lnet_peer_ping_failed(lp);
3819 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3820 rc = lnet_peer_push_failed(lp);
3821 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3822 rc = lnet_peer_send_ping(lp);
3823 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3824 rc = lnet_peer_send_push(lp);
3825 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3826 rc = lnet_peer_send_ping(lp);
3827 else if (lnet_peer_needs_push(lp))
3828 rc = lnet_peer_send_push(lp);
3830 rc = lnet_peer_discovered(lp);
3831 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
			       libcfs_nidstr(&lp->lp_primary_nid), lp,
			       lp->lp_state, rc);
			spin_unlock(&lp->lp_lock);
3836 lnet_net_lock(LNET_LOCK_EX);
			if (rc == LNET_REDISCOVER_PEER) {
				list_move(&lp->lp_dc_list,
					  &the_lnet.ln_dc_request);
			} else if (rc) {
				lnet_peer_discovery_error(lp, rc);
			}
3843 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3844 lnet_peer_discovery_complete(lp);
			if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
				break;
		}

		lnet_net_unlock(LNET_LOCK_EX);
	}
3853 CDEBUG(D_NET, "stopping\n");
	/*
	 * Clean up before telling lnet_peer_discovery_stop() that
	 * we're done. Use wake_up() below to somewhat reduce the
	 * size of the thundering herd if there are multiple threads
	 * waiting on discovery of a single peer.
	 */
3861 /* Queue cleanup 1: stop all pending pings and pushes. */
3862 lnet_net_lock(LNET_LOCK_EX);
3863 while (!list_empty(&the_lnet.ln_dc_working)) {
3864 lp = list_first_entry(&the_lnet.ln_dc_working,
3865 struct lnet_peer, lp_dc_list);
3866 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3867 lnet_net_unlock(LNET_LOCK_EX);
3868 lnet_peer_cancel_discovery(lp);
3869 lnet_net_lock(LNET_LOCK_EX);
3871 lnet_net_unlock(LNET_LOCK_EX);
3873 /* Queue cleanup 2: wait for the expired queue to clear. */
3874 while (!list_empty(&the_lnet.ln_dc_expired))
3875 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3877 /* Queue cleanup 3: clear the request queue. */
3878 lnet_net_lock(LNET_LOCK_EX);
3879 while (!list_empty(&the_lnet.ln_dc_request)) {
3880 lp = list_first_entry(&the_lnet.ln_dc_request,
3881 struct lnet_peer, lp_dc_list);
3882 lnet_peer_discovery_error(lp, -ESHUTDOWN);
		lnet_peer_discovery_complete(lp);
	}
3885 lnet_net_unlock(LNET_LOCK_EX);
3887 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3888 the_lnet.ln_dc_handler = NULL;
3890 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3891 wake_up(&the_lnet.ln_dc_waitq);
	CDEBUG(D_NET, "stopped\n");

	return 0;
}
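/*
 * Descriptive summary: the action selection in the work loop above is
 * strictly prioritized under lp_lock:
 *   marked deletion > data present > ping failed > push failed >
 *   forced ping > forced push > stale NIDs (ping) > needs push >
 *   discovered.
 * LNET_REDISCOVER_PEER puts the peer back on the request queue; any
 * other non-zero return records the error in lp_dc_error and aborts
 * discovery of that peer.
 */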
3898 /* ln_api_mutex is held on entry. */
int lnet_peer_discovery_start(void)
{
	struct task_struct *task;
	int rc = 0;

	if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
		return -EALREADY;
3907 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3908 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
	task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("Can't start peer discovery thread: %d\n", rc);

		the_lnet.ln_dc_handler = NULL;

		the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
	}
	CDEBUG(D_NET, "discovery start: %d\n", rc);

	return rc;
}
3924 /* ln_api_mutex is held on entry. */
void lnet_peer_discovery_stop(void)
{
	if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
		return;
3930 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3931 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3933 /* In the LNetNIInit() path we may be stopping discovery before it
	 * entered its work loop
	 */
3936 if (!completion_done(&the_lnet.ln_started))
3937 complete(&the_lnet.ln_started);
3939 wake_up(&the_lnet.ln_dc_waitq);
3941 wait_event(the_lnet.ln_dc_waitq,
3942 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3944 LASSERT(list_empty(&the_lnet.ln_dc_request));
3945 LASSERT(list_empty(&the_lnet.ln_dc_working));
3946 LASSERT(list_empty(&the_lnet.ln_dc_expired));
	CDEBUG(D_NET, "discovery stopped\n");
}
void
lnet_debug_peer(lnet_nid_t nid)
{
	char *aliveness = "NA";
	struct lnet_peer_ni *lp;
	int cpt;

	cpt = lnet_cpt_of_nid(nid, NULL);
	lnet_net_lock(cpt);
	lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lp)) {
		lnet_net_unlock(cpt);
		CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
		return;
	}
3970 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3971 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3973 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3974 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3975 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3976 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3977 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3979 lnet_peer_ni_decref_locked(lp);
	lnet_net_unlock(cpt);
}
3984 /* Gathering information for userspace. */
3986 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3987 char aliveness[LNET_MAX_STR_LEN],
3988 __u32 *cpt_iter, __u32 *refcount,
3989 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3990 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3991 __u32 *peer_tx_qnob)
{
	struct lnet_peer_table *peer_table;
	struct lnet_peer_ni *lp;
	int j;
	int lncpt;
	bool found = false;

	/* get the number of CPTs */
4000 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	/* if the cpt number to be examined is >= the number of cpts in
	 * the system then indicate that there are no more cpts to examine
	 */
	if (*cpt_iter >= lncpt)
		return -ENOENT;
4008 /* get the current table */
4009 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
	/* if the ptable is NULL then there are no more cpts to examine */
	if (peer_table == NULL)
		return -ENOENT;
4014 lnet_net_lock(*cpt_iter);
4016 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4017 struct list_head *peers = &peer_table->pt_hash[j];
4019 list_for_each_entry(lp, peers, lpni_hashlist) {
			if (!nid_is_nid4(&lp->lpni_nid))
				continue;
			if (peer_index-- > 0)
				continue;
4025 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4026 if (lnet_isrouter(lp) ||
4027 lnet_peer_aliveness_enabled(lp))
4028 snprintf(aliveness, LNET_MAX_STR_LEN,
4029 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4031 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4032 *refcount = kref_read(&lp->lpni_kref);
4033 *ni_peer_tx_credits =
4034 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4035 *peer_tx_credits = lp->lpni_txcredits;
4036 *peer_rtr_credits = lp->lpni_rtrcredits;
4037 *peer_min_rtr_credits = lp->lpni_mintxcredits;
			*peer_tx_qnob = lp->lpni_txqnob;

			found = true;
			break;
		}
	}

	lnet_net_unlock(*cpt_iter);

	return found ? 0 : -ENOENT;
}
4051 /* ln_api_mutex is held, which keeps the peer list stable */
4052 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
{
	struct lnet_ioctl_element_stats *lpni_stats;
4055 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4056 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4057 struct lnet_peer_ni_credit_info *lpni_info;
4058 struct lnet_peer_ni *lpni;
	struct lnet_peer *lp;
	lnet_nid_t nid;
	__u32 size;
	int rc;

	lp = lnet_find_peer(cfg->prcfg_prim_nid);
	if (!lp) {
		rc = -ENOENT;
		goto out;
	}
4071 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4072 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4073 size *= lp->lp_nnis;
	if (size > cfg->prcfg_size) {
		cfg->prcfg_size = size;
		rc = -E2BIG;
		goto out_lp_decref;
	}
4080 cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4081 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4082 cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4083 cfg->prcfg_count = lp->lp_nnis;
4084 cfg->prcfg_size = size;
4085 cfg->prcfg_state = lp->lp_state;
	/* Allocate helper buffers. */
	rc = -ENOMEM;
	LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
	if (!lpni_info)
		goto out_lp_decref;
	LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
	if (!lpni_stats)
		goto out_free_info;
	LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
	if (!lpni_msg_stats)
		goto out_free_stats;
	LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
	if (!lpni_hstats)
		goto out_free_msg_stats;
	rc = -EFAULT;
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
		if (!nid_is_nid4(&lpni->lpni_nid))
			continue;
		nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4109 if (copy_to_user(bulk, &nid, sizeof(nid)))
4110 goto out_free_hstats;
4111 bulk += sizeof(nid);
4113 memset(lpni_info, 0, sizeof(*lpni_info));
4114 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4115 if (lnet_isrouter(lpni) ||
4116 lnet_peer_aliveness_enabled(lpni))
4117 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4118 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4120 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4121 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4122 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4123 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4124 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4125 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4126 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4127 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4128 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4129 goto out_free_hstats;
4130 bulk += sizeof(*lpni_info);
4132 memset(lpni_stats, 0, sizeof(*lpni_stats));
4133 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4134 LNET_STATS_TYPE_SEND);
4135 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4136 LNET_STATS_TYPE_RECV);
4137 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4138 LNET_STATS_TYPE_DROP);
4139 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4140 goto out_free_hstats;
4141 bulk += sizeof(*lpni_stats);
4142 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4143 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4144 goto out_free_hstats;
4145 bulk += sizeof(*lpni_msg_stats);
4146 lpni_hstats->hlpni_network_timeout =
4147 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4148 lpni_hstats->hlpni_remote_dropped =
4149 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4150 lpni_hstats->hlpni_remote_timeout =
4151 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4152 lpni_hstats->hlpni_remote_error =
4153 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4154 lpni_hstats->hlpni_health_value =
4155 atomic_read(&lpni->lpni_healthv);
4156 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4157 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4158 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4159 goto out_free_hstats;
		bulk += sizeof(*lpni_hstats);
	}
	rc = 0;

out_free_hstats:
	LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
out_free_msg_stats:
	LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
out_free_stats:
	LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
out_free_info:
	LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
out_lp_decref:
	lnet_peer_decref_locked(lp);
out:
	return rc;
}
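/*
 * Usage sketch (hypothetical userspace flow, names assumed): callers
 * typically issue the ioctl twice. The first call, with a small
 * prcfg_size, fails after the kernel writes back the required size;
 * the second call supplies a bulk buffer of that size.
 */
#if 0
	cfg.prcfg_size = 0;
	rc = ioctl(fd, IOC_LIBCFS_GET_PEER_NI, &cfg);
	if (rc < 0 && errno == E2BIG) {
		bulk = malloc(cfg.prcfg_size);
		/* re-issue the ioctl with the bulk buffer attached */
	}
#endif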
4178 /* must hold net_lock/0 */
void
lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
				     struct list_head *recovery_queue,
				     time64_t now)
{
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return;

	if (!list_empty(&lpni->lpni_recovery))
		return;

	if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
		return;
	if (!lpni->lpni_last_alive) {
		CDEBUG(D_NET,
		       "lpni %s(%p) not eligible for recovery last alive %lld\n",
		       libcfs_nidstr(&lpni->lpni_nid), lpni,
		       lpni->lpni_last_alive);
		return;
	}
	if (lnet_recovery_limit &&
	    now > lpni->lpni_last_alive + lnet_recovery_limit) {
		CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
		       libcfs_nidstr(&lpni->lpni_nid),
		       lpni->lpni_last_alive);
		/* Reset the ping count so that if this peer NI is added back to
		 * the recovery queue we will send the first ping right away.
		 */
		lpni->lpni_ping_count = 0;
		return;
	}
4214 /* This peer NI is going on the recovery queue, so take a ref on it */
4215 lnet_peer_ni_addref_locked(lpni);
4217 lnet_peer_ni_set_next_ping(lpni, now);
4219 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4220 libcfs_nidstr(&lpni->lpni_nid),
4221 lpni->lpni_ping_count,
4222 lpni->lpni_next_ping,
4223 lpni->lpni_last_alive,
4224 atomic_read(&lpni->lpni_healthv));
	list_add_tail(&lpni->lpni_recovery, recovery_queue);
}
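/*
 * Descriptive summary: a peer NI is queued for recovery only when the
 * monitor thread is running, it is not already queued, its health is
 * below LNET_MAX_HEALTH_VALUE, it has been seen alive at least once,
 * and (if lnet_recovery_limit is set) it has not been down longer than
 * the recovery limit.
 */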
4229 /* Call with the ln_api_mutex held */
void
lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int lncpt;
	int cpt;
	time64_t now;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return;

	now = ktime_get_seconds();
	if (!all) {
		lnet_net_lock(LNET_LOCK_EX);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			lnet_net_unlock(LNET_LOCK_EX);
			return;
		}
4253 lnet_set_lpni_healthv_locked(lpni, value);
4254 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4255 &the_lnet.ln_mt_peerNIRecovq, now);
		lnet_peer_ni_decref_locked(lpni);
		lnet_net_unlock(LNET_LOCK_EX);
		return;
	}
4261 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	/*
	 * Walk all the peers and reset the health value for each one to the
	 * specified value.
	 */
4267 lnet_net_lock(LNET_LOCK_EX);
4268 for (cpt = 0; cpt < lncpt; cpt++) {
4269 ptable = the_lnet.ln_peer_tables[cpt];
4270 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
				list_for_each_entry(lpni, &lpn->lpn_peer_nis,
						    lpni_peer_nis) {
					lnet_set_lpni_healthv_locked(lpni,
								     value);
					lnet_peer_ni_add_to_recoveryq_locked(lpni,
						&the_lnet.ln_mt_peerNIRecovq,
						now);
				}
			}
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
}