/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			continue;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_NID_ANY;
	lp->lp_disc_dst_nid = LNET_NID_ANY;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * loopback.
	 */
	if (nid == LNET_NID_LO_0)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Can not delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}
/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}
static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
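
/*
 * Illustrative sketch only (not part of the original call graph; the
 * wrapper name is hypothetical): how a configuration path holding
 * ln_api_mutex might interpret the error codes documented above.
 */
static void __maybe_unused
lnet_peer_del_nid_example(struct lnet_peer *lp, lnet_nid_t nid)
{
	int rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);

	if (rc == -EBUSY)
		/* primary NID of a multi-NID peer: reassign the primary
		 * or delete the whole peer instead */
		CDEBUG(D_NET, "%s is a primary NID: %d\n",
		       libcfs_nid2str(nid), rc);
	else if (rc == -ENOENT || rc == -ECHILD)
		/* NID unknown, or attached to a different peer */
		CDEBUG(D_NET, "%s not on this peer: %d\n",
		       libcfs_nid2str(nid), rc);
}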
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -ESHUTDOWN;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep) {
		rc = -E2BIG;
		goto done;
	}

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
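
/*
 * Illustrative sketch only (hypothetical wrapper, schematic __user
 * buffer handling): the -E2BIG contract above lets an ioctl-style
 * caller discover the required buffer size and retry with a larger
 * user buffer.
 */
static int __maybe_unused
lnet_get_peer_list_example(struct lnet_process_id __user *ids, u32 bufsize)
{
	u32 count = 0;
	u32 size = bufsize;
	int rc;

	mutex_lock(&the_lnet.ln_api_mutex);
	rc = lnet_get_peer_list(&count, &size, ids);
	mutex_unlock(&the_lnet.ln_api_mutex);
	if (rc == -E2BIG)
		/* size now holds the bytes needed for count peers; the
		 * caller would resize its buffer and call again */
		CDEBUG(D_NET, "need %u bytes for %u peers\n", size, count);
	return rc;
}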
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nid2str(lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}

	return false;
}
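
/*
 * Illustrative sketch only (hypothetical helper): how a selection path
 * holding lnet_net_lock might consult the preferred-gateway list above
 * when choosing between two candidate routers for a destination.
 */
static lnet_nid_t __maybe_unused
lnet_pick_gw_example(struct lnet_peer_ni *dst, lnet_nid_t gw1,
		     lnet_nid_t gw2)
{
	if (lnet_peer_is_pref_rtr_locked(dst, gw1))
		return gw1;
	if (lnet_peer_is_pref_rtr_locked(dst, gw2))
		return gw2;
	/* no preference recorded: fall back to the first candidate */
	return gw1;
}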
void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list cannot be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}

	return false;
}
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);

	return rc;
}
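
/*
 * Illustrative sketch only (hypothetical wrapper): the setter above is
 * how lnet_peer_ni_traffic_add() further down pins a non-MR peer to the
 * path its traffic first arrived on, so replies keep using it.
 */
static void __maybe_unused
lnet_peer_ni_pin_path_example(struct lnet_peer_ni *lpni, lnet_nid_t pref)
{
	if (pref != LNET_NID_ANY)
		/* -EPERM here just means a preference already exists */
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
}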
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);

	return rc;
}
void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				LIBCFS_FREE(ne1, alloc_size);
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}

		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);

	return rc;
}
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);

	return rc;
}
void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
	while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
		spin_unlock(&lp->lp_lock);

		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		/* The lpni (or lp) for this NID may have changed and our ref is
		 * the only thing keeping the old one around. Release the ref
		 * and lookup the lpni again
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;

		/* If we find that the peer has discovery disabled then we will
		 * not modify whatever primary NID is currently set for this
		 * peer. Thus, we can break out of this loop even if the peer
		 * is not fully up to date.
		 */
		if (lnet_is_discovery_disabled(lp))
			break;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);

	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
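
/*
 * Illustrative sketch only (hypothetical wrapper): upper layers call
 * the exported LNetPrimaryNID() to normalize any NID of a multi-rail
 * peer to one stable identity before keying state on it.
 */
static lnet_nid_t __maybe_unused
lnet_primary_nid_example(lnet_nid_t any_nid)
{
	/* may block on discovery; do not call while holding LNet locks */
	return LNetPrimaryNID(any_nid);
}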
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (lp->lp_primary_nid == lpni->lpni_nid)
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (lp->lp_primary_nid == lpni->lpni_nid)
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nid2str(lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *rtr_lp =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = rtr_lp->lp_rtr_refcount;
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(rtr_lp, lp);
			}
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;

	lp->lp_primary_nid = nid;

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		lp->lp_primary_nid = old;

out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
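
/*
 * Illustrative sketch only (hypothetical wrapper, hypothetical NIDs):
 * building a two-NID Multi-Rail peer the way "lnetctl peer add" does
 * via IOC_LIBCFS_ADD_PEER_NI.
 */
static int __maybe_unused
lnet_add_mr_peer_example(lnet_nid_t prim_nid, lnet_nid_t second_nid)
{
	int rc;

	mutex_lock(&the_lnet.ln_api_mutex);
	/* create the peer with its primary NID, marked Multi-Rail */
	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
	if (!rc)
		/* add a second NID to the now-existing peer */
		rc = lnet_add_peer_ni(prim_nid, second_nid, true);
	mutex_unlock(&the_lnet.ln_api_mutex);
	return rc;
}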
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Can not be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
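
/*
 * Illustrative sketch only (hypothetical wrapper; caller must hold
 * ln_api_mutex): the two deletion semantics above side by side.
 */
static void __maybe_unused
lnet_del_peer_example(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	/* removes a single NI from the peer */
	lnet_del_peer_ni(prim_nid, nid);

	/* passing LNET_NID_ANY (or prim_nid itself) removes the peer */
	lnet_del_peer_ni(prim_nid, LNET_NID_ANY);
}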
void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}
bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}
/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_peer *lp;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		pbuf->pb_needs_post = true;
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * FORCE_PING flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		/*
		 * Mark the peer for deletion if we already know about it
		 * and it's going from discovery set to no discovery set
		 */
		if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
				      LNET_PEER_DISCOVERING)) &&
		    lp->lp_state & LNET_PEER_DISCOVERED) {
			CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       lp->lp_state);
			lp->lp_state |= LNET_PEER_MARK_DELETION;
		}
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Update the MULTI_RAIL flag based on the push. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in.
	 * NB: We verified above that the MR feature bit is set in pi_features
	 */
	if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "peer %s(%p) is MR\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
		CWARN("Push says %s is Multi-Rail, DLC says not\n",
		      libcfs_nid2str(lp->lp_primary_nid));
	} else if (lnet_peer_discovery_disabled) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
	} else {
		CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
		       libcfs_nid2str(lp->lp_primary_nid), lp);
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
		lnet_peer_clr_non_mr_pref_nids(lp);
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (ev->mlength < ev->rlength) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/* always assume new data */
	lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
			LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/* We've processed this buffer. It can be reposted */
	pbuf->pb_needs_post = true;

	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
2396 * Clear the discovery error state, unless we're already discovering
2397 * this peer, in which case the error is current.
2399 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2401 spin_lock(&lp->lp_lock);
2402 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2403 lp->lp_dc_error = 0;
2404 spin_unlock(&lp->lp_lock);
2408 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2409 * dropped/retaken within this function. An lnet_peer_ni is passed in
2410 * because discovery could tear down an lnet_peer.
2413 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2416 struct lnet_peer *lp;
2421 lnet_net_unlock(cpt);
2422 lnet_net_lock(LNET_LOCK_EX);
2423 lp = lpni->lpni_peer_net->lpn_peer;
2424 lnet_peer_clear_discovery_error(lp);
2425 again:
2427 * We're willing to be interrupted. The lpni can become a
2428 * zombie if we race with DLC, so we must check for that.
2431 /* Keep lp alive when the lnet_net_lock is unlocked */
2432 lnet_peer_addref_locked(lp);
2433 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2434 if (signal_pending(current))
2435 break;
2436 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2437 break;
2439 * Don't repeat discovery if discovery is disabled. This is
2440 * done to ensure we can use discovery as a standard ping as
2441 * well for backwards compatibility with routers which do not
2442 * have discovery or have discovery disabled
2444 if (lnet_is_discovery_disabled(lp) && count > 0)
2445 break;
2446 if (lp->lp_dc_error)
2447 break;
2448 if (lnet_peer_is_uptodate(lp))
2449 break;
2450 lnet_peer_queue_for_discovery(lp);
2451 count++;
2452 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2455 * If caller requested a non-blocking operation then
2456 * return immediately. Once discovery is complete any
2457 * pending messages that were stopped due to discovery
2458 * will be transmitted.
2459 if (!block)
2460 break;
2463 lnet_net_unlock(LNET_LOCK_EX);
2464 schedule();
2465 finish_wait(&lp->lp_dc_waitq, &wait);
2466 lnet_net_lock(LNET_LOCK_EX);
2467 lnet_peer_decref_locked(lp);
2468 /* Peer may have changed */
2469 lp = lpni->lpni_peer_net->lpn_peer;
2471 finish_wait(&lp->lp_dc_waitq, &wait);
2473 lnet_net_unlock(LNET_LOCK_EX);
2475 lnet_peer_decref_locked(lp);
2477 * The peer may have changed, so re-check and rediscover if that turns
2478 * out to have been the case. The reference count on lp ensured that
2479 * even if it was unlinked from lpni the memory could not be recycled.
2480 * Thus the check below is sufficient to determine whether the peer
2481 * changed. If the peer changed, then lp must not be dereferenced.
2483 if (lp != lpni->lpni_peer_net->lpn_peer)
2484 goto again;
2486 if (signal_pending(current))
2487 rc = -EINTR;
2488 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2489 rc = -ESHUTDOWN;
2490 else if (lp->lp_dc_error)
2491 rc = lp->lp_dc_error;
2492 else if (!block)
2493 CDEBUG(D_NET, "non-blocking discovery\n");
2494 else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2495 goto again;
2497 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2498 (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2499 libcfs_nid2str(lpni->lpni_nid), rc,
2500 (!block) ? "pending discovery" : "discovery complete");
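/*
 * Illustrative caller sketch (an editorial addition, not part of the
 * original file): the expected pattern for invoking the slow path
 * above. The caller holds ln_api_mutex and the per-CPT net lock; the
 * function names match those used elsewhere in this file.
 *
 *	lnet_net_lock(cpt);
 *	lpni = lnet_find_peer_ni_locked(nid);
 *	if (lpni) {
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *		lnet_peer_ni_decref_locked(lpni);
 *	}
 *	lnet_net_unlock(cpt);
 */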
2505 /* Handle an incoming ack for a push. */
2507 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2509 struct lnet_ping_buffer *pbuf;
2511 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2512 spin_lock(&lp->lp_lock);
2513 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2514 lp->lp_push_error = ev->status;
2515 if (ev->status)
2516 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2517 else
2518 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2519 spin_unlock(&lp->lp_lock);
2521 CDEBUG(D_NET, "peer %s ev->status %d\n",
2522 libcfs_nid2str(lp->lp_primary_nid), ev->status);
2525 /* Handle a Reply message. This is the reply to a Ping message. */
2527 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2529 struct lnet_ping_buffer *pbuf;
2532 spin_lock(&lp->lp_lock);
2534 lp->lp_disc_src_nid = ev->target.nid;
2535 lp->lp_disc_dst_nid = ev->source.nid;
2538 * If some kind of error happened the contents of the message
2539 * cannot be used. Set PING_FAILED to trigger a retry.
2542 lp->lp_state |= LNET_PEER_PING_FAILED;
2543 lp->lp_ping_error = ev->status;
2544 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2545 ev->status,
2546 libcfs_nid2str(lp->lp_primary_nid),
2547 libcfs_nid2str(ev->source.nid));
2551 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2552 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2553 lnet_swap_pinginfo(pbuf);
2556 * A reply with invalid or corrupted info. Set PING_FAILED to
2557 * trigger a retry.
2559 rc = lnet_ping_info_validate(&pbuf->pb_info);
2561 lp->lp_state |= LNET_PEER_PING_FAILED;
2562 lp->lp_ping_error = 0;
2563 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2564 libcfs_nid2str(lp->lp_primary_nid), rc);
2569 * The peer may have discovery disabled at its end. Set
2570 * NO_DISCOVERY as appropriate.
2572 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2573 lnet_peer_discovery_disabled) {
2574 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2575 libcfs_nid2str(lp->lp_primary_nid));
2577 /* Detect whether this peer has toggled discovery from on to
2578 * off and whether we can delete and re-create the peer. Peers
2579 * that were manually configured cannot be deleted by discovery.
2580 * We need to delete this peer and re-create it if the peer was
2581 * not configured manually, is currently considered DD capable,
2582 * and either:
2583 * 1. We've already discovered the peer (the peer has toggled
2584 * the discovery feature from on to off), or
2585 * 2. The peer is considered MR, but it was not user configured
2586 * (this was a "temporary" peer created via the kernel APIs
2587 * that we're discovering for the first time)
2589 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2590 LNET_PEER_NO_DISCOVERY)) &&
2591 (lp->lp_state & (LNET_PEER_DISCOVERED |
2592 LNET_PEER_MULTI_RAIL))) {
2593 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2594 libcfs_nid2str(lp->lp_primary_nid),
2595 lp->lp_state);
2596 lp->lp_state |= LNET_PEER_MARK_DELETION;
2598 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2599 } else {
2600 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2601 libcfs_nid2str(lp->lp_primary_nid));
2602 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2606 * Update the MULTI_RAIL flag based on the reply. If the peer
2607 * was configured with DLC then the setting should match what
2608 * DLC has set.
2610 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2611 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2612 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2613 libcfs_nid2str(lp->lp_primary_nid), lp);
2614 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2615 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2616 libcfs_nid2str(lp->lp_primary_nid));
2617 } else if (lnet_peer_discovery_disabled) {
2618 CDEBUG(D_NET,
2619 "peer %s(%p) not MR: DD disabled locally\n",
2620 libcfs_nid2str(lp->lp_primary_nid), lp);
2621 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2622 CDEBUG(D_NET,
2623 "peer %s(%p) not MR: DD disabled remotely\n",
2624 libcfs_nid2str(lp->lp_primary_nid), lp);
2625 } else {
2626 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2627 libcfs_nid2str(lp->lp_primary_nid), lp);
2628 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2629 lnet_peer_clr_non_mr_pref_nids(lp);
2631 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2632 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2633 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2634 libcfs_nid2str(lp->lp_primary_nid));
2635 } else {
2636 CERROR("Multi-Rail state vanished from %s\n",
2637 libcfs_nid2str(lp->lp_primary_nid));
2638 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2643 * Make sure we'll allocate the correct size ping buffer when
2644 * pinging the peer.
2646 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2647 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2650 * Check for truncation of the Reply. Clear PING_SENT and set
2651 * PING_FAILED to trigger a retry.
2653 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2654 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2655 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2656 lp->lp_state |= LNET_PEER_PING_FAILED;
2657 lp->lp_ping_error = 0;
2658 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2659 libcfs_nid2str(lp->lp_primary_nid),
2660 pbuf->pb_info.pi_nnis);
2665 * Check the sequence numbers in the reply. These are only
2666 * available if the reply came from a Multi-Rail peer.
2668 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2669 pbuf->pb_info.pi_nnis > 1 &&
2670 lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2671 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2672 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2673 libcfs_nid2str(lp->lp_primary_nid),
2674 LNET_PING_BUFFER_SEQNO(pbuf),
2675 lp->lp_peer_seqno);
2677 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2680 /* We're happy with the state of the data in the buffer. */
2681 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2682 libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2683 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2684 lnet_ping_buffer_decref(lp->lp_data);
2685 else
2686 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2687 lnet_ping_buffer_addref(pbuf);
2688 lp->lp_data = pbuf;
2690 lp->lp_state &= ~LNET_PEER_PING_SENT;
2691 spin_unlock(&lp->lp_lock);
2693 lnet_net_lock(LNET_LOCK_EX);
2695 * If this peer is a gateway, call the routing callback to
2696 * handle the ping reply
2698 if (lp->lp_rtr_refcount > 0)
2699 lnet_router_discovery_ping_reply(lp);
2700 lnet_net_unlock(LNET_LOCK_EX);
2704 * Send event handling. Only matters for error cases, where we clean
2705 * up state on the peer and peer_ni that would otherwise be updated in
2706 * the REPLY event handler for a successful Ping, and the ACK event
2707 * handler for a successful Push.
2710 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2717 spin_lock(&lp->lp_lock);
2718 if (ev->msg_type == LNET_MSG_GET) {
2719 lp->lp_state &= ~LNET_PEER_PING_SENT;
2720 lp->lp_state |= LNET_PEER_PING_FAILED;
2721 lp->lp_ping_error = ev->status;
2722 } else { /* ev->msg_type == LNET_MSG_PUT */
2723 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2724 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2725 lp->lp_push_error = ev->status;
2727 spin_unlock(&lp->lp_lock);
2728 rc = LNET_REDISCOVER_PEER;
2730 CDEBUG(D_NET, "%s Send to %s: %d\n",
2731 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2732 libcfs_nid2str(ev->target.nid), rc);
2737 * Unlink event handling. This event is only seen if a call to
2738 * LNetMDUnlink() caused the event to be unlinked. If this call was
2739 * made after the event was set up in LNetGet() or LNetPut() then we
2740 * assume the Ping or Push timed out.
2743 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2745 spin_lock(&lp->lp_lock);
2746 /* We've passed through LNetGet() */
2747 if (lp->lp_state & LNET_PEER_PING_SENT) {
2748 lp->lp_state &= ~LNET_PEER_PING_SENT;
2749 lp->lp_state |= LNET_PEER_PING_FAILED;
2750 lp->lp_ping_error = -ETIMEDOUT;
2751 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2752 libcfs_nid2str(lp->lp_primary_nid));
2754 /* We've passed through LNetPut() */
2755 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2756 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2757 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2758 lp->lp_push_error = -ETIMEDOUT;
2759 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2760 libcfs_nid2str(lp->lp_primary_nid));
2762 spin_unlock(&lp->lp_lock);
2766 * Event handler for the discovery EQ.
2768 * Called with lnet_res_lock(cpt) held. The cpt is the
2769 * lnet_cpt_of_cookie() of the md handle cookie.
2771 static void lnet_discovery_event_handler(struct lnet_event *event)
2773 struct lnet_peer *lp = event->md_user_ptr;
2774 struct lnet_ping_buffer *pbuf;
2777 /* discovery needs to take another look */
2778 rc = LNET_REDISCOVER_PEER;
2780 CDEBUG(D_NET, "Received event: %d\n", event->type);
2782 switch (event->type) {
2783 case LNET_EVENT_ACK:
2784 lnet_discovery_event_ack(lp, event);
2785 break;
2786 case LNET_EVENT_REPLY:
2787 lnet_discovery_event_reply(lp, event);
2788 break;
2789 case LNET_EVENT_SEND:
2790 /* Only send failure triggers a retry. */
2791 rc = lnet_discovery_event_send(lp, event);
2792 break;
2793 case LNET_EVENT_UNLINK:
2794 /* LNetMDUnlink() was called */
2795 lnet_discovery_event_unlink(lp, event);
2796 break;
2797 default:
2798 /* Invalid events. */
2799 LBUG();
2801 lnet_net_lock(LNET_LOCK_EX);
2802 if (event->unlinked) {
2803 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2804 lnet_ping_buffer_decref(pbuf);
2805 lnet_peer_decref_locked(lp);
2808 /* put peer back at end of request queue, if discovery not already
2809 * complete */
2810 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2811 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2812 wake_up(&the_lnet.ln_dc_waitq);
2814 lnet_net_unlock(LNET_LOCK_EX);
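/*
 * Summary of the dispatch above: ACK completes a Push, REPLY completes
 * a Ping, SEND matters only on failure, and UNLINK signals a timed-out
 * Ping or Push. Any other event type is a bug.
 */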
2818 * Build a peer from incoming data.
2820 * The NIDs in the incoming data are supposed to be structured as follows:
2821 * - loopback
2822 * - primary NID
2823 * - other NIDs in same net
2824 * - NIDs in second net
2825 * - NIDs in third net
2827 * This is due to the way the list of NIDs in the data is created.
2829 * Note that this function will mark the peer uptodate unless an
2830 * ENOMEM is encountered. All other errors are due to a conflict
2831 * between the DLC configuration and what discovery sees. We treat DLC
2832 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2833 * peer from becoming stuck in discovery.
2835 static int lnet_peer_merge_data(struct lnet_peer *lp,
2836 struct lnet_ping_buffer *pbuf)
2838 struct lnet_peer_net *lpn;
2839 struct lnet_peer_ni *lpni;
2840 lnet_nid_t *curnis = NULL;
2841 struct lnet_ni_status *addnis = NULL;
2842 lnet_nid_t *delnis = NULL;
2852 flags = LNET_PEER_DISCOVERED;
2853 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2854 flags |= LNET_PEER_MULTI_RAIL;
2857 * Cache the routing feature for the peer; whether it is enabled
2858 * or disabled as reported by the remote peer.
2860 spin_lock(&lp->lp_lock);
2861 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2862 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2864 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2865 spin_unlock(&lp->lp_lock);
2867 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2868 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2869 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2870 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2871 if (!curnis || !addnis || !delnis) {
2872 rc = -ENOMEM;
2873 goto out;
2879 /* Construct the list of NIDs present in peer. */
2880 lpni = NULL;
2881 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2882 curnis[ncurnis++] = lpni->lpni_nid;
2885 * Check for NIDs in pbuf not present in curnis[].
2886 * The loop starts at 1 to skip the loopback NID.
2888 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2889 for (j = 0; j < ncurnis; j++)
2890 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2891 break;
2892 if (j == ncurnis)
2893 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2896 * Check for NIDs in curnis[] not present in pbuf.
2897 * The nested loop starts at 1 to skip the loopback NID.
2899 * But never add the loopback NID to delnis[]: if it is
2900 * present in curnis[] then this peer is for this node.
2902 for (i = 0; i < ncurnis; i++) {
2903 if (curnis[i] == LNET_NID_LO_0)
2905 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2906 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2908 * update the information we cache for the
2909 * peer with the latest information we
2910 * received
2912 lpni = lnet_find_peer_ni_locked(curnis[i]);
2913 if (lpni) {
2914 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2915 lnet_peer_ni_decref_locked(lpni);
2920 if (j == pbuf->pb_info.pi_nnis)
2921 delnis[ndelnis++] = curnis[i];
2925 * If we get here and the discovery is disabled then we don't want
2926 * to add or delete any NIs. We just updated the ones we have some
2927 * information on, and call it a day
2930 if (lnet_is_discovery_disabled(lp))
2931 goto out;
2933 for (i = 0; i < naddnis; i++) {
2934 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2935 if (rc) {
2936 CERROR("Error adding NID %s to peer %s: %d\n",
2937 libcfs_nid2str(addnis[i].ns_nid),
2938 libcfs_nid2str(lp->lp_primary_nid), rc);
2942 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2943 if (lpni) {
2944 lpni->lpni_ns_status = addnis[i].ns_status;
2945 lnet_peer_ni_decref_locked(lpni);
2949 for (i = 0; i < ndelnis; i++) {
2951 * for routers it's okay to delete the primary_nid because
2952 * the upper layers don't really rely on it. So if we're
2953 * being told that the router changed its primary_nid
2954 * then it's okay to delete it.
2956 if (lp->lp_rtr_refcount > 0)
2957 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2958 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2959 if (rc) {
2960 CERROR("Error deleting NID %s from peer %s: %d\n",
2961 libcfs_nid2str(delnis[i]),
2962 libcfs_nid2str(lp->lp_primary_nid), rc);
2968 /* The peer net for the primary NID should be the first entry in the
2969 * peer's lp_peer_nets list, and the peer NI for the primary NID should
2970 * be the first entry in its peer net's lpn_peer_nis list.
2972 lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
2973 if (!lpni) {
2974 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
2975 libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
2979 lnet_peer_ni_decref_locked(lpni);
2981 lpn = lpni->lpni_peer_net;
2982 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
2983 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
2985 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
2986 list_move(&lpni->lpni_peer_nis,
2987 &lpni->lpni_peer_net->lpn_peer_nis);
2990 * Errors other than -ENOMEM are due to peers having been
2991 * configured with DLC. Ignore these because DLC overrides
2992 * Discovery.
2994 rc = 0;
2995 out:
2996 CFS_FREE_PTR_ARRAY(curnis, nnis);
2997 CFS_FREE_PTR_ARRAY(addnis, nnis);
2998 CFS_FREE_PTR_ARRAY(delnis, nnis);
2999 lnet_ping_buffer_decref(pbuf);
3000 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3003 spin_lock(&lp->lp_lock);
3004 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3005 lp->lp_state |= LNET_PEER_FORCE_PING;
3006 spin_unlock(&lp->lp_lock);
3012 * The data in pbuf says lp is its primary peer, but the data was
3013 * received by a different peer. Try to update lp with the data.
3016 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3018 struct lnet_handle_md mdh;
3020 /* Queue lp for discovery, and force it on the request queue. */
3021 lnet_net_lock(LNET_LOCK_EX);
3022 if (lnet_peer_queue_for_discovery(lp))
3023 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3024 lnet_net_unlock(LNET_LOCK_EX);
3026 LNetInvalidateMDHandle(&mdh);
3029 * Decide whether we can move the peer to the DATA_PRESENT state.
3031 * We replace stale data for a multi-rail peer, repair PING_FAILED
3032 * status, and preempt FORCE_PING.
3034 * If after that we have DATA_PRESENT, we merge it into this peer.
3036 spin_lock(&lp->lp_lock);
3037 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3038 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3039 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3040 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3041 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3042 lnet_ping_buffer_decref(pbuf);
3043 pbuf = lp->lp_data;
3044 lp->lp_data = NULL;
3047 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3048 lnet_ping_buffer_decref(lp->lp_data);
3049 lp->lp_data = NULL;
3050 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3052 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3053 mdh = lp->lp_ping_mdh;
3054 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3055 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3056 lp->lp_ping_error = 0;
3058 if (lp->lp_state & LNET_PEER_FORCE_PING)
3059 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3060 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3061 spin_unlock(&lp->lp_lock);
3063 if (!LNetMDHandleIsInvalid(mdh))
3064 LNetMDUnlink(mdh);
3066 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3067 return lnet_peer_merge_data(lp, pbuf);
3069 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3073 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3077 for (i = 0; i < pinfo->pi_nnis; i++) {
3078 if (pinfo->pi_ni[i].ns_nid == nid)
3079 return true;
3082 return false;
3085 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3086 * to the discovery queue a reference was taken that will prevent the peer from
3087 * actually being freed by this function. After this function exits the
3088 * discovery thread should call lnet_peer_discovery_complete() which will
3089 * drop that reference as well as wake any waiters that may also be holding a
3090 * reference on the peer.
3092 static int lnet_peer_deletion(struct lnet_peer *lp)
3093 __must_hold(&lp->lp_lock)
3095 struct list_head rlist;
3096 struct lnet_route *route, *tmp;
3097 int sensitivity = lp->lp_health_sensitivity;
3099 INIT_LIST_HEAD(&rlist);
3101 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3102 LNET_PEER_FORCE_PUSH);
3103 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3104 libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3106 /* no-op if lnet_peer_del() has already been called on this peer */
3107 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3110 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3113 spin_unlock(&lp->lp_lock);
3115 mutex_lock(&the_lnet.ln_api_mutex);
3117 lnet_net_lock(LNET_LOCK_EX);
3118 /* remove the peer from the discovery work
3119 * queue if it's on there in preparation
3120 * for deletion
3122 if (!list_empty(&lp->lp_dc_list))
3123 list_del_init(&lp->lp_dc_list);
3124 list_for_each_entry_safe(route, tmp,
3125 &lp->lp_routes,
3126 lr_gwlist)
3127 lnet_move_route(route, NULL, &rlist);
3128 lnet_net_unlock(LNET_LOCK_EX);
3130 /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3133 list_for_each_entry_safe(route, tmp,
3134 &rlist, lr_list) {
3135 /* re-add these routes */
3136 lnet_add_route(route->lr_net,
3137 route->lr_hops,
3138 route->lr_nid,
3139 route->lr_priority,
3140 sensitivity);
3141 LIBCFS_FREE(route, sizeof(*route));
3144 mutex_unlock(&the_lnet.ln_api_mutex);
3146 spin_lock(&lp->lp_lock);
3152 * Update a peer using the data received.
3154 static int lnet_peer_data_present(struct lnet_peer *lp)
3155 __must_hold(&lp->lp_lock)
3157 struct lnet_ping_buffer *pbuf;
3158 struct lnet_peer_ni *lpni;
3159 lnet_nid_t nid = LNET_NID_ANY;
3163 pbuf = lp->lp_data;
3164 lp->lp_data = NULL;
3165 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3166 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3167 spin_unlock(&lp->lp_lock);
3170 * Modifications of peer structures are done while holding the
3171 * ln_api_mutex. A global lock is required because we may be
3172 * modifying multiple peer structures, and a mutex greatly
3173 * simplifies memory management.
3175 * The actual changes to the data structures must also protect
3176 * against concurrent lookups, for which the lnet_net_lock in
3177 * LNET_LOCK_EX mode is used.
3179 mutex_lock(&the_lnet.ln_api_mutex);
3180 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3181 rc = -ESHUTDOWN;
3182 lnet_ping_buffer_decref(pbuf);
3183 goto out;
3186 * If this peer is not on the peer list then it is being torn
3187 * down, and our reference count may be all that is keeping it
3188 * alive. Don't do any work on it.
3190 if (list_empty(&lp->lp_peer_list))
3191 goto out;
3193 flags = LNET_PEER_DISCOVERED;
3194 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3195 flags |= LNET_PEER_MULTI_RAIL;
3198 * Check whether the primary NID in the message matches the
3199 * primary NID of the peer. If it does, update the peer, if
3200 * it does not, check whether there is already a peer with
3201 * that primary NID. If no such peer exists, try to update
3202 * the primary NID of the current peer (allowed if it was
3203 * created due to message traffic) and complete the update.
3204 * If the peer did exist, hand off the data to it.
3206 * The peer for the loopback interface is a special case: this
3207 * is the peer for the local node, and we want to set its
3208 * primary NID to the correct value here. Moreover, this peer
3209 * can show up with only the loopback NID in the ping buffer.
3211 if (pbuf->pb_info.pi_nnis <= 1)
3212 goto out;
3213 nid = pbuf->pb_info.pi_ni[1].ns_nid;
3214 if (lp->lp_primary_nid == LNET_NID_LO_0) {
3215 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3216 if (!rc)
3217 rc = lnet_peer_merge_data(lp, pbuf);
3219 * if the primary nid of the peer is present in the ping info returned
3220 * from the peer, but it's not the local primary peer we have
3221 * cached and discovery is disabled, then we don't want to update
3222 * our local peer info by adding or removing NIDs; we just want
3223 * to update the status of the nids that we currently have
3224 * recorded in that peer.
3226 } else if (lp->lp_primary_nid == nid ||
3227 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3228 lnet_is_discovery_disabled(lp))) {
3229 rc = lnet_peer_merge_data(lp, pbuf);
3231 lpni = lnet_find_peer_ni_locked(nid);
3232 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3233 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3234 if (rc) {
3235 CERROR("Primary NID error %s versus %s: %d\n",
3236 libcfs_nid2str(lp->lp_primary_nid),
3237 libcfs_nid2str(nid), rc);
3238 } else {
3239 rc = lnet_peer_merge_data(lp, pbuf);
3240 }
3241 if (lpni)
3242 lnet_peer_ni_decref_locked(lpni);
3243 } else {
3244 struct lnet_peer *new_lp;
3245 new_lp = lpni->lpni_peer_net->lpn_peer;
3247 * if lp has discovery/MR enabled that means new_lp
3248 * should have discovery/MR enabled as well, since
3249 * it's the same peer, which we're about to merge
3251 spin_lock(&lp->lp_lock);
3252 spin_lock(&new_lp->lp_lock);
3253 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3254 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3255 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3256 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3257 /* If we're processing a ping reply then we may be
3258 * about to send a push to the peer that we ping'd.
3259 * Since the ping reply that we're processing was
3260 * received by lp, we need to set the discovery source
3261 * NID for new_lp to the NID stored in lp.
3263 if (lp->lp_disc_src_nid != LNET_NID_ANY) {
3264 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3265 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3267 spin_unlock(&new_lp->lp_lock);
3268 spin_unlock(&lp->lp_lock);
3270 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3271 lnet_consolidate_routes_locked(lp, new_lp);
3272 lnet_peer_ni_decref_locked(lpni);
3275 out:
3276 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3277 lp->lp_state);
3278 mutex_unlock(&the_lnet.ln_api_mutex);
3280 spin_lock(&lp->lp_lock);
3281 /* Tell discovery to re-check the peer immediately. */
3282 if (!rc)
3283 rc = LNET_REDISCOVER_PEER;
3285 return rc;
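/*
 * Sketch of the primary-NID decision made above (msg = incoming data):
 *	- peer's primary is the loopback NID: adopt msg primary, merge
 *	- msg primary == cached primary: merge
 *	- cached primary present in msg but discovery disabled: merge,
 *	  updating only the status of known NIDs
 *	- msg primary unknown, or already ours: retarget primary, merge
 *	- msg primary owned by another peer: hand the data to that peer
 */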
3288 * A ping failed. Clear the PING_FAILED state and set the
3289 * FORCE_PING state, to ensure a retry even if discovery is
3290 * disabled. This avoids being left with incorrect state.
3292 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3293 __must_hold(&lp->lp_lock)
3295 struct lnet_handle_md mdh;
3298 mdh = lp->lp_ping_mdh;
3299 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3300 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3301 lp->lp_state |= LNET_PEER_FORCE_PING;
3302 rc = lp->lp_ping_error;
3303 lp->lp_ping_error = 0;
3304 spin_unlock(&lp->lp_lock);
3306 if (!LNetMDHandleIsInvalid(mdh))
3307 LNetMDUnlink(mdh);
3309 CDEBUG(D_NET, "peer %s:%d\n",
3310 libcfs_nid2str(lp->lp_primary_nid), rc);
3312 spin_lock(&lp->lp_lock);
3313 return rc ? rc : LNET_REDISCOVER_PEER;
3316 /* Active side of ping. */
3317 static int lnet_peer_send_ping(struct lnet_peer *lp)
3318 __must_hold(&lp->lp_lock)
3324 lp->lp_state |= LNET_PEER_PING_SENT;
3325 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3326 spin_unlock(&lp->lp_lock);
3328 cpt = lnet_net_lock_current();
3329 /* Refcount for MD. */
3330 lnet_peer_addref_locked(lp);
3331 lnet_net_unlock(cpt);
3333 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3335 rc = lnet_send_ping(lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3336 the_lnet.ln_dc_handler, false);
3339 * if LNetMDBind in lnet_send_ping fails we need to decrement the
3340 * refcount on the peer, otherwise LNetMDUnlink will be called
3341 * which will eventually do that.
3343 if (rc > 0) {
3344 lnet_net_lock(cpt);
3345 lnet_peer_decref_locked(lp);
3346 lnet_net_unlock(cpt);
3347 rc = -rc; /* change the rc to negative value */
3349 } else if (rc < 0) {
3353 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3355 spin_lock(&lp->lp_lock);
3359 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3361 * The errors that get us here are considered hard errors and
3362 * cause Discovery to terminate. So we clear PING_SENT, but do
3363 * not set either PING_FAILED or FORCE_PING. In fact we need
3364 * to clear PING_FAILED, because the unlink event handler will
3365 * have set it if we called LNetMDUnlink() above.
3367 spin_lock(&lp->lp_lock);
3368 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3373 * This function exists because you cannot call LNetMDUnlink() from an
3374 * event handler.
3376 static int lnet_peer_push_failed(struct lnet_peer *lp)
3377 __must_hold(&lp->lp_lock)
3379 struct lnet_handle_md mdh;
3382 mdh = lp->lp_push_mdh;
3383 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3384 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3385 rc = lp->lp_push_error;
3386 lp->lp_push_error = 0;
3387 spin_unlock(&lp->lp_lock);
3389 if (!LNetMDHandleIsInvalid(mdh))
3390 LNetMDUnlink(mdh);
3392 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3393 spin_lock(&lp->lp_lock);
3394 return rc ? rc : LNET_REDISCOVER_PEER;
3398 * Mark the peer as discovered.
3400 static int lnet_peer_discovered(struct lnet_peer *lp)
3401 __must_hold(&lp->lp_lock)
3403 lp->lp_state |= LNET_PEER_DISCOVERED;
3404 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3405 LNET_PEER_REDISCOVER);
3407 lp->lp_dc_error = 0;
3409 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3414 /* Active side of push. */
3415 static int lnet_peer_send_push(struct lnet_peer *lp)
3416 __must_hold(&lp->lp_lock)
3418 struct lnet_ping_buffer *pbuf;
3419 struct lnet_process_id id;
3424 /* Don't push to a non-multi-rail peer. */
3425 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3426 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3427 /* if peer's NIDs are uptodate then peer is discovered */
3428 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3429 rc = lnet_peer_discovered(lp);
3436 lp->lp_state |= LNET_PEER_PUSH_SENT;
3437 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3438 spin_unlock(&lp->lp_lock);
3440 cpt = lnet_net_lock_current();
3441 pbuf = the_lnet.ln_ping_target;
3442 lnet_ping_buffer_addref(pbuf);
3443 lnet_net_unlock(cpt);
3445 /* Push source MD */
3446 md.start = &pbuf->pb_info;
3447 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3448 md.threshold = 2; /* Put/Ack */
3449 md.max_size = 0;
3450 md.options = LNET_MD_TRACK_RESPONSE;
3451 md.handler = the_lnet.ln_dc_handler;
3452 md.user_ptr = lp;
3454 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3455 if (rc) {
3456 lnet_ping_buffer_decref(pbuf);
3457 CERROR("Can't bind push source MD: %d\n", rc);
3461 cpt = lnet_net_lock_current();
3462 /* Refcount for MD. */
3463 lnet_peer_addref_locked(lp);
3464 id.pid = LNET_PID_LUSTRE;
3465 if (lp->lp_disc_dst_nid != LNET_NID_ANY)
3466 id.nid = lp->lp_disc_dst_nid;
3468 id.nid = lp->lp_primary_nid;
3469 lnet_net_unlock(cpt);
3471 rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3472 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3473 LNET_PROTO_PING_MATCHBITS, 0, 0);
3476 * reset the discovery nid. There is no need to restrict sending
3477 * from that source, if we call lnet_push_update_to_peers(). It'll
3478 * get set to a specific NID, if we initiate discovery from
3479 * scratch.
3481 lp->lp_disc_src_nid = LNET_NID_ANY;
3482 lp->lp_disc_dst_nid = LNET_NID_ANY;
3487 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3489 spin_lock(&lp->lp_lock);
3490 return 0;
3492 fail_unlink:
3493 LNetMDUnlink(lp->lp_push_mdh);
3494 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3496 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3498 * The errors that get us here are considered hard errors and
3499 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3500 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3501 * because the unlink event handler will have set it if we
3502 * called LNetMDUnlink() above.
3504 spin_lock(&lp->lp_lock);
3505 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
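/*
 * A minimal sketch of the Put-with-ACK pattern used by the push above,
 * assuming a generic handler, buffer and target (placeholder names;
 * editorial addition, not part of the original file):
 *
 *	md.start     = buf;
 *	md.length    = len;
 *	md.threshold = 2;	// one SEND plus one ACK event
 *	md.options   = LNET_MD_TRACK_RESPONSE;
 *	md.handler   = handler;
 *	md.user_ptr  = state;
 *	rc = LNetMDBind(&md, LNET_UNLINK, &mdh);
 *	if (!rc)
 *		rc = LNetPut(src_nid, mdh, LNET_ACK_REQ, id,
 *			     LNET_RESERVED_PORTAL,
 *			     LNET_PROTO_PING_MATCHBITS, 0, 0);
 */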
3510 * An unrecoverable error was encountered during discovery.
3511 * Set error status in peer and abort discovery.
3513 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3515 CDEBUG(D_NET, "Discovery error %s: %d\n",
3516 libcfs_nid2str(lp->lp_primary_nid), error);
3518 spin_lock(&lp->lp_lock);
3519 lp->lp_dc_error = error;
3520 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3521 lp->lp_state |= LNET_PEER_REDISCOVER;
3522 spin_unlock(&lp->lp_lock);
3526 * Wait for work to be queued or some other change that must be
3527 * attended to. Returns non-zero if the discovery thread should shut
3528 * down.
3530 static int lnet_peer_discovery_wait_for_work(void)
3537 cpt = lnet_net_lock_current();
3539 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3540 TASK_INTERRUPTIBLE);
3541 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3543 if (lnet_push_target_resize_needed() ||
3544 the_lnet.ln_push_target->pb_needs_post)
3546 if (!list_empty(&the_lnet.ln_dc_request))
3548 if (!list_empty(&the_lnet.ln_msg_resend))
3550 lnet_net_unlock(cpt);
3553 * wake up at most every second to check if there are peers that
3554 * have been stuck on the working queue for greater than
3555 * the peer timeout.
3557 schedule_timeout(cfs_time_seconds(1));
3558 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3559 cpt = lnet_net_lock_current();
3561 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3563 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3566 lnet_net_unlock(cpt);
3568 CDEBUG(D_NET, "woken: %d\n", rc);
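/*
 * The loop above follows the standard prepare_to_wait()/finish_wait()
 * idiom; schematically (condition and lock names are illustrative):
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (should_stop || have_work)
 *			break;
 *		unlock();
 *		schedule_timeout(timeout);
 *		finish_wait(&wq, &wait);
 *		lock();
 *	}
 *	finish_wait(&wq, &wait);
 */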
3574 * Messages that were pending on a destroyed peer will be put on a global
3575 * resend list. The message resend list will be checked by
3576 * the discovery thread when it wakes up, and will resend messages. These
3577 * messages can still be sendable in the case the lpni which was the initial
3578 * cause of the message re-queue was transferred to another peer.
3580 * It is possible that LNet could be shutdown while we're iterating
3581 * through the list. lnet_shutdown_lndnets() will attempt to access the
3582 * resend list, but will have to wait until the spinlock is released, by
3583 * which time there shouldn't be any more messages on the resend list.
3584 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3585 * for the messages so they can be released. The other case is that
3586 * lnet_shutdown_lndnets() can finalize all the messages before this
3587 * function can visit the resend list, in which case this function will be
3588 * a no-op.
3590 static void lnet_resend_msgs(void)
3592 struct lnet_msg *msg, *tmp;
3593 LIST_HEAD(resend);
3594 int rc;
3596 spin_lock(&the_lnet.ln_msg_resend_lock);
3597 list_splice(&the_lnet.ln_msg_resend, &resend);
3598 spin_unlock(&the_lnet.ln_msg_resend_lock);
3600 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3601 list_del_init(&msg->msg_list);
3602 rc = lnet_send(msg->msg_src_nid_param, msg,
3603 msg->msg_rtr_nid_param);
3604 if (rc < 0) {
3605 CNETERR("Error sending %s to %s: %d\n",
3606 lnet_msgtyp2str(msg->msg_type),
3607 libcfs_id2str(msg->msg_target), rc);
3608 lnet_finalize(msg, rc);
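/*
 * Note the splice-under-lock pattern above: the shared resend list is
 * emptied onto a private list in O(1) while holding the spinlock, and
 * the potentially slow lnet_send()/lnet_finalize() calls then run
 * without the lock held.
 */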
3613 /* The discovery thread. */
3614 static int lnet_peer_discovery(void *arg)
3616 struct lnet_peer *lp;
3619 wait_for_completion(&the_lnet.ln_started);
3621 CDEBUG(D_NET, "started\n");
3624 if (lnet_peer_discovery_wait_for_work())
3627 if (lnet_push_target_resize_needed())
3628 lnet_push_target_resize();
3629 else if (the_lnet.ln_push_target->pb_needs_post)
3630 lnet_push_target_post(the_lnet.ln_push_target,
3631 &the_lnet.ln_push_target_md);
3635 lnet_net_lock(LNET_LOCK_EX);
3636 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3637 lnet_net_unlock(LNET_LOCK_EX);
3642 * Process all incoming discovery work requests. When
3643 * discovery must wait on a peer to change state, it
3644 * is added to the tail of the ln_dc_working queue. A
3645 * timestamp keeps track of when the peer was added,
3646 * so we can time out discovery requests that take too
3647 * long.
3649 while (!list_empty(&the_lnet.ln_dc_request)) {
3650 lp = list_first_entry(&the_lnet.ln_dc_request,
3651 struct lnet_peer, lp_dc_list);
3652 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3654 * set the time the peer was put on the dc_working
3655 * queue. It shouldn't remain on the queue
3656 * forever, in case the GET message (for ping)
3657 * doesn't get a REPLY or the PUT message (for
3658 * push) doesn't get an ACK.
3660 lp->lp_last_queued = ktime_get_real_seconds();
3661 lnet_net_unlock(LNET_LOCK_EX);
3663 if (lnet_push_target_resize_needed())
3664 lnet_push_target_resize();
3665 else if (the_lnet.ln_push_target->pb_needs_post)
3666 lnet_push_target_post(the_lnet.ln_push_target,
3667 &the_lnet.ln_push_target_md);
3670 * Select an action depending on the state of
3671 * the peer and whether discovery is disabled.
3672 * The check whether discovery is disabled is
3673 * done after the code that handles processing
3674 * for arrived data, cleanup for failures, and
3675 * forcing a Ping or Push.
3677 spin_lock(&lp->lp_lock);
3678 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3679 libcfs_nid2str(lp->lp_primary_nid), lp,
3680 lp->lp_state);
3681 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3682 LNET_PEER_MARK_DELETED))
3683 rc = lnet_peer_deletion(lp);
3684 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3685 rc = lnet_peer_data_present(lp);
3686 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3687 rc = lnet_peer_ping_failed(lp);
3688 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3689 rc = lnet_peer_push_failed(lp);
3690 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3691 rc = lnet_peer_send_ping(lp);
3692 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3693 rc = lnet_peer_send_push(lp);
3694 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3695 rc = lnet_peer_send_ping(lp);
3696 else if (lnet_peer_needs_push(lp))
3697 rc = lnet_peer_send_push(lp);
3699 rc = lnet_peer_discovered(lp);
3700 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3701 libcfs_nid2str(lp->lp_primary_nid), lp,
3702 lp->lp_state, rc);
3703 spin_unlock(&lp->lp_lock);
3705 lnet_net_lock(LNET_LOCK_EX);
3706 if (rc == LNET_REDISCOVER_PEER) {
3707 list_move(&lp->lp_dc_list,
3708 &the_lnet.ln_dc_request);
3709 } else if (rc) {
3710 lnet_peer_discovery_error(lp, rc);
3712 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3713 lnet_peer_discovery_complete(lp);
3714 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3719 lnet_net_unlock(LNET_LOCK_EX);
3722 CDEBUG(D_NET, "stopping\n");
3724 * Clean up before telling lnet_peer_discovery_stop() that
3725 * we're done. Use wake_up() below to somewhat reduce the
3726 * size of the thundering herd if there are multiple threads
3727 * waiting on discovery of a single peer.
3730 /* Queue cleanup 1: stop all pending pings and pushes. */
3731 lnet_net_lock(LNET_LOCK_EX);
3732 while (!list_empty(&the_lnet.ln_dc_working)) {
3733 lp = list_first_entry(&the_lnet.ln_dc_working,
3734 struct lnet_peer, lp_dc_list);
3735 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3736 lnet_net_unlock(LNET_LOCK_EX);
3737 lnet_peer_cancel_discovery(lp);
3738 lnet_net_lock(LNET_LOCK_EX);
3740 lnet_net_unlock(LNET_LOCK_EX);
3742 /* Queue cleanup 2: wait for the expired queue to clear. */
3743 while (!list_empty(&the_lnet.ln_dc_expired))
3744 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3746 /* Queue cleanup 3: clear the request queue. */
3747 lnet_net_lock(LNET_LOCK_EX);
3748 while (!list_empty(&the_lnet.ln_dc_request)) {
3749 lp = list_first_entry(&the_lnet.ln_dc_request,
3750 struct lnet_peer, lp_dc_list);
3751 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3752 lnet_peer_discovery_complete(lp);
3754 lnet_net_unlock(LNET_LOCK_EX);
3756 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3757 the_lnet.ln_dc_handler = NULL;
3759 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3760 wake_up(&the_lnet.ln_dc_waitq);
3762 CDEBUG(D_NET, "stopped\n");
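/*
 * Per-peer action priority applied in the main loop above: deletion,
 * then arrived data, then ping/push failure cleanup, then forced ping,
 * forced push, a ping when the NIDs are stale, a push when the peer
 * needs one, and finally marking the peer discovered.
 */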
3767 /* ln_api_mutex is held on entry. */
3768 int lnet_peer_discovery_start(void)
3770 struct task_struct *task;
3773 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3776 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3777 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3778 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3779 if (IS_ERR(task)) {
3780 rc = PTR_ERR(task);
3781 CERROR("Can't start peer discovery thread: %d\n", rc);
3783 the_lnet.ln_dc_handler = NULL;
3785 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3788 CDEBUG(D_NET, "discovery start: %d\n", rc);
3793 /* ln_api_mutex is held on entry. */
3794 void lnet_peer_discovery_stop(void)
3796 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3797 return;
3799 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3800 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3802 /* In the LNetNIInit() path we may be stopping discovery before it
3803 * entered its work loop
3805 if (!completion_done(&the_lnet.ln_started))
3806 complete(&the_lnet.ln_started);
3808 wake_up(&the_lnet.ln_dc_waitq);
3810 wait_event(the_lnet.ln_dc_waitq,
3811 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3813 LASSERT(list_empty(&the_lnet.ln_dc_request));
3814 LASSERT(list_empty(&the_lnet.ln_dc_working));
3815 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3817 CDEBUG(D_NET, "discovery stopped\n");
3823 lnet_debug_peer(lnet_nid_t nid)
3825 char *aliveness = "NA";
3826 struct lnet_peer_ni *lp;
3829 cpt = lnet_cpt_of_nid(nid, NULL);
3830 lnet_net_lock(cpt);
3832 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3833 if (IS_ERR(lp)) {
3834 lnet_net_unlock(cpt);
3835 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3836 return;
3839 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3840 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3842 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3843 libcfs_nid2str(lp->lpni_nid), kref_read(&lp->lpni_kref),
3844 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3845 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3846 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3848 lnet_peer_ni_decref_locked(lp);
3850 lnet_net_unlock(cpt);
3853 /* Gathering information for userspace. */
3855 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3856 char aliveness[LNET_MAX_STR_LEN],
3857 __u32 *cpt_iter, __u32 *refcount,
3858 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3859 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3860 __u32 *peer_tx_qnob)
3862 struct lnet_peer_table *peer_table;
3863 struct lnet_peer_ni *lp;
3864 int j;
3865 int lncpt;
3866 bool found = false;
3868 /* get the number of CPTs */
3869 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3871 /* if the cpt number to be examined is >= the number of cpts in
3872 * the system then indicate that there are no more cpts to examine
3874 if (*cpt_iter >= lncpt)
3875 return -ENOENT;
3877 /* get the current table */
3878 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3879 /* if the ptable is NULL then there are no more cpts to examine */
3880 if (peer_table == NULL)
3881 return -ENOENT;
3883 lnet_net_lock(*cpt_iter);
3885 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3886 struct list_head *peers = &peer_table->pt_hash[j];
3888 list_for_each_entry(lp, peers, lpni_hashlist) {
3889 if (peer_index-- > 0)
3890 continue;
3892 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3893 if (lnet_isrouter(lp) ||
3894 lnet_peer_aliveness_enabled(lp))
3895 snprintf(aliveness, LNET_MAX_STR_LEN,
3896 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3898 *nid = lp->lpni_nid;
3899 *refcount = kref_read(&lp->lpni_kref);
3900 *ni_peer_tx_credits =
3901 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3902 *peer_tx_credits = lp->lpni_txcredits;
3903 *peer_rtr_credits = lp->lpni_rtrcredits;
3904 *peer_min_rtr_credits = lp->lpni_mintxcredits;
3905 *peer_tx_qnob = lp->lpni_txqnob;
3911 lnet_net_unlock(*cpt_iter);
3915 return found ? 0 : -ENOENT;
3918 /* ln_api_mutex is held, which keeps the peer list stable */
3919 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3921 struct lnet_ioctl_element_stats *lpni_stats;
3922 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3923 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3924 struct lnet_peer_ni_credit_info *lpni_info;
3925 struct lnet_peer_ni *lpni;
3926 struct lnet_peer *lp;
3931 lp = lnet_find_peer(cfg->prcfg_prim_nid);
3932 if (!lp) {
3933 rc = -ENOENT;
3934 goto out;
3938 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3939 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3940 size *= lp->lp_nnis;
3941 if (size > cfg->prcfg_size) {
3942 cfg->prcfg_size = size;
3943 rc = -E2BIG;
3944 goto out_lp_decref;
3947 cfg->prcfg_prim_nid = lp->lp_primary_nid;
3948 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3949 cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3950 cfg->prcfg_count = lp->lp_nnis;
3951 cfg->prcfg_size = size;
3952 cfg->prcfg_state = lp->lp_state;
3954 /* Allocate helper buffers. */
3956 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3957 if (!lpni_info)
3958 goto out_lp_decref;
3959 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3960 if (!lpni_stats)
3961 goto out_free_info;
3962 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3963 if (!lpni_msg_stats)
3964 goto out_free_stats;
3965 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3966 if (!lpni_hstats)
3967 goto out_free_msg_stats;
3970 lpni = NULL;
3971 rc = -EFAULT;
3972 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3973 nid = lpni->lpni_nid;
3974 if (copy_to_user(bulk, &nid, sizeof(nid)))
3975 goto out_free_hstats;
3976 bulk += sizeof(nid);
3978 memset(lpni_info, 0, sizeof(*lpni_info));
3979 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3980 if (lnet_isrouter(lpni) ||
3981 lnet_peer_aliveness_enabled(lpni))
3982 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3983 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3985 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
3986 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3987 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3988 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3989 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3990 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3991 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3992 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3993 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3994 goto out_free_hstats;
3995 bulk += sizeof(*lpni_info);
3997 memset(lpni_stats, 0, sizeof(*lpni_stats));
3998 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3999 LNET_STATS_TYPE_SEND);
4000 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4001 LNET_STATS_TYPE_RECV);
4002 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4003 LNET_STATS_TYPE_DROP);
4004 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4005 goto out_free_hstats;
4006 bulk += sizeof(*lpni_stats);
4007 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4008 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4009 goto out_free_hstats;
4010 bulk += sizeof(*lpni_msg_stats);
4011 lpni_hstats->hlpni_network_timeout =
4012 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4013 lpni_hstats->hlpni_remote_dropped =
4014 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4015 lpni_hstats->hlpni_remote_timeout =
4016 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4017 lpni_hstats->hlpni_remote_error =
4018 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4019 lpni_hstats->hlpni_health_value =
4020 atomic_read(&lpni->lpni_healthv);
4021 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4022 goto out_free_hstats;
4023 bulk += sizeof(*lpni_hstats);
4025 rc = 0;
4027 out_free_hstats:
4028 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4029 out_free_msg_stats:
4030 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4031 out_free_stats:
4032 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4033 out_free_info:
4034 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4035 out_lp_decref:
4036 lnet_peer_decref_locked(lp);
4037 out:
4038 return rc;
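/*
 * Layout of each per-NI record written to the user-space bulk buffer
 * above: the NID, then credit info, element stats, message stats and
 * health stats, repeated lp_nnis times; this matches the size check
 * against cfg->prcfg_size at the top of the function.
 */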
4041 /* must hold net_lock/0 */
4043 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4044 struct list_head *recovery_queue,
4047 /* the mt could've shutdown and cleaned up the queues */
4048 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4049 return;
4051 if (!list_empty(&lpni->lpni_recovery))
4052 return;
4054 if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4055 return;
4057 if (!lpni->lpni_last_alive) {
4059 "lpni %s(%p) not eligible for recovery last alive %lld\n",
4060 libcfs_nid2str(lpni->lpni_nid), lpni,
4061 lpni->lpni_last_alive);
4065 if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
4066 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4067 libcfs_nid2str(lpni->lpni_nid),
4068 lpni->lpni_last_alive);
4069 /* Reset the ping count so that if this peer NI is added back to
4070 * the recovery queue we will send the first ping right away.
4072 lpni->lpni_ping_count = 0;
4076 /* This peer NI is going on the recovery queue, so take a ref on it */
4077 lnet_peer_ni_addref_locked(lpni);
4079 lnet_peer_ni_set_next_ping(lpni, now);
4081 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4082 libcfs_nid2str(lpni->lpni_nid),
4083 lpni->lpni_ping_count,
4084 lpni->lpni_next_ping,
4085 lpni->lpni_last_alive,
4086 atomic_read(&lpni->lpni_healthv));
4088 list_add_tail(&lpni->lpni_recovery, recovery_queue);
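/*
 * Eligibility gates applied above, in order: the monitor thread must be
 * running, the peer NI must not already be queued, its health must be
 * below LNET_MAX_HEALTH_VALUE, it must have been seen alive at least
 * once, and its last_alive must be within lnet_recovery_limit seconds.
 */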
4091 /* Call with the ln_api_mutex held */
4093 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4095 struct lnet_peer_table *ptable;
4096 struct lnet_peer *lp;
4097 struct lnet_peer_net *lpn;
4098 struct lnet_peer_ni *lpni;
4103 if (the_lnet.ln_state != LNET_STATE_RUNNING)
4104 return;
4106 now = ktime_get_seconds();
4108 if (!all) {
4109 lnet_net_lock(LNET_LOCK_EX);
4110 lpni = lnet_find_peer_ni_locked(nid);
4111 if (!lpni) {
4112 lnet_net_unlock(LNET_LOCK_EX);
4113 return;
4115 atomic_set(&lpni->lpni_healthv, value);
4116 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4117 &the_lnet.ln_mt_peerNIRecovq, now);
4118 lnet_peer_ni_decref_locked(lpni);
4119 lnet_net_unlock(LNET_LOCK_EX);
4120 return;
4123 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4126 * Walk all the peers and reset the health value for each one to the
4127 * specified value.
4129 lnet_net_lock(LNET_LOCK_EX);
4130 for (cpt = 0; cpt < lncpt; cpt++) {
4131 ptable = the_lnet.ln_peer_tables[cpt];
4132 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4133 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4134 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4135 lpni_peer_nis) {
4136 atomic_set(&lpni->lpni_healthv, value);
4137 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4138 &the_lnet.ln_mt_peerNIRecovq, now);
4143 lnet_net_unlock(LNET_LOCK_EX);