/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = *nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
}
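/* Note on the allocator above: lnet_peer_ni_alloc() hands back the lpni
 * with a single reference (from kref_init()), full health
 * (LNET_MAX_HEALTH_VALUE), and its status seeded from
 * lnet_peers_start_down(); the caller owns that initial reference.
 */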
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = *nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * All peers created on a router should have health checking on,
	 * if it is not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * the loopback interface.
	 */
	if (nid_is_lo0(nid))
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
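/* Lock ordering used above when splicing the pending queue: the global
 * ln_msg_resend_lock is taken before the per-peer lp_lock, and the
 * discovery waitqueue is only woken after both locks are dropped.
 */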
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}
/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}
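/* Note the pattern above: the MD handles are snapshotted and invalidated
 * while holding lp_lock, but the actual LNetMDUnlink() calls are made
 * only after the spinlock has been dropped.
 */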
int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
{
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	struct lnet_nid nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	lnet_nid4_to_nid(nid4, &nid);
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it.
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    nid_same(&primary_nid, &nid)) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(&nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid), flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				      &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			/* FIXME handle large-addr nid */
			gw_nid = lnet_nid_to_nid4(
				&lp->lpni_peer_net->lpn_peer->lp_primary_nid);

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
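/* Cleanup above proceeds in three phases per CPT table: first delete any
 * routes whose gateways are peers on the affected net, then delete the
 * peer entries themselves, and finally wait for each table's zombie
 * count to drain to zero before returning.
 */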
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid4)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;
	struct lnet_nid nid;

	lnet_nid4_to_nid(nid4, &nid);

	cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, &nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
			return lpni;
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (nid_same(&lpni->lpni_nid, nid))
			return lpni;
	}

	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided, return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
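/* A minimal usage sketch of the iterator above, matching the idiom used
 * by lnet_peer_clr_non_mr_pref_nids() further down (call with the
 * appropriate lnet_net_lock held):
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		handle_lpni(lpni);	// hypothetical per-NI callback
 */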
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -ESHUTDOWN;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;
			if (i >= count)
				goto done;
			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;

done:
	*countp = count;
	*sizep = size;
	return rc;
}
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
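/* In lnet_push_update_to_peers() above, "force" marks every Multi-Rail
 * peer with LNET_PEER_FORCE_PUSH so that lnet_peer_needs_push() queues
 * it for discovery; without force, only peers that already need a push
 * get queued.
 */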
/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
static bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     struct lnet_nid *gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(gw_nid));
		if (nid_same(&ne->nl_nid, gw_nid))
			return true;
	}

	return false;
}
void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
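/* lnet_peer_clr_pref_rtrs() shows the recurring teardown pattern in this
 * file: splice the NID list onto a private "zombies" list while holding
 * the cpt lock, then free the entries outside the lock.
 */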
int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       struct lnet_nid *gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list can not be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
static bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return nid_same(&lpni->lpni_pref.nid, nid);
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (nid_same(&ne->nl_nid, nid))
			return true;
	}

	return false;
}
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
				 struct lnet_nid *nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);

	return rc;
}
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);

	return rc;
}
void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	struct lnet_nid *tmp_nid = NULL;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 &&
	    nid_same(&lpni->lpni_pref.nid, nid)) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = &lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne1->nl_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = *tmp_nid;
		}

		ne1->nl_nid = *nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
}
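/* The lpni_pref field acts as a small-size optimization: with exactly
 * one preferred NID the nid is stored inline (lpni_pref.nid), and only
 * when a second NID is added does lnet_peer_add_pref_nid() above convert
 * it into a linked list (lpni_pref.nids), moving the originally stored
 * nid onto the list as well.
 */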
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (!nid_same(&lpni->lpni_pref.nid, nid)) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne->nl_nid, nid))
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
}
void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	/* FIXME handle large-addr nid */
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lnet_nid_to_nid4(
			&lpni->lpni_peer_net->lpn_peer->lp_primary_nid);
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
	lnet_nid_t pnid = 0;
	bool mr;
	int i, rc;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	rc = 0;
	for (i = 0; i < num_nids; i++) {
		if (nids[i] == LNET_NID_LO_0)
			continue;

		if (!pnid) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
		} else {
			rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
		}

		if (rc && rc != -EEXIST)
			break;
	}

	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);
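/* Illustrative only (not from this file): a kernel caller holding valid
 * NIDs could prime a multi-rail peer entry like so, assuming nid_a and
 * nid_b were obtained elsewhere:
 *
 *	lnet_nid_t nids[] = { nid_a, nid_b };
 *	int rc = LNetAddPeer(nids, 2);
 *	if (rc)
 *		CERROR("failed to add peer: %d\n", rc);
 */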
/* FIXME support large-addr nid */
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
	if (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
				LNET_PEER_LOCK_PRIMARY;
		spin_unlock(&lp->lp_lock);

		/* start discovery in the background. Messages to that
		 * peer will not go through until the discovery is
		 * complete.
		 */
		rc = lnet_discover_peer_locked(lpni, cpt, false);
		if (rc)
			goto out_decref;

		/* The lpni (or lp) for this NID may have changed and our ref is
		 * the only thing keeping the old one around. Release the ref
		 * and lookup the lpni again
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);

	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY)
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid4, unsigned int flags)
{
	struct lnet_nid nid;
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid4 != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid4);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid4)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (!(flags & LNET_PEER_CONFIGURED)) {
			if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid4) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lnet_nid4_to_nid(nid4, &nid);
	lp = lnet_peer_alloc(&nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(&nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid4), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	struct lnet_nid nid;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid4 != LNET_NID_ANY);

	lnet_nid4_to_nid(nid4, &nid);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid4);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *lp2 =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = lp2->lp_rtr_refcount;

			/* If the new peer that this NID belongs to is
			 * a primary NID for another peer which we're
			 * supposed to preserve the Primary for then we
			 * don't want to mess with it. But the
			 * configuration is wrong at this point, so we
			 * should flag both of these peers as in a bad
			 * state.
			 */
			if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
				spin_lock(&lp->lp_lock);
				lp->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp->lp_lock);
				spin_lock(&lp2->lp_lock);
				lp2->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp2->lp_lock);
				goto out_free_lpni;
			}
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(lp2, lp);
			}
			lnet_peer_del(lp2);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(&nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out_free_lpni;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(&nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid4));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid4));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid4),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
			  unsigned int flags)
{
	struct lnet_nid old = lp->lp_primary_nid;
	int rc = 0;

	if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
		goto out;

	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
		lnet_nid4_to_nid(nid, &lp->lp_primary_nid);

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc) {
		lp->lp_primary_nid = old;
		goto out;
	}
out:
	/* if this is a configured peer or the primary for that peer has
	 * been locked, then we don't want to flag this scenario as
	 * a failure.
	 */
	if (lp->lp_state & LNET_PEER_CONFIGURED ||
	    lp->lp_state & LNET_PEER_LOCK_PRIMARY)
		return 0;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nidstr(&old), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * lpni creation initiated due to traffic, either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	if (!temp)
		flags = LNET_PEER_CONFIGURED;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
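/* From user space the same combinations are reachable via the DLC ioctl,
 * e.g. (assuming a standard lnetctl build):
 *
 *	lnetctl peer add --prim_nid <nid> [--nid <nid>] [--non_mr]
 */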
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}
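/* lnet_destroy_peer_ni_locked() is the kref release callback for
 * lpni_kref: it runs when the final lnet_peer_ni_decref_locked() drops
 * the count to zero, which is why it takes a struct kref rather than
 * the lpni directly.
 */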
struct lnet_peer_ni *
lnet_nid2peerni_ex(struct lnet_nid *nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, NULL);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_peerni_by_nid_locked(struct lnet_nid *nid,
			  struct lnet_nid *pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid4, lnet_nid_t pref4, int cpt)
{
	struct lnet_nid nid, pref;

	lnet_nid4_to_nid(nid4, &nid);
	lnet_nid4_to_nid(pref4, &pref);
	if (pref4 == LNET_NID_ANY)
		return lnet_peerni_by_nid_locked(&nid, NULL, cpt);
	else
		return lnet_peerni_by_nid_locked(&nid, &pref, cpt);
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}
/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
2354 * Handle inbound push.
2355 * Like any event handler, called with lnet_res_lock/CPT held.
2357 void lnet_peer_push_event(struct lnet_event *ev)
2359 struct lnet_ping_buffer *pbuf;
2360 struct lnet_peer *lp;
2362 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2364 /* lnet_find_peer() adds a refcount */
2365 lp = lnet_find_peer(ev->source.nid);
2367 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2368 libcfs_nid2str(ev->initiator.nid),
2369 libcfs_nid2str(ev->source.nid));
2370 pbuf->pb_needs_post = true;
2374 /* Ensure peer state remains consistent while we modify it. */
2375 spin_lock(&lp->lp_lock);
2378 * If some kind of error happened the contents of the message
2379 * cannot be used. Clear the NIDS_UPTODATE and set the
2380 * FORCE_PING flag to trigger a ping.
2383 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2384 lp->lp_state |= LNET_PEER_FORCE_PING;
2385 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2387 libcfs_nidstr(&lp->lp_primary_nid),
2388 libcfs_nid2str(ev->source.nid));
2393 * A push with invalid or corrupted info. Clear the UPTODATE
2394 * flag to trigger a ping.
2396 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2397 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2398 lp->lp_state |= LNET_PEER_FORCE_PING;
2399 CDEBUG(D_NET, "Corrupted Push from %s\n",
2400 libcfs_nidstr(&lp->lp_primary_nid));
2405 * Make sure we'll allocate the correct size ping buffer when
2408 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2409 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2412 * A non-Multi-Rail peer is not supposed to be capable of
2415 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2416 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2417 libcfs_nidstr(&lp->lp_primary_nid));
2422 * The peer may have discovery disabled at its end. Set
2423 * NO_DISCOVERY as appropriate.
2425 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2426 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2427 libcfs_nidstr(&lp->lp_primary_nid));
2429 * Mark the peer for deletion if we already know about it
2430 * and it's going from discovery set to no discovery set
2432 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2433 LNET_PEER_DISCOVERING)) &&
2434 lp->lp_state & LNET_PEER_DISCOVERED) {
2435 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2436 libcfs_nidstr(&lp->lp_primary_nid),
2438 lp->lp_state |= LNET_PEER_MARK_DELETION;
2440 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2441 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2442 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2443 libcfs_nidstr(&lp->lp_primary_nid));
2444 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2448 * Update the MULTI_RAIL flag based on the push. If the peer
2449 * was configured with DLC then the setting should match what
2450 * DLC says.
2451 * NB: We verified above that the MR feature bit is set in pi_features
2453 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2454 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2455 libcfs_nidstr(&lp->lp_primary_nid), lp);
2456 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2457 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2458 libcfs_nidstr(&lp->lp_primary_nid));
2459 } else if (lnet_peer_discovery_disabled) {
2460 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2461 libcfs_nidstr(&lp->lp_primary_nid), lp);
2462 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2463 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2464 libcfs_nidstr(&lp->lp_primary_nid), lp);
2466 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2467 libcfs_nidstr(&lp->lp_primary_nid), lp);
2468 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2469 lnet_peer_clr_non_mr_pref_nids(lp);
2473 * Check for truncation of the Put message. Clear the
2474 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2475 * and tell discovery to allocate a bigger buffer.
2477 if (ev->mlength < ev->rlength) {
2478 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2479 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2480 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2481 lp->lp_state |= LNET_PEER_FORCE_PING;
2482 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2483 libcfs_nidstr(&lp->lp_primary_nid),
2484 pbuf->pb_info.pi_nnis);
2488 /* always assume new data */
2489 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2490 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2493 * If there is data present that hasn't been processed yet,
2494 * we'll replace it if the Put contained newer data and it
2495 * fits. We're racing with a Ping or an earlier Push in this case.
2498 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2499 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2500 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2501 pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2502 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2503 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2504 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2505 libcfs_nidstr(&lp->lp_primary_nid),
2506 LNET_PING_BUFFER_SEQNO(pbuf),
2507 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2513 * Allocate a buffer to copy the data. On a failure we drop
2514 * the Push and set FORCE_PING to force the discovery
2515 * thread to fix the problem by pinging the peer.
2517 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2519 lp->lp_state |= LNET_PEER_FORCE_PING;
2520 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2521 libcfs_nidstr(&lp->lp_primary_nid),
2522 LNET_PING_BUFFER_SEQNO(pbuf));
2527 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2528 LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2529 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2530 CDEBUG(D_NET, "Received Push %s %u\n",
2531 libcfs_nidstr(&lp->lp_primary_nid),
2532 LNET_PING_BUFFER_SEQNO(pbuf));
2535 /* We've processed this buffer. It can be reposted */
2536 pbuf->pb_needs_post = true;
2539 * Queue the peer for discovery if that has not been done yet; if the
2540 * peer was already queued, force it onto the request queue and wake
2541 * the discovery thread, because its status changed.
2543 spin_unlock(&lp->lp_lock);
2544 lnet_net_lock(LNET_LOCK_EX);
2545 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2546 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2547 wake_up(&the_lnet.ln_dc_waitq);
2549 /* Drop refcount from lookup */
2550 lnet_peer_decref_locked(lp);
2551 lnet_net_unlock(LNET_LOCK_EX);
2555 * Clear the discovery error state, unless we're already discovering
2556 * this peer, in which case the error is current.
2558 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2560 spin_lock(&lp->lp_lock);
2561 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2562 lp->lp_dc_error = 0;
2563 spin_unlock(&lp->lp_lock);
2567 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2568 * dropped/retaken within this function. An lnet_peer_ni is passed in
2569 * because discovery could tear down an lnet_peer.
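/*
 * A sketch of the expected call site (assumed from the locking
 * constraints stated above, not a verbatim caller):
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	cpt = lnet_net_lock_current();
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	lnet_net_unlock(cpt);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */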
2572 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2574 DEFINE_WAIT(wait);
2575 struct lnet_peer *lp;
2576 int rc = 0;
2577 int count = 0;
2579 again:
2580 lnet_net_unlock(cpt);
2581 lnet_net_lock(LNET_LOCK_EX);
2582 lp = lpni->lpni_peer_net->lpn_peer;
2583 lnet_peer_clear_discovery_error(lp);
2586 * We're willing to be interrupted. The lpni can become a
2587 * zombie if we race with DLC, so we must check for that.
2590 /* Keep lp alive when the lnet_net_lock is unlocked */
2591 lnet_peer_addref_locked(lp);
2592 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2593 if (signal_pending(current))
2595 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2598 * Don't repeat discovery if discovery is disabled. This is
2599 * done to ensure we can use discovery as a standard ping as
2600 * well for backwards compatibility with routers which do not
2601 * have discovery or have it disabled.
2603 if (lnet_is_discovery_disabled(lp) && count > 0)
2605 if (lp->lp_dc_error)
2607 if (lnet_peer_is_uptodate(lp))
2609 lnet_peer_queue_for_discovery(lp);
2611 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2614 * If the caller requested a non-blocking operation then
2615 * return immediately. Once discovery is complete any
2616 * pending messages that were stopped due to discovery
2617 * will be transmitted.
2622 lnet_net_unlock(LNET_LOCK_EX);
2624 finish_wait(&lp->lp_dc_waitq, &wait);
2625 lnet_net_lock(LNET_LOCK_EX);
2626 lnet_peer_decref_locked(lp);
2627 /* Peer may have changed */
2628 lp = lpni->lpni_peer_net->lpn_peer;
2630 finish_wait(&lp->lp_dc_waitq, &wait);
2632 lnet_net_unlock(LNET_LOCK_EX);
2633 lnet_net_lock(cpt);
2634 lnet_peer_decref_locked(lp);
2636 * The peer may have changed, so re-check and rediscover if that turns
2637 * out to have been the case. The reference count on lp ensured that
2638 * even if it was unlinked from lpni the memory could not be recycled.
2639 * Thus the check below is sufficient to determine whether the peer
2640 * changed. If the peer changed, then lp must not be dereferenced.
2642 if (lp != lpni->lpni_peer_net->lpn_peer)
2643 goto again;
2645 if (signal_pending(current))
2647 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2649 else if (lp->lp_dc_error)
2650 rc = lp->lp_dc_error;
2652 CDEBUG(D_NET, "non-blocking discovery\n");
2653 else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2656 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2657 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2658 libcfs_nidstr(&lpni->lpni_nid), rc,
2659 (!block) ? "pending discovery" : "discovery complete");
2664 /* Handle an incoming ack for a push. */
2666 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2668 struct lnet_ping_buffer *pbuf;
2670 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2671 spin_lock(&lp->lp_lock);
2672 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2673 lp->lp_push_error = ev->status;
2675 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2677 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2678 spin_unlock(&lp->lp_lock);
2680 CDEBUG(D_NET, "peer %s ev->status %d\n",
2681 libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2684 /* Handle a Reply message. This is the reply to a Ping message. */
2686 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2688 struct lnet_ping_buffer *pbuf;
2689 int rc;
2691 spin_lock(&lp->lp_lock);
2693 lnet_nid4_to_nid(ev->target.nid, &lp->lp_disc_src_nid);
2694 lnet_nid4_to_nid(ev->source.nid, &lp->lp_disc_dst_nid);
2697 * If some kind of error happened the contents of the message
2698 * cannot be used. Set PING_FAILED to trigger a retry.
2701 lp->lp_state |= LNET_PEER_PING_FAILED;
2702 lp->lp_ping_error = ev->status;
2703 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2705 libcfs_nidstr(&lp->lp_primary_nid),
2706 libcfs_nid2str(ev->source.nid));
2710 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2711 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2712 lnet_swap_pinginfo(pbuf);
2715 * A reply with invalid or corrupted info. Set PING_FAILED to
2718 rc = lnet_ping_info_validate(&pbuf->pb_info);
2720 lp->lp_state |= LNET_PEER_PING_FAILED;
2721 lp->lp_ping_error = 0;
2722 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2723 libcfs_nidstr(&lp->lp_primary_nid), rc);
2728 * The peer may have discovery disabled at its end. Set
2729 * NO_DISCOVERY as appropriate.
2731 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2732 lnet_peer_discovery_disabled) {
2733 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2734 libcfs_nidstr(&lp->lp_primary_nid));
2736 /* Detect whether this peer has toggled discovery from on to
2737 * off and whether we can delete and re-create the peer. Peers
2738 * that were manually configured cannot be deleted by discovery.
2739 * We need to delete this peer and re-create it if the peer was
2740 * not configured manually, is currently considered DD capable,
2741 * and either:
2742 * 1. We've already discovered the peer (the peer has toggled
2743 * the discovery feature from on to off), or
2744 * 2. The peer is considered MR, but it was not user configured
2745 * (this was a "temporary" peer created via the kernel APIs
2746 * that we're discovering for the first time)
2748 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2749 LNET_PEER_NO_DISCOVERY)) &&
2750 (lp->lp_state & (LNET_PEER_DISCOVERED |
2751 LNET_PEER_MULTI_RAIL))) {
2752 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2753 libcfs_nidstr(&lp->lp_primary_nid),
2755 lp->lp_state |= LNET_PEER_MARK_DELETION;
2757 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2759 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2760 libcfs_nidstr(&lp->lp_primary_nid));
2761 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2765 * Update the MULTI_RAIL flag based on the reply. If the peer
2766 * was configured with DLC then the setting should match what
2767 * DLC says.
2769 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2770 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2771 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2772 libcfs_nidstr(&lp->lp_primary_nid), lp);
2773 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2774 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2775 libcfs_nidstr(&lp->lp_primary_nid));
2776 } else if (lnet_peer_discovery_disabled) {
2778 "peer %s(%p) not MR: DD disabled locally\n",
2779 libcfs_nidstr(&lp->lp_primary_nid), lp);
2780 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2782 "peer %s(%p) not MR: DD disabled remotely\n",
2783 libcfs_nidstr(&lp->lp_primary_nid), lp);
2785 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2786 libcfs_nidstr(&lp->lp_primary_nid), lp);
2787 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2788 lnet_peer_clr_non_mr_pref_nids(lp);
2790 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2791 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2792 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2793 libcfs_nidstr(&lp->lp_primary_nid));
2795 CERROR("Multi-Rail state vanished from %s\n",
2796 libcfs_nidstr(&lp->lp_primary_nid));
2797 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2802 * Make sure we'll allocate the correct size ping buffer when
2803 * pinging the peer.
2805 if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2806 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2809 * Check for truncation of the Reply. Clear PING_SENT and set
2810 * PING_FAILED to trigger a retry.
2812 if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2813 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2814 the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2815 lp->lp_state |= LNET_PEER_PING_FAILED;
2816 lp->lp_ping_error = 0;
2817 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2818 libcfs_nidstr(&lp->lp_primary_nid),
2819 pbuf->pb_info.pi_nnis);
2824 * Check the sequence numbers in the reply. These are only
2825 * available if the reply came from a Multi-Rail peer.
2827 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2828 pbuf->pb_info.pi_nnis > 1 &&
2829 lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2830 pbuf->pb_info.pi_ni[1].ns_nid) {
2831 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2832 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2833 libcfs_nidstr(&lp->lp_primary_nid),
2834 LNET_PING_BUFFER_SEQNO(pbuf),
2837 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2840 /* We're happy with the state of the data in the buffer. */
2841 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2842 libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2844 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2845 lnet_ping_buffer_decref(lp->lp_data);
2847 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2848 lnet_ping_buffer_addref(pbuf);
2851 lp->lp_state &= ~LNET_PEER_PING_SENT;
2852 spin_unlock(&lp->lp_lock);
2854 lnet_net_lock(LNET_LOCK_EX);
2856 * If this peer is a gateway, call the routing callback to
2857 * handle the ping reply
2859 if (lp->lp_rtr_refcount > 0)
2860 lnet_router_discovery_ping_reply(lp);
2861 lnet_net_unlock(LNET_LOCK_EX);
2865 * Send event handling. Only matters for error cases, where we clean
2866 * up state on the peer and peer_ni that would otherwise be updated in
2867 * the REPLY event handler for a successful Ping, and the ACK event
2868 * handler for a successful Push.
2871 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2873 int rc = 0;
2875 if (!ev->status)
2876 goto out;
2878 spin_lock(&lp->lp_lock);
2879 if (ev->msg_type == LNET_MSG_GET) {
2880 lp->lp_state &= ~LNET_PEER_PING_SENT;
2881 lp->lp_state |= LNET_PEER_PING_FAILED;
2882 lp->lp_ping_error = ev->status;
2883 } else { /* ev->msg_type == LNET_MSG_PUT */
2884 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2885 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2886 lp->lp_push_error = ev->status;
2888 spin_unlock(&lp->lp_lock);
2889 rc = LNET_REDISCOVER_PEER;
2890 out:
2891 CDEBUG(D_NET, "%s Send to %s: %d\n",
2892 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2893 libcfs_nid2str(ev->target.nid), rc);
2898 * Unlink event handling. This event is only seen if a call to
2899 * LNetMDUnlink() caused the event to be unlinked. If this call was
2900 * made after the event was set up in LNetGet() or LNetPut() then we
2901 * assume the Ping or Push timed out.
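/*
 * Note that the PING_SENT and PUSH_SENT cases below are checked
 * independently: if both a Ping and a Push were in flight when the
 * MD was unlinked, both are failed with -ETIMEDOUT.
 */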
2904 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2906 spin_lock(&lp->lp_lock);
2907 /* We've passed through LNetGet() */
2908 if (lp->lp_state & LNET_PEER_PING_SENT) {
2909 lp->lp_state &= ~LNET_PEER_PING_SENT;
2910 lp->lp_state |= LNET_PEER_PING_FAILED;
2911 lp->lp_ping_error = -ETIMEDOUT;
2912 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2913 libcfs_nidstr(&lp->lp_primary_nid));
2915 /* We've passed through LNetPut() */
2916 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2917 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2918 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2919 lp->lp_push_error = -ETIMEDOUT;
2920 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2921 libcfs_nidstr(&lp->lp_primary_nid));
2923 spin_unlock(&lp->lp_lock);
2927 * Event handler for the discovery EQ.
2929 * Called with lnet_res_lock(cpt) held. The cpt is the
2930 * lnet_cpt_of_cookie() of the md handle cookie.
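/*
 * Dispatch summary (a reading aid; the switch below is authoritative):
 *
 *	LNET_EVENT_ACK    -> lnet_discovery_event_ack()    (Push acked)
 *	LNET_EVENT_REPLY  -> lnet_discovery_event_reply()  (Ping reply)
 *	LNET_EVENT_SEND   -> lnet_discovery_event_send()   (errors only)
 *	LNET_EVENT_UNLINK -> lnet_discovery_event_unlink() (timed out)
 */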
2932 static void lnet_discovery_event_handler(struct lnet_event *event)
2934 struct lnet_peer *lp = event->md_user_ptr;
2935 struct lnet_ping_buffer *pbuf;
2938 /* discovery needs to take another look */
2939 rc = LNET_REDISCOVER_PEER;
2941 CDEBUG(D_NET, "Received event: %d\n", event->type);
2943 switch (event->type) {
2944 case LNET_EVENT_ACK:
2945 lnet_discovery_event_ack(lp, event);
2947 case LNET_EVENT_REPLY:
2948 lnet_discovery_event_reply(lp, event);
2950 case LNET_EVENT_SEND:
2951 /* Only send failure triggers a retry. */
2952 rc = lnet_discovery_event_send(lp, event);
2954 case LNET_EVENT_UNLINK:
2955 /* LNetMDUnlink() was called */
2956 lnet_discovery_event_unlink(lp, event);
2959 /* Invalid events. */
2962 lnet_net_lock(LNET_LOCK_EX);
2963 if (event->unlinked) {
2964 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2965 lnet_ping_buffer_decref(pbuf);
2966 lnet_peer_decref_locked(lp);
2969 /* put peer back at end of request queue, if discovery not already
2970 * done */
2971 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2972 lnet_peer_queue_for_discovery(lp)) {
2973 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2974 wake_up(&the_lnet.ln_dc_waitq);
2976 lnet_net_unlock(LNET_LOCK_EX);
2980 * Build a peer from incoming data.
2982 * The NIDs in the incoming data are supposed to be structured as follows:
2983 * - loopback NID
2984 * - primary NID
2985 * - other NIDs in same net
2986 * - NIDs in second net
2987 * - NIDs in third net
2989 * This is due to the way the list of NIDs in the data is created.
2991 * Note that this function will mark the peer uptodate unless an
2992 * ENOMEM is encountered. All other errors are due to a conflict
2993 * between the DLC configuration and what discovery sees. We treat DLC
2994 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2995 * peer from becoming stuck in discovery.
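/*
 * In outline (a summary of the code below, relying on the NID
 * ordering described above):
 *
 *	curnis[] = NIDs currently known for the peer
 *	addnis[] = NIDs in pbuf but not in curnis[]
 *	delnis[] = NIDs in curnis[] but not in pbuf (never the loopback NID)
 *
 * addnis[] entries are added with lnet_peer_add_nid(), delnis[]
 * entries are removed with lnet_peer_del_nid(), and NIDs present in
 * both simply have their cached ns_status refreshed.
 */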
2997 static int lnet_peer_merge_data(struct lnet_peer *lp,
2998 struct lnet_ping_buffer *pbuf)
3000 struct lnet_peer_net *lpn;
3001 struct lnet_peer_ni *lpni;
3002 lnet_nid_t *curnis = NULL;
3003 struct lnet_ni_status *addnis = NULL;
3004 lnet_nid_t *delnis = NULL;
3005 unsigned int flags;
3006 int ncurnis = 0;
3007 int naddnis = 0;
3008 int ndelnis = 0;
3009 int nnis = 0;
3010 int rc = 0;
3011 int i;
3012 int j;
3014 flags = LNET_PEER_DISCOVERED;
3015 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3016 flags |= LNET_PEER_MULTI_RAIL;
3019 * Cache the routing feature for the peer; whether it is enabled
3020 * or disabled, as reported by the remote peer.
3022 spin_lock(&lp->lp_lock);
3023 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3024 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3026 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3027 spin_unlock(&lp->lp_lock);
3029 nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
3030 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3031 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3032 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3033 if (!curnis || !addnis || !delnis) {
3041 /* Construct the list of NIDs present in the peer. */
3042 lpni = NULL;
3043 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3044 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
3047 * Check for NIDs in pbuf not present in curnis[].
3048 * The loop starts at 1 to skip the loopback NID.
3050 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
3051 for (j = 0; j < ncurnis; j++)
3052 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
3055 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
3058 * Check for NIDs in curnis[] not present in pbuf.
3059 * The nested loop starts at 1 to skip the loopback NID.
3061 * But never add the loopback NID to delnis[]: if it is
3062 * present in curnis[] then this peer is for this node.
3064 for (i = 0; i < ncurnis; i++) {
3065 if (curnis[i] == LNET_NID_LO_0)
3067 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3068 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3070 * update the information we cache for the
3071 * peer with the latest information we
3072 * received.
3074 lpni = lnet_find_peer_ni_locked(curnis[i]);
3076 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3077 lnet_peer_ni_decref_locked(lpni);
3082 if (j == pbuf->pb_info.pi_nnis)
3083 delnis[ndelnis++] = curnis[i];
3087 * If we get here and the discovery is disabled then we don't want
3088 * to add or delete any NIs. We just update the ones we have some
3089 * information on and call it a day.
3092 if (lnet_is_discovery_disabled(lp))
3095 for (i = 0; i < naddnis; i++) {
3096 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3098 CERROR("Error adding NID %s to peer %s: %d\n",
3099 libcfs_nid2str(addnis[i].ns_nid),
3100 libcfs_nidstr(&lp->lp_primary_nid), rc);
3104 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3106 lpni->lpni_ns_status = addnis[i].ns_status;
3107 lnet_peer_ni_decref_locked(lpni);
3111 for (i = 0; i < ndelnis; i++) {
3113 * for routers it's okay to delete the primary_nid because
3114 * the upper layers don't really rely on it. So if we're
3115 * being told that the router changed its primary_nid
3116 * then it's okay to delete it.
3118 if (lp->lp_rtr_refcount > 0)
3119 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3120 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3122 CERROR("Error deleting NID %s from peer %s: %d\n",
3123 libcfs_nid2str(delnis[i]),
3124 libcfs_nidstr(&lp->lp_primary_nid), rc);
3130 /* The peer net for the primary NID should be the first entry in the
3131 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3132 * be the first entry in its peer net's lpn_peer_nis list.
3134 lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3136 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3137 libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3141 lnet_peer_ni_decref_locked(lpni);
3143 lpn = lpni->lpni_peer_net;
3144 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3145 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3147 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3148 list_move(&lpni->lpni_peer_nis,
3149 &lpni->lpni_peer_net->lpn_peer_nis);
3152 * Errors other than -ENOMEM are due to peers having been
3153 * configured with DLC. Ignore these because DLC overrides
3154 * Discovery.
3158 CFS_FREE_PTR_ARRAY(curnis, nnis);
3159 CFS_FREE_PTR_ARRAY(addnis, nnis);
3160 CFS_FREE_PTR_ARRAY(delnis, nnis);
3161 lnet_ping_buffer_decref(pbuf);
3162 CDEBUG(D_NET, "peer %s (%p): %d\n",
3163 libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3166 spin_lock(&lp->lp_lock);
3167 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3168 lp->lp_state |= LNET_PEER_FORCE_PING;
3169 spin_unlock(&lp->lp_lock);
3175 * The data in pbuf says lp is its primary peer, but the data was
3176 * received by a different peer. Try to update lp with the data.
3179 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3181 struct lnet_handle_md mdh;
3183 /* Queue lp for discovery, and force it on the request queue. */
3184 lnet_net_lock(LNET_LOCK_EX);
3185 if (lnet_peer_queue_for_discovery(lp))
3186 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3187 lnet_net_unlock(LNET_LOCK_EX);
3189 LNetInvalidateMDHandle(&mdh);
3192 * Decide whether we can move the peer to the DATA_PRESENT state.
3194 * We replace stale data for a multi-rail peer, repair PING_FAILED
3195 * status, and preempt FORCE_PING.
3197 * If after that we have DATA_PRESENT, we merge it into this peer.
3199 spin_lock(&lp->lp_lock);
3200 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3201 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3202 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3203 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3204 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3205 lnet_ping_buffer_decref(pbuf);
3210 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3211 lnet_ping_buffer_decref(lp->lp_data);
3213 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3215 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3216 mdh = lp->lp_ping_mdh;
3217 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3218 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3219 lp->lp_ping_error = 0;
3221 if (lp->lp_state & LNET_PEER_FORCE_PING)
3222 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3223 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3224 spin_unlock(&lp->lp_lock);
3226 if (!LNetMDHandleIsInvalid(mdh))
3230 return lnet_peer_merge_data(lp, pbuf);
3232 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3236 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3240 for (i = 0; i < pinfo->pi_nnis; i++) {
3241 if (pinfo->pi_ni[i].ns_nid == nid)
3248 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3249 * to the discovery queue a reference was taken that will prevent the peer from
3250 * actually being freed by this function. After this function exits the
3251 * discovery thread should call lnet_peer_discovery_complete() which will
3252 * drop that reference as well as wake any waiters that may also be
3253 * holding a reference on the peer.
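/*
 * Routes that go through this peer are moved onto a private list
 * before the peer is deleted and are re-added afterwards via
 * lnet_add_route(), so that gateway configuration survives the
 * delete/re-create cycle.
 */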
3255 static int lnet_peer_deletion(struct lnet_peer *lp)
3256 __must_hold(&lp->lp_lock)
3258 struct list_head rlist;
3259 struct lnet_route *route, *tmp;
3260 int sensitivity = lp->lp_health_sensitivity;
3262 INIT_LIST_HEAD(&rlist);
3264 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3265 LNET_PEER_FORCE_PUSH);
3266 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3267 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3269 /* no-op if lnet_peer_del() has already been called on this peer */
3270 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3273 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3276 spin_unlock(&lp->lp_lock);
3278 mutex_lock(&the_lnet.ln_api_mutex);
3280 lnet_net_lock(LNET_LOCK_EX);
3281 /* remove the peer from the discovery work
3282 * queue if it's on there in preparation
3283 * for deletion
3284 */
3285 if (!list_empty(&lp->lp_dc_list))
3286 list_del_init(&lp->lp_dc_list);
3287 list_for_each_entry_safe(route, tmp,
3290 lnet_move_route(route, NULL, &rlist);
3291 lnet_net_unlock(LNET_LOCK_EX);
3293 /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3296 list_for_each_entry_safe(route, tmp,
3298 /* re-add these routes */
3299 lnet_add_route(route->lr_net,
3304 LIBCFS_FREE(route, sizeof(*route));
3307 mutex_unlock(&the_lnet.ln_api_mutex);
3309 spin_lock(&lp->lp_lock);
3315 * Update a peer using the data received.
3317 static int lnet_peer_data_present(struct lnet_peer *lp)
3318 __must_hold(&lp->lp_lock)
3320 struct lnet_ping_buffer *pbuf;
3321 struct lnet_peer_ni *lpni;
3322 lnet_nid_t nid = LNET_NID_ANY;
3323 unsigned int flags;
3324 int rc = 0;
3326 pbuf = lp->lp_data;
3327 lp->lp_data = NULL;
3328 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3329 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3330 spin_unlock(&lp->lp_lock);
3333 * Modifications of peer structures are done while holding the
3334 * ln_api_mutex. A global lock is required because we may be
3335 * modifying multiple peer structures, and a mutex greatly
3336 * simplifies memory management.
3338 * The actual changes to the data structures must also protect
3339 * against concurrent lookups, for which the lnet_net_lock in
3340 * LNET_LOCK_EX mode is used.
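/* A minimal sketch of that locking pattern (illustrative only):
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	lnet_net_lock(LNET_LOCK_EX);
 *	... modify peer structures ...
 *	lnet_net_unlock(LNET_LOCK_EX);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */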
3342 mutex_lock(&the_lnet.ln_api_mutex);
3343 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3349 * If this peer is not on the peer list then it is being torn
3350 * down, and our reference count may be all that is keeping it
3351 * alive. Don't do any work on it.
3353 if (list_empty(&lp->lp_peer_list))
3356 flags = LNET_PEER_DISCOVERED;
3357 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3358 flags |= LNET_PEER_MULTI_RAIL;
3361 * Check whether the primary NID in the message matches the
3362 * primary NID of the peer. If it does, update the peer, if
3363 * it does not, check whether there is already a peer with
3364 * that primary NID. If no such peer exists, try to update
3365 * the primary NID of the current peer (allowed if it was
3366 * created due to message traffic) and complete the update.
3367 * If the peer did exist, hand off the data to it.
3369 * The peer for the loopback interface is a special case: this
3370 * is the peer for the local node, and we want to set its
3371 * primary NID to the correct value here. Moreover, this peer
3372 * can show up with only the loopback NID in the ping buffer.
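/*
 * Case analysis of the code below (a summary, not a spec):
 *
 *	lp is the loopback peer         -> adopt nid as primary, merge
 *	nid is already lp's primary, or
 *	  known and discovery disabled  -> merge the data into lp
 *	nid unknown or owned by lp      -> try to set it as primary,
 *	                                   then merge
 *	nid owned by another peer       -> hand the data to that peer
 *	                                   via lnet_peer_set_primary_data()
 */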
3374 if (pbuf->pb_info.pi_nnis <= 1)
3376 nid = pbuf->pb_info.pi_ni[1].ns_nid;
3377 if (nid_is_lo0(&lp->lp_primary_nid)) {
3378 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3380 rc = lnet_peer_merge_data(lp, pbuf);
3382 * If the primary NID of the peer is present in the ping info
3383 * returned from the peer, but it is not the primary NID we have
3384 * cached locally, and discovery is disabled, then we don't want to
3385 * update our local peer info by adding or removing NIDs; we just
3386 * want to update the status of the NIDs that we currently have
3387 * recorded in that peer.
3389 } else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
3390 (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3392 lnet_is_discovery_disabled(lp))) {
3393 rc = lnet_peer_merge_data(lp, pbuf);
3395 lpni = lnet_find_peer_ni_locked(nid);
3396 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3397 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3399 CERROR("Primary NID error %s versus %s: %d\n",
3400 libcfs_nidstr(&lp->lp_primary_nid),
3401 libcfs_nid2str(nid), rc);
3403 rc = lnet_peer_merge_data(lp, pbuf);
3406 lnet_peer_ni_decref_locked(lpni);
3408 struct lnet_peer *new_lp;
3409 new_lp = lpni->lpni_peer_net->lpn_peer;
3411 * if lp has discovery/MR enabled that means new_lp
3412 * should have discovery/MR enabled as well, since
3413 * it's the same peer, which we're about to merge
3415 spin_lock(&lp->lp_lock);
3416 spin_lock(&new_lp->lp_lock);
3417 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3418 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3419 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3420 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3421 /* If we're processing a ping reply then we may be
3422 * about to send a push to the peer that we ping'd.
3423 * Since the ping reply that we're processing was
3424 * received by lp, we need to set the discovery source
3425 * NID for new_lp to the NID stored in lp.
3427 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3428 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3429 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3431 spin_unlock(&new_lp->lp_lock);
3432 spin_unlock(&lp->lp_lock);
3434 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3435 lnet_consolidate_routes_locked(lp, new_lp);
3436 lnet_peer_ni_decref_locked(lpni);
3440 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3441 libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3443 mutex_unlock(&the_lnet.ln_api_mutex);
3445 spin_lock(&lp->lp_lock);
3446 /* Tell discovery to re-check the peer immediately. */
3448 rc = LNET_REDISCOVER_PEER;
3453 * A ping failed. Clear the PING_FAILED state and set the
3454 * FORCE_PING state, to ensure a retry even if discovery is
3455 * disabled. This avoids being left with incorrect state.
3457 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3458 __must_hold(&lp->lp_lock)
3460 struct lnet_handle_md mdh;
3463 mdh = lp->lp_ping_mdh;
3464 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3465 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3466 lp->lp_state |= LNET_PEER_FORCE_PING;
3467 rc = lp->lp_ping_error;
3468 lp->lp_ping_error = 0;
3469 spin_unlock(&lp->lp_lock);
3471 if (!LNetMDHandleIsInvalid(mdh))
3474 CDEBUG(D_NET, "peer %s:%d\n",
3475 libcfs_nidstr(&lp->lp_primary_nid), rc);
3477 spin_lock(&lp->lp_lock);
3478 return rc ? rc : LNET_REDISCOVER_PEER;
3481 /* Active side of ping. */
3482 static int lnet_peer_send_ping(struct lnet_peer *lp)
3483 __must_hold(&lp->lp_lock)
3489 lp->lp_state |= LNET_PEER_PING_SENT;
3490 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3491 spin_unlock(&lp->lp_lock);
3493 cpt = lnet_net_lock_current();
3494 /* Refcount for MD. */
3495 lnet_peer_addref_locked(lp);
3496 lnet_net_unlock(cpt);
3498 nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3500 rc = lnet_send_ping(lnet_nid_to_nid4(&lp->lp_primary_nid),
3501 &lp->lp_ping_mdh, nnis, lp,
3502 the_lnet.ln_dc_handler, false);
3505 * if LNetMDBind in lnet_send_ping fails we need to decrement the
3506 * refcount on the peer, otherwise LNetMDUnlink will be called
3507 * which will eventually do that.
3510 lnet_net_lock(cpt);
3511 lnet_peer_decref_locked(lp);
3512 lnet_net_unlock(cpt);
3513 rc = -rc; /* change rc to a negative value */
3515 } else if (rc < 0) {
3519 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3521 spin_lock(&lp->lp_lock);
3525 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3527 * The errors that get us here are considered hard errors and
3528 * cause Discovery to terminate. So we clear PING_SENT, but do
3529 * not set either PING_FAILED or FORCE_PING. In fact we need
3530 * to clear PING_FAILED, because the unlink event handler will
3531 * have set it if we called LNetMDUnlink() above.
3533 spin_lock(&lp->lp_lock);
3534 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3539 * This function exists because you cannot call LNetMDUnlink() from an
3540 * event handler.
3542 static int lnet_peer_push_failed(struct lnet_peer *lp)
3543 __must_hold(&lp->lp_lock)
3545 struct lnet_handle_md mdh;
3548 mdh = lp->lp_push_mdh;
3549 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3550 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3551 rc = lp->lp_push_error;
3552 lp->lp_push_error = 0;
3553 spin_unlock(&lp->lp_lock);
3555 if (!LNetMDHandleIsInvalid(mdh))
3558 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3559 spin_lock(&lp->lp_lock);
3560 return rc ? rc : LNET_REDISCOVER_PEER;
3564 * Mark the peer as discovered.
3566 static int lnet_peer_discovered(struct lnet_peer *lp)
3567 __must_hold(&lp->lp_lock)
3569 lp->lp_state |= LNET_PEER_DISCOVERED;
3570 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3571 LNET_PEER_REDISCOVER);
3573 lp->lp_dc_error = 0;
3575 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3580 /* Active side of push. */
3581 static int lnet_peer_send_push(struct lnet_peer *lp)
3582 __must_hold(&lp->lp_lock)
3584 struct lnet_ping_buffer *pbuf;
3585 struct lnet_process_id id;
3590 /* Don't push to a non-multi-rail peer. */
3591 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3592 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3593 /* if peer's NIDs are uptodate then peer is discovered */
3594 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3595 rc = lnet_peer_discovered(lp);
3602 lp->lp_state |= LNET_PEER_PUSH_SENT;
3603 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3604 spin_unlock(&lp->lp_lock);
3606 cpt = lnet_net_lock_current();
3607 pbuf = the_lnet.ln_ping_target;
3608 lnet_ping_buffer_addref(pbuf);
3609 lnet_net_unlock(cpt);
3611 /* Push source MD */
3612 md.start = &pbuf->pb_info;
3613 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3614 md.threshold = 2; /* Put/Ack */
3616 md.options = LNET_MD_TRACK_RESPONSE;
3617 md.handler = the_lnet.ln_dc_handler;
3620 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3622 lnet_ping_buffer_decref(pbuf);
3623 CERROR("Can't bind push source MD: %d\n", rc);
3627 cpt = lnet_net_lock_current();
3628 /* Refcount for MD. */
3629 lnet_peer_addref_locked(lp);
3630 id.pid = LNET_PID_LUSTRE;
3631 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3632 id.nid = lnet_nid_to_nid4(&lp->lp_disc_dst_nid);
3634 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
3635 lnet_net_unlock(cpt);
3637 rc = LNetPut(lnet_nid_to_nid4(&lp->lp_disc_src_nid), lp->lp_push_mdh,
3638 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3639 LNET_PROTO_PING_MATCHBITS, 0, 0);
3642 * reset the discovery nid. There is no need to restrict sending
3643 * from that source, if we call lnet_push_update_to_peers(). It'll
3644 * get set to a specific NID, if we initiate discovery from scratch.
3647 lp->lp_disc_src_nid = LNET_ANY_NID;
3648 lp->lp_disc_dst_nid = LNET_ANY_NID;
3653 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3655 spin_lock(&lp->lp_lock);
3659 LNetMDUnlink(lp->lp_push_mdh);
3660 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3662 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3665 * The errors that get us here are considered hard errors and
3666 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3667 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3668 * because the unlink event handler will have set it if we
3669 * called LNetMDUnlink() above.
3671 spin_lock(&lp->lp_lock);
3672 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3677 * An unrecoverable error was encountered during discovery.
3678 * Set error status in peer and abort discovery.
3680 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3682 CDEBUG(D_NET, "Discovery error %s: %d\n",
3683 libcfs_nidstr(&lp->lp_primary_nid), error);
3685 spin_lock(&lp->lp_lock);
3686 lp->lp_dc_error = error;
3687 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3688 lp->lp_state |= LNET_PEER_REDISCOVER;
3689 spin_unlock(&lp->lp_lock);
3693 * Wait for work to be queued or some other change that must be
3694 * attended to. Returns non-zero if the discovery thread should shut
3695 * down.
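/*
 * The wait below is broken out of when discovery is stopping, the
 * push target needs a resize or a repost, the request queue is
 * non-empty, or there are messages to resend; it also times out
 * every second. A non-zero return (shutdown) happens only when
 * discovery is stopping.
 */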
3697 static int lnet_peer_discovery_wait_for_work(void)
3704 cpt = lnet_net_lock_current();
3706 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3707 TASK_INTERRUPTIBLE);
3708 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3710 if (lnet_push_target_resize_needed() ||
3711 the_lnet.ln_push_target->pb_needs_post)
3713 if (!list_empty(&the_lnet.ln_dc_request))
3715 if (!list_empty(&the_lnet.ln_msg_resend))
3717 lnet_net_unlock(cpt);
3720 * wakeup max every second to check if there are peers that
3721 * have been stuck on the working queue for longer than
3722 * the timeout.
3724 schedule_timeout(cfs_time_seconds(1));
3725 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3726 cpt = lnet_net_lock_current();
3728 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3730 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3733 lnet_net_unlock(cpt);
3735 CDEBUG(D_NET, "woken: %d\n", rc);
3741 * Messages that were pending on a destroyed peer will be put on a global
3742 * resend list. The message resend list will be checked by
3743 * the discovery thread when it wakes up, which resends them. These
3744 * messages can still be sendable if the lpni that was the initial
3745 * cause of the re-queue was transferred to another peer.
3747 * It is possible that LNet could be shutdown while we're iterating
3748 * through the list. lnet_shutdown_lndnets() will attempt to access the
3749 * resend list, but will have to wait until the spinlock is released, by
3750 * which time there shouldn't be any more messages on the resend list.
3751 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3752 * for the messages so they can be released. The other case is that
3753 * lnet_shutdown_lndnets() can finalize all the messages before this
3754 * function can visit the resend list, in which case this function
3755 * will be a no-op.
3757 static void lnet_resend_msgs(void)
3759 struct lnet_msg *msg, *tmp;
3760 LIST_HEAD(resend);
3761 int rc;
3763 spin_lock(&the_lnet.ln_msg_resend_lock);
3764 list_splice(&the_lnet.ln_msg_resend, &resend);
3765 spin_unlock(&the_lnet.ln_msg_resend_lock);
3767 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3768 list_del_init(&msg->msg_list);
3769 rc = lnet_send(msg->msg_src_nid_param, msg,
3770 msg->msg_rtr_nid_param);
3772 CNETERR("Error sending %s to %s: %d\n",
3773 lnet_msgtyp2str(msg->msg_type),
3774 libcfs_id2str(msg->msg_target), rc);
3775 lnet_finalize(msg, rc);
3780 /* The discovery thread. */
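/*
 * Outline of the main loop (a reading aid for the function below):
 *
 *	1. Block in lnet_peer_discovery_wait_for_work().
 *	2. Resize or repost the push target if needed.
 *	3. Drain the resend list via lnet_resend_msgs().
 *	4. For each peer on ln_dc_request: move it to ln_dc_working,
 *	   pick an action based on its state bits, then either requeue
 *	   it (LNET_REDISCOVER_PEER) or complete/fail discovery.
 *	5. On shutdown: cancel work on ln_dc_working, wait for
 *	   ln_dc_expired to drain, and fail ln_dc_request entries
 *	   with -ESHUTDOWN.
 */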
3781 static int lnet_peer_discovery(void *arg)
3783 struct lnet_peer *lp;
3784 int rc;
3786 wait_for_completion(&the_lnet.ln_started);
3788 CDEBUG(D_NET, "started\n");
3791 if (lnet_peer_discovery_wait_for_work())
3794 if (lnet_push_target_resize_needed())
3795 lnet_push_target_resize();
3796 else if (the_lnet.ln_push_target->pb_needs_post)
3797 lnet_push_target_post(the_lnet.ln_push_target,
3798 &the_lnet.ln_push_target_md);
3800 lnet_resend_msgs();
3802 lnet_net_lock(LNET_LOCK_EX);
3803 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3804 lnet_net_unlock(LNET_LOCK_EX);
3809 * Process all incoming discovery work requests. When
3810 * discovery must wait on a peer to change state, it
3811 * is added to the tail of the ln_dc_working queue. A
3812 * timestamp keeps track of when the peer was added,
3813 * so we can time out discovery requests that take too
3814 * long.
3816 while (!list_empty(&the_lnet.ln_dc_request)) {
3817 lp = list_first_entry(&the_lnet.ln_dc_request,
3818 struct lnet_peer, lp_dc_list);
3819 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3821 * set the time the peer was put on the dc_working
3822 * queue, so it doesn't remain on the queue
3823 * forever in case the GET message (for ping)
3824 * doesn't get a REPLY or the PUT message (for
3825 * push) doesn't get an ACK.
3827 lp->lp_last_queued = ktime_get_real_seconds();
3828 lnet_net_unlock(LNET_LOCK_EX);
3830 if (lnet_push_target_resize_needed())
3831 lnet_push_target_resize();
3832 else if (the_lnet.ln_push_target->pb_needs_post)
3833 lnet_push_target_post(the_lnet.ln_push_target,
3834 &the_lnet.ln_push_target_md);
3837 * Select an action depending on the state of
3838 * the peer and whether discovery is disabled.
3839 * The check whether discovery is disabled is
3840 * done after the code that handles processing
3841 * for arrived data, cleanup for failures, and
3842 * forcing a Ping or Push.
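/* Priority of the checks below, first match wins: deletion mark,
 * data present, ping failed, push failed, forced ping, forced
 * push, NIDs not up to date (ping), push needed, and finally
 * mark the peer discovered.
 */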
3844 spin_lock(&lp->lp_lock);
3845 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3846 libcfs_nidstr(&lp->lp_primary_nid), lp,
3848 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3849 LNET_PEER_MARK_DELETED))
3850 rc = lnet_peer_deletion(lp);
3851 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3852 rc = lnet_peer_data_present(lp);
3853 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3854 rc = lnet_peer_ping_failed(lp);
3855 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3856 rc = lnet_peer_push_failed(lp);
3857 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3858 rc = lnet_peer_send_ping(lp);
3859 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3860 rc = lnet_peer_send_push(lp);
3861 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3862 rc = lnet_peer_send_ping(lp);
3863 else if (lnet_peer_needs_push(lp))
3864 rc = lnet_peer_send_push(lp);
3866 rc = lnet_peer_discovered(lp);
3867 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3868 libcfs_nidstr(&lp->lp_primary_nid), lp,
3870 spin_unlock(&lp->lp_lock);
3872 lnet_net_lock(LNET_LOCK_EX);
3873 if (rc == LNET_REDISCOVER_PEER) {
3874 list_move(&lp->lp_dc_list,
3875 &the_lnet.ln_dc_request);
3877 lnet_peer_discovery_error(lp, rc);
3879 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3880 lnet_peer_discovery_complete(lp);
3881 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3886 lnet_net_unlock(LNET_LOCK_EX);
3889 CDEBUG(D_NET, "stopping\n");
3891 * Clean up before telling lnet_peer_discovery_stop() that
3892 * we're done. Use wake_up() below to somewhat reduce the
3893 * size of the thundering herd if there are multiple threads
3894 * waiting on discovery of a single peer.
3897 /* Queue cleanup 1: stop all pending pings and pushes. */
3898 lnet_net_lock(LNET_LOCK_EX);
3899 while (!list_empty(&the_lnet.ln_dc_working)) {
3900 lp = list_first_entry(&the_lnet.ln_dc_working,
3901 struct lnet_peer, lp_dc_list);
3902 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3903 lnet_net_unlock(LNET_LOCK_EX);
3904 lnet_peer_cancel_discovery(lp);
3905 lnet_net_lock(LNET_LOCK_EX);
3907 lnet_net_unlock(LNET_LOCK_EX);
3909 /* Queue cleanup 2: wait for the expired queue to clear. */
3910 while (!list_empty(&the_lnet.ln_dc_expired))
3911 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3913 /* Queue cleanup 3: clear the request queue. */
3914 lnet_net_lock(LNET_LOCK_EX);
3915 while (!list_empty(&the_lnet.ln_dc_request)) {
3916 lp = list_first_entry(&the_lnet.ln_dc_request,
3917 struct lnet_peer, lp_dc_list);
3918 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3919 lnet_peer_discovery_complete(lp);
3921 lnet_net_unlock(LNET_LOCK_EX);
3923 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3924 the_lnet.ln_dc_handler = NULL;
3926 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3927 wake_up(&the_lnet.ln_dc_waitq);
3929 CDEBUG(D_NET, "stopped\n");
3934 /* ln_api_mutex is held on entry. */
3935 int lnet_peer_discovery_start(void)
3937 struct task_struct *task;
3940 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3943 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3944 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3945 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3948 CERROR("Can't start peer discovery thread: %d\n", rc);
3950 the_lnet.ln_dc_handler = NULL;
3952 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3955 CDEBUG(D_NET, "discovery start: %d\n", rc);
3960 /* ln_api_mutex is held on entry. */
3961 void lnet_peer_discovery_stop(void)
3963 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3966 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3967 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3969 /* In the LNetNIInit() path we may be stopping discovery before it
3970 * entered its work loop
3972 if (!completion_done(&the_lnet.ln_started))
3973 complete(&the_lnet.ln_started);
3975 wake_up(&the_lnet.ln_dc_waitq);
3977 wait_event(the_lnet.ln_dc_waitq,
3978 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3980 LASSERT(list_empty(&the_lnet.ln_dc_request));
3981 LASSERT(list_empty(&the_lnet.ln_dc_working));
3982 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3984 CDEBUG(D_NET, "discovery stopped\n");
3990 lnet_debug_peer(lnet_nid_t nid)
3992 char *aliveness = "NA";
3993 struct lnet_peer_ni *lp;
3996 cpt = lnet_cpt_of_nid(nid, NULL);
3998 lnet_net_lock(cpt);
3999 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
4001 lnet_net_unlock(cpt);
4002 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
4006 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4007 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4009 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4010 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4011 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4012 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4013 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4015 lnet_peer_ni_decref_locked(lp);
4017 lnet_net_unlock(cpt);
4020 /* Gathering information for userspace. */
4022 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4023 char aliveness[LNET_MAX_STR_LEN],
4024 __u32 *cpt_iter, __u32 *refcount,
4025 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4026 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4027 __u32 *peer_tx_qnob)
4029 struct lnet_peer_table *peer_table;
4030 struct lnet_peer_ni *lp;
4035 /* get the number of CPTs */
4036 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4038 /* if the cpt number to be examined is >= the number of cpts in
4039 * the system then indicate that there are no more cpts to examine
4041 if (*cpt_iter >= lncpt)
4044 /* get the current table */
4045 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4046 /* if the ptable is NULL then there are no more cpts to examine */
4047 if (peer_table == NULL)
4050 lnet_net_lock(*cpt_iter);
4052 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4053 struct list_head *peers = &peer_table->pt_hash[j];
4055 list_for_each_entry(lp, peers, lpni_hashlist) {
4056 if (!nid_is_nid4(&lp->lpni_nid))
4058 if (peer_index-- > 0)
4061 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4062 if (lnet_isrouter(lp) ||
4063 lnet_peer_aliveness_enabled(lp))
4064 snprintf(aliveness, LNET_MAX_STR_LEN,
4065 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4067 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4068 *refcount = kref_read(&lp->lpni_kref);
4069 *ni_peer_tx_credits =
4070 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4071 *peer_tx_credits = lp->lpni_txcredits;
4072 *peer_rtr_credits = lp->lpni_rtrcredits;
4073 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4074 *peer_tx_qnob = lp->lpni_txqnob;
4080 lnet_net_unlock(*cpt_iter);
4084 return found ? 0 : -ENOENT;
4087 /* ln_api_mutex is held, which keeps the peer list stable */
4088 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4090 struct lnet_ioctl_element_stats *lpni_stats;
4091 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4092 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4093 struct lnet_peer_ni_credit_info *lpni_info;
4094 struct lnet_peer_ni *lpni;
4095 struct lnet_peer *lp;
4100 lp = lnet_find_peer(cfg->prcfg_prim_nid);
4107 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4108 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4109 size *= lp->lp_nnis;
4110 if (size > cfg->prcfg_size) {
4111 cfg->prcfg_size = size;
4116 cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4117 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4118 cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4119 cfg->prcfg_count = lp->lp_nnis;
4120 cfg->prcfg_size = size;
4121 cfg->prcfg_state = lp->lp_state;
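/* For each of the lp_nnis peer NIs the bulk buffer receives, in
 * order: the NID, a credit/aliveness info struct, element stats,
 * message stats, and health stats. The size check above accounts
 * for the same five records per NI.
 */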
4123 /* Allocate helper buffers. */
4125 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4128 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4131 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4132 if (!lpni_msg_stats)
4133 goto out_free_stats;
4134 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4136 goto out_free_msg_stats;
4141 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4142 if (!nid_is_nid4(&lpni->lpni_nid))
4144 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4145 if (copy_to_user(bulk, &nid, sizeof(nid)))
4146 goto out_free_hstats;
4147 bulk += sizeof(nid);
4149 memset(lpni_info, 0, sizeof(*lpni_info));
4150 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4151 if (lnet_isrouter(lpni) ||
4152 lnet_peer_aliveness_enabled(lpni))
4153 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4154 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4156 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4157 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4158 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4159 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4160 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4161 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4162 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4163 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4164 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4165 goto out_free_hstats;
4166 bulk += sizeof(*lpni_info);
4168 memset(lpni_stats, 0, sizeof(*lpni_stats));
4169 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4170 LNET_STATS_TYPE_SEND);
4171 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4172 LNET_STATS_TYPE_RECV);
4173 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4174 LNET_STATS_TYPE_DROP);
4175 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4176 goto out_free_hstats;
4177 bulk += sizeof(*lpni_stats);
4178 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4179 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4180 goto out_free_hstats;
4181 bulk += sizeof(*lpni_msg_stats);
4182 lpni_hstats->hlpni_network_timeout =
4183 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4184 lpni_hstats->hlpni_remote_dropped =
4185 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4186 lpni_hstats->hlpni_remote_timeout =
4187 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4188 lpni_hstats->hlpni_remote_error =
4189 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4190 lpni_hstats->hlpni_health_value =
4191 atomic_read(&lpni->lpni_healthv);
4192 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4193 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4194 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4195 goto out_free_hstats;
4196 bulk += sizeof(*lpni_hstats);
4201 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4203 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4205 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4207 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4209 lnet_peer_decref_locked(lp);
4214 /* must hold net_lock/0 */
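/* A peer NI is left off the recovery queue if the monitor thread is
 * not running, it is already queued, its health is at
 * LNET_MAX_HEALTH_VALUE, it has never been alive, or it has aged out
 * per lnet_recovery_limit (see the checks below).
 */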
4216 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4217 struct list_head *recovery_queue,
4220 /* the monitor thread could have shut down and cleaned up the queues */
4221 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4224 if (!list_empty(&lpni->lpni_recovery))
4227 if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4230 if (!lpni->lpni_last_alive) {
4232 "lpni %s(%p) not eligible for recovery last alive %lld\n",
4233 libcfs_nidstr(&lpni->lpni_nid), lpni,
4234 lpni->lpni_last_alive);
4238 if (lnet_recovery_limit &&
4239 now > lpni->lpni_last_alive + lnet_recovery_limit) {
4240 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4241 libcfs_nidstr(&lpni->lpni_nid),
4242 lpni->lpni_last_alive);
4243 /* Reset the ping count so that if this peer NI is added back to
4244 * the recovery queue we will send the first ping right away.
4246 lpni->lpni_ping_count = 0;
4250 /* This peer NI is going on the recovery queue, so take a ref on it */
4251 lnet_peer_ni_addref_locked(lpni);
4253 lnet_peer_ni_set_next_ping(lpni, now);
4255 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4256 libcfs_nidstr(&lpni->lpni_nid),
4257 lpni->lpni_ping_count,
4258 lpni->lpni_next_ping,
4259 lpni->lpni_last_alive,
4260 atomic_read(&lpni->lpni_healthv));
4262 list_add_tail(&lpni->lpni_recovery, recovery_queue);
4265 /* Call with the ln_api_mutex held */
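/* Two modes below: update a single peer NI looked up by NID, or walk
 * every peer table and set each peer NI's health to the given value.
 * In both cases the NI is then considered for the recovery queue.
 */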
4267 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4269 struct lnet_peer_table *ptable;
4270 struct lnet_peer *lp;
4271 struct lnet_peer_net *lpn;
4272 struct lnet_peer_ni *lpni;
4277 if (the_lnet.ln_state != LNET_STATE_RUNNING)
4280 now = ktime_get_seconds();
4283 lnet_net_lock(LNET_LOCK_EX);
4284 lpni = lnet_find_peer_ni_locked(nid);
4286 lnet_net_unlock(LNET_LOCK_EX);
4289 lnet_set_lpni_healthv_locked(lpni, value);
4290 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4291 &the_lnet.ln_mt_peerNIRecovq, now);
4292 lnet_peer_ni_decref_locked(lpni);
4293 lnet_net_unlock(LNET_LOCK_EX);
4297 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4300 * Walk all the peers and reset the health value for each one to the
4301 * specified value.
4303 lnet_net_lock(LNET_LOCK_EX);
4304 for (cpt = 0; cpt < lncpt; cpt++) {
4305 ptable = the_lnet.ln_peer_tables[cpt];
4306 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4307 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4308 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4310 lnet_set_lpni_healthv_locked(lpni,
4312 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4313 &the_lnet.ln_mt_peerNIRecovq, now);
4318 lnet_net_unlock(LNET_LOCK_EX);