/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/peer.c
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that discovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			continue;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		atomic_inc(&lpni->lpni_refcount);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	/*
	 * Turn off discovery for the loopback peer. If you're creating a
	 * peer for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * myself.
	 */
	if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock could cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held.
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	LASSERT(ptable->pt_number > 0);
	ptable->pt_number--;

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	int i = 3;

	spin_lock(&ptable->pt_zombie_lock);
	while (ptable->pt_zombies) {
		spin_unlock(&ptable->pt_zombie_lock);
		i++;
		if (is_power_of_2(i)) {
			CDEBUG(D_WARNING,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) >> 1);
		spin_lock(&ptable->pt_zombie_lock);
	}
	spin_unlock(&ptable->pt_zombie_lock);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NIDNET(LNET_NID_ANY), gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
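/*
 * A minimal usage sketch of the iterator above (hypothetical caller;
 * assumes lnet_net_lock(LNET_LOCK_EX) is already held): feed each
 * result back in as @prev to walk every NI of a peer across all of
 * its networks, the same idiom used by
 * lnet_peer_clr_non_mr_pref_nids() further below:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		CDEBUG(D_NET, "NID %s\n", libcfs_nid2str(lpni->lpni_nid));
 */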
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	__u32 count = 0;
	__u32 size = 0;
	int lncpt;
	int cpt;
	__u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
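/*
 * Sketch of the expected calling convention for lnet_get_peer_list()
 * (hypothetical ioctl-style caller; variable names invented for
 * illustration): on -E2BIG the required size has been written back,
 * so the caller can reallocate and retry.
 *
 *	rc = lnet_get_peer_list(&count, &size, ids);
 *	if (rc == -E2BIG)
 *		... reallocate ids to hold 'size' bytes and call again ...
 */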
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int i;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	for (i = 0; i < lpni->lpni_pref_nnids; i++) {
		if (lpni->lpni_pref.nids[i] == nid)
			return true;
	}

	return false;
}
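/*
 * Note on the lpni_pref union used above and below: with
 * lpni_pref_nnids <= 1 the preference lives in the single
 * lpni_pref.nid field, while with lpni_pref_nnids >= 2 it lives in
 * the dynamically allocated lpni_pref.nids array.
 * lnet_peer_add_pref_nid() and lnet_peer_del_pref_nid() further below
 * convert between the two representations when crossing that
 * boundary.
 */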
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);
	return rc;
}
/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			rc = -EPERM;
			goto out;
		}
	}

	if (lpni->lpni_pref_nnids != 0) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] == nid) {
				LIBCFS_FREE(nids, size);
				rc = -EEXIST;
				goto out;
			}
			nids[i] = lpni->lpni_pref.nids[i];
		}
		nids[i] = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (oldnids) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i;
	int j;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else if (lpni->lpni_pref_nnids == 2) {
		if (lpni->lpni_pref.nids[0] != nid &&
		    lpni->lpni_pref.nids[1] != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] == nid)
				continue;
			nids[j++] = lpni->lpni_pref.nids[i];
		}
		/* Check if we actually removed a nid. */
		if (j == lpni->lpni_pref_nnids) {
			LIBCFS_FREE(nids, size);
			rc = -ENOENT;
			goto out;
		}
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1) {
		lpni->lpni_pref.nid = LNET_NID_ANY;
	} else if (lpni->lpni_pref_nnids == 2) {
		oldnids = lpni->lpni_pref.nids;
		if (oldnids[0] == nid)
			lpni->lpni_pref.nid = oldnids[1];
		else
			lpni->lpni_pref.nid = oldnids[0];
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (oldnids) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;
	while (!lnet_peer_is_uptodate(lp)) {
		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);
	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
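/*
 * A minimal usage sketch (hypothetical caller): because a multi-rail
 * peer owns several NIDs, two NIDs should be compared through their
 * primary NIDs rather than directly, e.g.:
 *
 *	if (LNetPrimaryNID(nid1) == LNetPrimaryNID(nid2))
 *		... both NIDs belong to the same peer ...
 *
 * Note that this may block while discovery of the peer completes.
 */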
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * Always returns 0. This is the last function called from functions
 * that do return an int, so returning 0 here allows the compiler to
 * do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		ptable->pt_number++;
		/* This is the 1st refcount on lpni. */
		atomic_inc(&lpni->lpni_refcount);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;
	lnet_net_unlock(LNET_LOCK_EX);

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		lnet_peer_ni_decref_locked(lpni);
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	/* If the peer_ni was allocated above its peer_net pointer is NULL */
	if (!lpni->lpni_peer_net)
		LIBCFS_FREE(lpni, sizeof(*lpni));
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;
	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		goto out;
	lp->lp_primary_nid = nid;
out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
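/*
 * Sketch of how the two IOC_LIBCFS_ADD_PEER_NI cases described above
 * map onto calls (hypothetical values for illustration; the ioctl
 * path holds ln_api_mutex): first create a Multi-Rail peer from its
 * primary NID, then add a second NID to it:
 *
 *	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
 *	rc = lnet_add_peer_ni(prim_nid, nid, true);
 */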
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
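/*
 * The deletion cases described above, as a sketch (hypothetical
 * values; ln_api_mutex held): passing LNET_NID_ANY or the primary
 * NID deletes the whole peer, while any other NID deletes just that
 * peer NI:
 *
 *	rc = lnet_del_peer_ni(prim_nid, LNET_NID_ANY);
 *	rc = lnet_del_peer_ni(prim_nid, nid);
 */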
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	/* remove the peer ni from the zombie list */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	spin_lock(&ptable->pt_zombie_lock);
	list_del_init(&lpni->lpni_hashlist);
	ptable->pt_zombies--;
	spin_unlock(&ptable->pt_zombie_lock);

	if (lpni->lpni_pref_nnids > 1) {
		LIBCFS_FREE(lpni->lpni_pref.nids,
			    sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		rc = true;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		if (lnet_peer_discovery_disabled)
			rc = true;
		else
			rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	struct list_head pending_msgs;

	INIT_LIST_HEAD(&pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up_all(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
	struct lnet_peer *lp;

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * FORCE_PING flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Check the MULTIRAIL flag. Complain if the peer was DLC
	 * configured without it.
	 */
	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CERROR("Push says %s is Multi-Rail, DLC says not\n",
			       libcfs_nid2str(lp->lp_primary_nid));
		} else {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/*
	 * Check whether the Put data is stale. Stale data can just be
	 * dropped.
	 */
	if (pbuf->pb_info.pi_nnis > 1 &&
	    lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid &&
	    LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno) {
		CDEBUG(D_NET, "Stale Push from %s: got %u have %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf),
		       lp->lp_peer_seqno);
		goto out;
	}

	/*
	 * Check whether the Put data is new, in which case we clear
	 * the UPTODATE flag and prepare to process it.
	 *
	 * If the Put data is current, and the peer is UPTODATE then
	 * we assume everything is all right and drop the data as
	 * stale.
	 */
	if (LNET_PING_BUFFER_SEQNO(pbuf) > lp->lp_peer_seqno) {
		lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
	} else if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
		CDEBUG(D_NET, "Stale Push from %s: got %u have %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf),
		       lp->lp_peer_seqno);
		goto out;
	}

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
			LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
/*
 * Peer discovery slow path. The ln_api_mutex is held on entry, and
 * dropped/retaken within this function. An lnet_peer_ni is passed in
 * because discovery could tear down an lnet_peer.
 */
int
lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
{
	DEFINE_WAIT(wait);
	struct lnet_peer *lp;
	int rc = 0;

again:
	lnet_net_unlock(cpt);
	lnet_net_lock(LNET_LOCK_EX);
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_clear_discovery_error(lp);

	/*
	 * We're willing to be interrupted. The lpni can become a
	 * zombie if we race with DLC, so we must check for that.
	 */
	for (;;) {
		prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
			break;
		if (lp->lp_dc_error)
			break;
		if (lnet_peer_is_uptodate(lp))
			break;
		lnet_peer_queue_for_discovery(lp);
		/*
		 * if caller requested a non-blocking operation then
		 * return immediately. Once discovery is complete then the
		 * peer ref will be decremented and any pending messages
		 * that were stopped due to discovery will be transmitted.
		 */
		if (!block)
			break;

		lnet_peer_addref_locked(lp);
		lnet_net_unlock(LNET_LOCK_EX);
		schedule();
		finish_wait(&lp->lp_dc_waitq, &wait);
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_decref_locked(lp);
		/* Peer may have changed */
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	finish_wait(&lp->lp_dc_waitq, &wait);

	lnet_net_unlock(LNET_LOCK_EX);
	lnet_net_lock(cpt);

	/*
	 * If the peer has changed after we've discovered the older peer,
	 * then we need to discover the new peer to make sure the
	 * interface information is up to date
	 */
	if (lp != lpni->lpni_peer_net->lpn_peer)
		goto again;

	if (signal_pending(current))
		rc = -EINTR;
	else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		rc = -ESHUTDOWN;
	else if (lp->lp_dc_error)
		rc = lp->lp_dc_error;
	else if (!block)
		CDEBUG(D_NET, "non-blocking discovery\n");
	else if (!lnet_peer_is_uptodate(lp))
		goto again;

	CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
	       (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
	       libcfs_nid2str(lpni->lpni_nid), rc,
	       (!block) ? "pending discovery" : "discovery complete");

	return rc;
}
/* Handle an incoming ack for a push. */
static void
lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_PUSH_SENT;
	lp->lp_push_error = ev->status;
	if (ev->status)
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
	else
		lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	spin_unlock(&lp->lp_lock);

	CDEBUG(D_NET, "peer %s ev->status %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), ev->status);
}
/* Handle a Reply message. This is the reply to a Ping message. */
static void
lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	int rc;

	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Set PING_FAILED to trigger a retry.
	 */
	if (ev->status) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
		CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	/*
	 * A reply with invalid or corrupted info. Set PING_FAILED to
	 * trigger a retry.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
		       libcfs_nid2str(lp->lp_primary_nid), rc);
		goto out;
	}

	/*
	 * Update the MULTI_RAIL flag based on the reply. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
			/* Everything's fine */
		} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("Reply says %s is Multi-Rail, DLC says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		} else {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("DLC says %s is Multi-Rail, Reply says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		} else {
			CERROR("Multi-Rail state vanished from %s\n",
			       libcfs_nid2str(lp->lp_primary_nid));
			lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
		}
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Check for truncation of the Reply. Clear PING_SENT and set
	 * PING_FAILED to trigger a retry.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/*
	 * Check the sequence numbers in the reply. These are only
	 * available if the reply came from a Multi-Rail peer.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
	    pbuf->pb_info.pi_nnis > 1 &&
	    lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno) {
			CDEBUG(D_NET, "Stale Reply from %s: got %u have %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       lp->lp_peer_seqno);
			goto out;
		}

		if (LNET_PING_BUFFER_SEQNO(pbuf) > lp->lp_peer_seqno)
			lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	}

	/* We're happy with the state of the data in the buffer. */
	CDEBUG(D_NET, "peer %s data present %u\n",
	       libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno);
	if (lp->lp_state & LNET_PEER_DATA_PRESENT)
		lnet_ping_buffer_decref(lp->lp_data);
	lp->lp_data = pbuf;
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	lnet_ping_buffer_addref(pbuf);

out:
	lp->lp_state &= ~LNET_PEER_PING_SENT;
	spin_unlock(&lp->lp_lock);
}
/*
 * Send event handling. Only matters for error cases, where we clean
 * up state on the peer and peer_ni that would otherwise be updated in
 * the REPLY event handler for a successful Ping, and the ACK event
 * handler for a successful Push.
 */
static int
lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
{
	int rc = 0;

	if (!ev->status)
		goto out;

	spin_lock(&lp->lp_lock);
	if (ev->msg_type == LNET_MSG_GET) {
		lp->lp_state &= ~LNET_PEER_PING_SENT;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
	} else { /* ev->msg_type == LNET_MSG_PUT */
		lp->lp_state &= ~LNET_PEER_PUSH_SENT;
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
		lp->lp_push_error = ev->status;
	}
	spin_unlock(&lp->lp_lock);
	rc = LNET_REDISCOVER_PEER;
out:
	CDEBUG(D_NET, "%s Send to %s: %d\n",
	       (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
	       libcfs_nid2str(ev->target.nid), rc);
	return rc;
}
/*
 * Unlink event handling. This event is only seen if a call to
 * LNetMDUnlink() caused the event to be unlinked. If this call was
 * made after the event was set up in LNetGet() or LNetPut() then we
 * assume the Ping or Push timed out.
 */
static void
lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
{
	spin_lock(&lp->lp_lock);
	/* We've passed through LNetGet() */
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		lp->lp_state &= ~LNET_PEER_PING_SENT;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = -ETIMEDOUT;
		CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
	}
	/* We've passed through LNetPut() */
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		lp->lp_state &= ~LNET_PEER_PUSH_SENT;
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
		lp->lp_push_error = -ETIMEDOUT;
		CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
	}
	spin_unlock(&lp->lp_lock);
}
/*
 * Event handler for the discovery EQ.
 *
 * Called with lnet_res_lock(cpt) held. The cpt is the
 * lnet_cpt_of_cookie() of the md handle cookie.
 */
static void lnet_discovery_event_handler(struct lnet_event *event)
{
	struct lnet_peer *lp = event->md.user_ptr;
	struct lnet_ping_buffer *pbuf;
	int rc;

	/* discovery needs to take another look */
	rc = LNET_REDISCOVER_PEER;

	CDEBUG(D_NET, "Received event: %d\n", event->type);

	switch (event->type) {
	case LNET_EVENT_ACK:
		lnet_discovery_event_ack(lp, event);
		break;
	case LNET_EVENT_REPLY:
		lnet_discovery_event_reply(lp, event);
		break;
	case LNET_EVENT_SEND:
		/* Only send failure triggers a retry. */
		rc = lnet_discovery_event_send(lp, event);
		break;
	case LNET_EVENT_UNLINK:
		/* LNetMDUnlink() was called */
		lnet_discovery_event_unlink(lp, event);
		break;
	default:
		/* Invalid events. */
		LBUG();
	}
	lnet_net_lock(LNET_LOCK_EX);
	if (event->unlinked) {
		pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
		lnet_ping_buffer_decref(pbuf);
		lnet_peer_decref_locked(lp);
	}

	/* put peer back at end of request queue, if discovery not already
	 * complete */
	if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
		list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
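
/*
 * Note on reference counting in the handler above: when the MD for a
 * Ping or Push is created, a reference is taken on both the ping
 * buffer and the peer. The event->unlinked branch is the single point
 * where those references are dropped, so both objects stay valid for
 * as long as LNet can still deliver events on the MD.
 */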
/*
 * Build a peer from incoming data.
 *
 * The NIDs in the incoming data are supposed to be structured as follows:
 *  - loopback
 *  - primary NID
 *  - other NIDs in same net
 *  - NIDs in second net
 *  - NIDs in third net
 *
 * This is due to the way the list of NIDs in the data is created.
 *
 * Note that this function will mark the peer uptodate unless an
 * ENOMEM is encountered. All other errors are due to a conflict
 * between the DLC configuration and what discovery sees. We treat DLC
 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
 * peer from becoming stuck in discovery.
 */
static int lnet_peer_merge_data(struct lnet_peer *lp,
				struct lnet_ping_buffer *pbuf)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t *curnis = NULL;
	struct lnet_ni_status *addnis = NULL;
	lnet_nid_t *delnis = NULL;
	unsigned int flags;
	int ncurnis;
	int naddnis;
	int ndelnis;
	int nnis = 0;
	int i;
	int j;
	int rc;

	flags = LNET_PEER_DISCOVERED;
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * Cache the routing feature for the peer; whether it is enabled
	 * or disabled as reported by the remote peer.
	 */
	spin_lock(&lp->lp_lock);
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
		lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
	else
		lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
	spin_unlock(&lp->lp_lock);

	nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
	LIBCFS_ALLOC(curnis, nnis * sizeof(*curnis));
	LIBCFS_ALLOC(addnis, nnis * sizeof(*addnis));
	LIBCFS_ALLOC(delnis, nnis * sizeof(*delnis));
	if (!curnis || !addnis || !delnis) {
		rc = -ENOMEM;
		goto out;
	}
	ncurnis = 0;
	naddnis = 0;
	ndelnis = 0;

	/* Construct the list of NIDs present in peer. */
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		curnis[ncurnis++] = lpni->lpni_nid;

	/*
	 * Check for NIDs in pbuf not present in curnis[].
	 * The loop starts at 1 to skip the loopback NID.
	 */
	for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
		for (j = 0; j < ncurnis; j++)
			if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
				break;
		if (j == ncurnis)
			addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
	}
	/*
	 * Check for NIDs in curnis[] not present in pbuf.
	 * The nested loop starts at 1 to skip the loopback NID.
	 *
	 * But never add the loopback NID to delnis[]: if it is
	 * present in curnis[] then this peer is for this node.
	 */
	for (i = 0; i < ncurnis; i++) {
		if (LNET_NETTYP(LNET_NIDNET(curnis[i])) == LOLND)
			continue;
		for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
			if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
				/*
				 * update the information we cache for the
				 * peer with the latest information we
				 * received
				 */
				lpni = lnet_find_peer_ni_locked(curnis[i]);
				if (lpni) {
					lpni->lpni_ns_status =
						pbuf->pb_info.pi_ni[j].ns_status;
					lnet_peer_ni_decref_locked(lpni);
				}
				break;
			}
		}
		if (j == pbuf->pb_info.pi_nnis)
			delnis[ndelnis++] = curnis[i];
	}

	for (i = 0; i < naddnis; i++) {
		rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
		if (rc) {
			CERROR("Error adding NID %s to peer %s: %d\n",
			       libcfs_nid2str(addnis[i].ns_nid),
			       libcfs_nid2str(lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
		lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
		if (lpni) {
			lpni->lpni_ns_status = addnis[i].ns_status;
			lnet_peer_ni_decref_locked(lpni);
		}
	}

	for (i = 0; i < ndelnis; i++) {
		/*
		 * for routers it's okay to delete the primary_nid because
		 * the upper layers don't really rely on it. So if we're
		 * being told that the router changed its primary_nid
		 * then it's okay to delete it.
		 */
		if (lp->lp_rtr_refcount > 0)
			flags |= LNET_PEER_RTR_NI_FORCE_DEL;
		rc = lnet_peer_del_nid(lp, delnis[i], flags);
		if (rc) {
			CERROR("Error deleting NID %s from peer %s: %d\n",
			       libcfs_nid2str(delnis[i]),
			       libcfs_nid2str(lp->lp_primary_nid), rc);
			if (rc == -ENOMEM)
				goto out;
		}
	}
	/*
	 * Errors other than -ENOMEM are due to peers having been
	 * configured with DLC. Ignore these because DLC overrides
	 * Discovery.
	 */
	rc = 0;
out:
	LIBCFS_FREE(curnis, nnis * sizeof(*curnis));
	LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
	LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
	lnet_ping_buffer_decref(pbuf);
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);

	if (rc) {
		spin_lock(&lp->lp_lock);
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		spin_unlock(&lp->lp_lock);
	}
	return rc;
}
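
/*
 * Worked example (hypothetical NIDs): if the peer currently holds
 * {192.168.0.1@tcp, 192.168.0.2@tcp} and the ping buffer carries
 * {0@lo, 192.168.0.1@tcp, 192.168.0.3@tcp}, the diff loops above
 * produce addnis = {192.168.0.3@tcp} and delnis = {192.168.0.2@tcp}:
 * the new NID is added via lnet_peer_add_nid(), the vanished one is
 * removed via lnet_peer_del_nid(), and the ns_status of the NID
 * present on both sides is refreshed in place.
 */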
/*
 * The data in pbuf says lp is its primary peer, but the data was
 * received by a different peer. Try to update lp with the data.
 */
static int
lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
{
	struct lnet_handle_md mdh;

	/* Queue lp for discovery, and force it on the request queue. */
	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_queue_for_discovery(lp))
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
	lnet_net_unlock(LNET_LOCK_EX);

	LNetInvalidateMDHandle(&mdh);

	/*
	 * Decide whether we can move the peer to the DATA_PRESENT state.
	 *
	 * We replace stale data for a multi-rail peer, repair PING_FAILED
	 * status, and preempt FORCE_PING.
	 *
	 * If after that we have DATA_PRESENT, we merge it into this peer.
	 */
	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
			lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
		} else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
			/* Incoming buffer is stale: merge the newer
			 * data we already stored instead. */
			lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
			lnet_ping_buffer_decref(pbuf);
			pbuf = lp->lp_data;
			lp->lp_data = NULL;
		}
	}
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		lnet_ping_buffer_decref(lp->lp_data);
		lp->lp_data = NULL;
		lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
	}
	if (lp->lp_state & LNET_PEER_PING_FAILED) {
		mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
		lp->lp_state &= ~LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
	}
	if (lp->lp_state & LNET_PEER_FORCE_PING)
		lp->lp_state &= ~LNET_PEER_FORCE_PING;
	lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);

	if (pbuf)
		return lnet_peer_merge_data(lp, pbuf);

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
	return 0;
}
/*
 * Update a peer using the data received.
 */
static int lnet_peer_data_present(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_peer_ni *lpni;
	lnet_nid_t nid = LNET_NID_ANY;
	unsigned int flags;
	int rc = 0;

	pbuf = lp->lp_data;
	lp->lp_data = NULL;
	lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
	lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
	spin_unlock(&lp->lp_lock);

	/*
	 * Modifications of peer structures are done while holding the
	 * ln_api_mutex. A global lock is required because we may be
	 * modifying multiple peer structures, and a mutex greatly
	 * simplifies memory management.
	 *
	 * The actual changes to the data structures must also protect
	 * against concurrent lookups, for which the lnet_net_lock in
	 * LNET_LOCK_EX mode is used.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		rc = -ESHUTDOWN;
		goto out;
	}

	/*
	 * If this peer is not on the peer list then it is being torn
	 * down, and our reference count may be all that is keeping it
	 * alive. Don't do any work on it.
	 */
	if (list_empty(&lp->lp_peer_list))
		goto out;

	flags = LNET_PEER_DISCOVERED;
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * Check whether the primary NID in the message matches the
	 * primary NID of the peer. If it does, update the peer, if
	 * it does not, check whether there is already a peer with
	 * that primary NID. If no such peer exists, try to update
	 * the primary NID of the current peer (allowed if it was
	 * created due to message traffic) and complete the update.
	 * If the peer did exist, hand off the data to it.
	 *
	 * The peer for the loopback interface is a special case: this
	 * is the peer for the local node, and we want to set its
	 * primary NID to the correct value here. Moreover, this peer
	 * can show up with only the loopback NID in the ping buffer.
	 */
	if (pbuf->pb_info.pi_nnis <= 1)
		goto out;
	nid = pbuf->pb_info.pi_ni[1].ns_nid;
	if (LNET_NETTYP(LNET_NIDNET(lp->lp_primary_nid)) == LOLND) {
		rc = lnet_peer_set_primary_nid(lp, nid, flags);
		if (!rc)
			rc = lnet_peer_merge_data(lp, pbuf);
	} else if (lp->lp_primary_nid == nid) {
		rc = lnet_peer_merge_data(lp, pbuf);
	} else {
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = lnet_peer_set_primary_nid(lp, nid, flags);
			if (rc) {
				CERROR("Primary NID error %s versus %s: %d\n",
				       libcfs_nid2str(lp->lp_primary_nid),
				       libcfs_nid2str(nid), rc);
			} else {
				rc = lnet_peer_merge_data(lp, pbuf);
			}
		} else {
			struct lnet_peer *new_lp;

			new_lp = lpni->lpni_peer_net->lpn_peer;
			rc = lnet_peer_set_primary_data(new_lp, pbuf);
			lnet_consolidate_routes_locked(lp, new_lp);
			lnet_peer_ni_decref_locked(lpni);
		}
	}
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
	mutex_unlock(&the_lnet.ln_api_mutex);

	spin_lock(&lp->lp_lock);
	/* Tell discovery to re-check the peer immediately. */
	if (!rc)
		rc = LNET_REDISCOVER_PEER;

	return rc;
}
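
/*
 * To summarize the dispatch above: a loopback primary NID means this
 * is the local node's own peer and it is simply renamed; a matching
 * primary NID is a plain merge; an unknown primary NID is adopted by
 * this peer if DLC allows it; and a primary NID already owned by
 * another peer hands the ping data off to that peer via
 * lnet_peer_set_primary_data().
 */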
/*
 * A ping failed. Clear the PING_FAILED state and set the
 * FORCE_PING state, to ensure a retry even if discovery is
 * disabled. This avoids being left with incorrect state.
 */
static int lnet_peer_ping_failed(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	struct lnet_handle_md mdh;
	int rc;

	mdh = lp->lp_ping_mdh;
	LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	lp->lp_state &= ~LNET_PEER_PING_FAILED;
	lp->lp_state |= LNET_PEER_FORCE_PING;
	rc = lp->lp_ping_error;
	lp->lp_ping_error = 0;
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);

	CDEBUG(D_NET, "peer %s:%d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	spin_lock(&lp->lp_lock);
	return rc ? rc : LNET_REDISCOVER_PEER;
}
/*
 * Select NID to send a Ping or Push to.
 */
static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni;

	/* Look for a direct-connected NID for this peer. */
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
		if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
			continue;
		break;
	}
	if (lpni)
		return lpni->lpni_nid;

	/* Look for a routed-connected NID for this peer. */
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
		if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
			continue;
		break;
	}
	if (lpni)
		return lpni->lpni_nid;

	return LNET_NID_ANY;
}
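
/*
 * For example (hypothetical nets): if a peer has NIDs on both tcp0
 * and o2ib0 and the local node is attached to tcp0 only, the first
 * loop returns the tcp0 NID. If the local node shares no net with
 * the peer, the second loop falls back to any NID whose net is
 * reachable through a configured route. LNET_NID_ANY means discovery
 * has no way to reach this peer at all.
 */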
/* Active side of ping. */
static int lnet_peer_send_ping(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	lnet_nid_t pnid;
	int nnis;
	int rc;
	int cpt;

	lp->lp_state |= LNET_PEER_PING_SENT;
	lp->lp_state &= ~LNET_PEER_FORCE_PING;
	spin_unlock(&lp->lp_lock);

	cpt = lnet_net_lock_current();
	/* Refcount for MD. */
	lnet_peer_addref_locked(lp);
	pnid = lnet_peer_select_nid(lp);
	lnet_net_unlock(cpt);

	nnis = MAX(lp->lp_data_nnis, LNET_INTERFACES_MIN);

	rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
			    the_lnet.ln_dc_eqh, false);

	/*
	 * if LNetMDBind in lnet_send_ping fails we need to decrement the
	 * refcount on the peer, otherwise LNetMDUnlink will be called
	 * which will eventually do that for us.
	 */
	if (rc > 0) {
		lnet_net_lock(cpt);
		lnet_peer_decref_locked(lp);
		lnet_net_unlock(cpt);
		rc = -rc; /* change the rc to negative value */
		goto fail_error;
	} else if (rc < 0) {
		goto fail_error;
	}

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	return 0;

fail_error:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
	/*
	 * The errors that get us here are considered hard errors and
	 * cause Discovery to terminate. So we clear PING_SENT, but do
	 * not set either PING_FAILED or FORCE_PING. In fact we need
	 * to clear PING_FAILED, because the unlink event handler will
	 * have set it if we called LNetMDUnlink() above.
	 */
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
	return rc;
}
/*
 * This function exists because you cannot call LNetMDUnlink() from an
 * event handler.
 */
static int lnet_peer_push_failed(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	struct lnet_handle_md mdh;
	int rc;

	mdh = lp->lp_push_mdh;
	LNetInvalidateMDHandle(&lp->lp_push_mdh);
	lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
	rc = lp->lp_push_error;
	lp->lp_push_error = 0;
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
	spin_lock(&lp->lp_lock);
	return rc ? rc : LNET_REDISCOVER_PEER;
}
/* Active side of push. */
static int lnet_peer_send_push(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	struct lnet_ping_buffer *pbuf;
	struct lnet_process_id id;
	struct lnet_md md;
	int cpt;
	int rc;

	/* Don't push to a non-multi-rail peer. */
	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
		return 0;
	}

	lp->lp_state |= LNET_PEER_PUSH_SENT;
	lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
	spin_unlock(&lp->lp_lock);

	cpt = lnet_net_lock_current();
	pbuf = the_lnet.ln_ping_target;
	lnet_ping_buffer_addref(pbuf);
	lnet_net_unlock(cpt);

	/* Push source MD */
	md.start = &pbuf->pb_info;
	md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
	md.threshold = 2; /* Put/Ack */
	md.max_size = 0;
	md.options = 0;
	md.eq_handle = the_lnet.ln_dc_eqh;
	md.user_ptr = lp;

	rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
	if (rc) {
		lnet_ping_buffer_decref(pbuf);
		CERROR("Can't bind push source MD: %d\n", rc);
		goto fail_error;
	}
	cpt = lnet_net_lock_current();
	/* Refcount for MD. */
	lnet_peer_addref_locked(lp);
	id.pid = LNET_PID_LUSTRE;
	id.nid = lnet_peer_select_nid(lp);
	lnet_net_unlock(cpt);

	if (id.nid == LNET_NID_ANY) {
		rc = -EHOSTUNREACH;
		goto fail_unlink;
	}

	rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
		     LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
		     LNET_PROTO_PING_MATCHBITS, 0, 0);

	if (rc)
		goto fail_unlink;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	return 0;

fail_unlink:
	LNetMDUnlink(lp->lp_push_mdh);
	LNetInvalidateMDHandle(&lp->lp_push_mdh);
fail_error:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
	/*
	 * The errors that get us here are considered hard errors and
	 * cause Discovery to terminate. So we clear PUSH_SENT, but do
	 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
	 * because the unlink event handler will have set it if we
	 * called LNetMDUnlink() above.
	 */
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
	return rc;
}
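
/*
 * The push MD above uses a threshold of 2 so that it stays linked for
 * both the SEND event of the Put and the peer's ACK; with LNET_UNLINK
 * set, the MD auto-unlinks once both have fired, and that unlinked
 * event is what releases the ping buffer and peer references taken
 * before the LNetPut().
 */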
/*
 * An unrecoverable error was encountered during discovery.
 * Set error status in peer and abort discovery.
 */
static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
{
	CDEBUG(D_NET, "Discovery error %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), error);

	spin_lock(&lp->lp_lock);
	lp->lp_dc_error = error;
	lp->lp_state &= ~LNET_PEER_DISCOVERING;
	lp->lp_state |= LNET_PEER_REDISCOVER;
	spin_unlock(&lp->lp_lock);
}
/*
 * Mark the peer as discovered.
 */
static int lnet_peer_discovered(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	lp->lp_state |= LNET_PEER_DISCOVERED;
	lp->lp_state &= ~(LNET_PEER_DISCOVERING |
			  LNET_PEER_REDISCOVER);

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));

	return 0;
}

/*
 * Mark the peer as to be rediscovered.
 */
static int lnet_peer_rediscover(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	lp->lp_state |= LNET_PEER_REDISCOVER;
	lp->lp_state &= ~LNET_PEER_DISCOVERING;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));

	return 0;
}
/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}
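
/*
 * The MD handles above are copied out and invalidated under lp_lock,
 * but LNetMDUnlink() is only called after the lock is dropped,
 * mirroring lnet_peer_ping_failed() and lnet_peer_push_failed(). The
 * UNLINK events produced here feed lnet_discovery_event_unlink(),
 * which converts them into PING_FAILED/PUSH_FAILED state with
 * -ETIMEDOUT.
 */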
/*
 * Wait for work to be queued or some other change that must be
 * attended to. Returns non-zero if the discovery thread should shut
 * down.
 */
static int lnet_peer_discovery_wait_for_work(void)
{
	int cpt;
	int rc = 0;

	DEFINE_WAIT(wait);

	cpt = lnet_net_lock_current();
	for (;;) {
		prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
				TASK_INTERRUPTIBLE);
		if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
			break;
		if (lnet_push_target_resize_needed())
			break;
		if (!list_empty(&the_lnet.ln_dc_request))
			break;
		if (!list_empty(&the_lnet.ln_msg_resend))
			break;
		lnet_net_unlock(cpt);

		/*
		 * wakeup max every second to check if there are peers that
		 * have been stuck on the working queue for greater than
		 * the peer timeout.
		 */
		schedule_timeout(cfs_time_seconds(1));
		finish_wait(&the_lnet.ln_dc_waitq, &wait);
		cpt = lnet_net_lock_current();
	}
	finish_wait(&the_lnet.ln_dc_waitq, &wait);

	if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
		rc = -ESHUTDOWN;

	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "woken: %d\n", rc);

	return rc;
}
/*
 * Messages that were pending on a destroyed peer will be put on a global
 * resend list. The message resend list will be checked by the discovery
 * thread when it wakes up, and the messages will be resent. These
 * messages can still be sendable in the case where the lpni which was
 * the initial cause of the message re-queue was transferred to another
 * peer.
 *
 * It is possible that LNet could be shut down while we're iterating
 * through the list. lnet_shutdown_lndnets() will attempt to access the
 * resend list, but will have to wait until the spinlock is released, by
 * which time there shouldn't be any more messages on the resend list.
 * During shutdown lnet_send() will fail and lnet_finalize() will be called
 * for the messages so they can be released. The other case is that
 * lnet_shutdown_lndnets() can finalize all the messages before this
 * function can visit the resend list, in which case this function will
 * be a no-op.
 */
static void lnet_resend_msgs(void)
{
	struct lnet_msg *msg, *tmp;
	struct list_head resend;
	int rc;

	INIT_LIST_HEAD(&resend);

	spin_lock(&the_lnet.ln_msg_resend_lock);
	list_splice(&the_lnet.ln_msg_resend, &resend);
	spin_unlock(&the_lnet.ln_msg_resend_lock);

	list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
		list_del_init(&msg->msg_list);
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
}
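
/*
 * Note the splice-under-lock pattern above: the global resend list is
 * moved wholesale onto a private list while ln_msg_resend_lock is
 * held, and the actual (potentially slow) sends happen after the
 * spinlock is dropped. lnet_send() failures are finalized here so the
 * messages are not leaked.
 */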
/* The discovery thread. */
static int lnet_peer_discovery(void *arg)
{
	struct lnet_peer *lp;
	int rc;

	CDEBUG(D_NET, "started\n");
	cfs_block_allsigs();

	for (;;) {
		if (lnet_peer_discovery_wait_for_work())
			break;

		lnet_resend_msgs();

		if (lnet_push_target_resize_needed())
			lnet_push_target_resize();

		lnet_net_lock(LNET_LOCK_EX);
		if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
			lnet_net_unlock(LNET_LOCK_EX);
			break;
		}

		/*
		 * Process all incoming discovery work requests. When
		 * discovery must wait on a peer to change state, it
		 * is added to the tail of the ln_dc_working queue. A
		 * timestamp keeps track of when the peer was added,
		 * so we can time out discovery requests that take too
		 * long.
		 */
		while (!list_empty(&the_lnet.ln_dc_request)) {
			lp = list_first_entry(&the_lnet.ln_dc_request,
					      struct lnet_peer, lp_dc_list);
			list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
			/*
			 * set the time the peer was put on the dc_working
			 * queue. It shouldn't remain on the queue
			 * forever, in case the GET message (for ping)
			 * doesn't get a REPLY or the PUT message (for
			 * push) doesn't get an ACK.
			 */
			lp->lp_last_queued = ktime_get_real_seconds();
			lnet_net_unlock(LNET_LOCK_EX);

			/*
			 * Select an action depending on the state of
			 * the peer and whether discovery is disabled.
			 * The check whether discovery is disabled is
			 * done after the code that handles processing
			 * for arrived data, cleanup for failures, and
			 * forcing a Ping or Push.
			 */
			spin_lock(&lp->lp_lock);
			CDEBUG(D_NET, "peer %s state %#x\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       lp->lp_state);
			if (lp->lp_state & LNET_PEER_DATA_PRESENT)
				rc = lnet_peer_data_present(lp);
			else if (lp->lp_state & LNET_PEER_PING_FAILED)
				rc = lnet_peer_ping_failed(lp);
			else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
				rc = lnet_peer_push_failed(lp);
			else if (lp->lp_state & LNET_PEER_FORCE_PING)
				rc = lnet_peer_send_ping(lp);
			else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
				rc = lnet_peer_send_push(lp);
			else if (lnet_peer_discovery_disabled)
				rc = lnet_peer_rediscover(lp);
			else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
				rc = lnet_peer_send_ping(lp);
			else if (lnet_peer_needs_push(lp))
				rc = lnet_peer_send_push(lp);
			else
				rc = lnet_peer_discovered(lp);
			CDEBUG(D_NET, "peer %s state %#x rc %d\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       lp->lp_state, rc);
			spin_unlock(&lp->lp_lock);

			lnet_net_lock(LNET_LOCK_EX);
			if (rc == LNET_REDISCOVER_PEER) {
				list_move(&lp->lp_dc_list,
					  &the_lnet.ln_dc_request);
			} else if (rc) {
				lnet_peer_discovery_error(lp, rc);
			}
			if (!(lp->lp_state & LNET_PEER_DISCOVERING))
				lnet_peer_discovery_complete(lp);
			if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
				break;
		}

		lnet_net_unlock(LNET_LOCK_EX);
	}

	CDEBUG(D_NET, "stopping\n");

	/*
	 * Clean up before telling lnet_peer_discovery_stop() that
	 * we're done. Use wake_up() below to somewhat reduce the
	 * size of the thundering herd if there are multiple threads
	 * waiting on discovery of a single peer.
	 */

	/* Queue cleanup 1: stop all pending pings and pushes. */
	lnet_net_lock(LNET_LOCK_EX);
	while (!list_empty(&the_lnet.ln_dc_working)) {
		lp = list_first_entry(&the_lnet.ln_dc_working,
				      struct lnet_peer, lp_dc_list);
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
		lnet_net_unlock(LNET_LOCK_EX);
		lnet_peer_cancel_discovery(lp);
		lnet_net_lock(LNET_LOCK_EX);
	}
	lnet_net_unlock(LNET_LOCK_EX);

	/* Queue cleanup 2: wait for the expired queue to clear. */
	while (!list_empty(&the_lnet.ln_dc_expired))
		schedule_timeout(cfs_time_seconds(1));

	/* Queue cleanup 3: clear the request queue. */
	lnet_net_lock(LNET_LOCK_EX);
	while (!list_empty(&the_lnet.ln_dc_request)) {
		lp = list_first_entry(&the_lnet.ln_dc_request,
				      struct lnet_peer, lp_dc_list);
		lnet_peer_discovery_error(lp, -ESHUTDOWN);
		lnet_peer_discovery_complete(lp);
	}
	lnet_net_unlock(LNET_LOCK_EX);

	LNetEQFree(the_lnet.ln_dc_eqh);
	LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);

	the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
	wake_up(&the_lnet.ln_dc_waitq);

	CDEBUG(D_NET, "stopped\n");
	return 0;
}
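
/*
 * The if/else ladder in the thread above is a strict priority order:
 * arrived ping data is consumed first, then failed pings and pushes
 * are cleaned up, then explicitly forced pings/pushes are issued, and
 * only then does the disabled-discovery check or the normal "ping if
 * stale, push if needed, otherwise done" logic run. A peer therefore
 * always drains its pending work before discovery can declare it
 * complete.
 */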
/* ln_api_mutex is held on entry. */
int lnet_peer_discovery_start(void)
{
	struct task_struct *task;
	int rc;

	if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
		return -EALREADY;

	rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
	if (rc != 0) {
		CERROR("Can't allocate discovery EQ: %d\n", rc);
		return rc;
	}

	the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
	task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("Can't start peer discovery thread: %d\n", rc);

		LNetEQFree(the_lnet.ln_dc_eqh);
		LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);

		the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
	}

	CDEBUG(D_NET, "discovery start: %d\n", rc);

	return rc;
}
/* ln_api_mutex is held on entry. */
void lnet_peer_discovery_stop(void)
{
	if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
		return;

	LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
	the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
	wake_up(&the_lnet.ln_dc_waitq);

	wait_event(the_lnet.ln_dc_waitq,
		   the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);

	LASSERT(list_empty(&the_lnet.ln_dc_request));
	LASSERT(list_empty(&the_lnet.ln_dc_working));
	LASSERT(list_empty(&the_lnet.ln_dc_expired));

	CDEBUG(D_NET, "discovery stopped\n");
}
/* Debugging */
void
lnet_debug_peer(lnet_nid_t nid)
{
	char *aliveness = "NA";
	struct lnet_peer_ni *lp;
	int cpt;

	cpt = lnet_cpt_of_nid(nid, NULL);
	lnet_net_lock(cpt);

	lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lp)) {
		lnet_net_unlock(cpt);
		CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
		return;
	}

	if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
		aliveness = lnet_is_peer_ni_alive(lp) ? "up" : "down";

	CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
	       libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
	       aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
	       lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
	       lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);

	lnet_peer_ni_decref_locked(lp);

	lnet_net_unlock(cpt);
}
/* Gathering information for userspace. */
int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
			  char aliveness[LNET_MAX_STR_LEN],
			  __u32 *cpt_iter, __u32 *refcount,
			  __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
			  __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
			  __u32 *peer_tx_qnob)
{
	struct lnet_peer_table *peer_table;
	struct lnet_peer_ni *lp;
	int j;
	int lncpt;
	bool found = false;

	/* get the number of CPTs */
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/* if the cpt number to be examined is >= the number of cpts in
	 * the system then indicate that there are no more cpts to examine
	 */
	if (*cpt_iter >= lncpt)
		return -ENOENT;

	/* get the current table */
	peer_table = the_lnet.ln_peer_tables[*cpt_iter];
	/* if the ptable is NULL then there are no more cpts to examine */
	if (peer_table == NULL)
		return -ENOENT;

	lnet_net_lock(*cpt_iter);

	for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
		struct list_head *peers = &peer_table->pt_hash[j];

		list_for_each_entry(lp, peers, lpni_hashlist) {
			if (peer_index-- > 0)
				continue;

			snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
			if (lnet_isrouter(lp) ||
			    lnet_peer_aliveness_enabled(lp))
				snprintf(aliveness, LNET_MAX_STR_LEN,
					 lnet_is_peer_ni_alive(lp) ?
					 "up" : "down");

			*nid = lp->lpni_nid;
			*refcount = atomic_read(&lp->lpni_refcount);
			*ni_peer_tx_credits =
				lp->lpni_net->net_tunables.lct_peer_tx_credits;
			*peer_tx_credits = lp->lpni_txcredits;
			*peer_rtr_credits = lp->lpni_rtrcredits;
			*peer_min_rtr_credits = lp->lpni_mintxcredits;
			*peer_tx_qnob = lp->lpni_txqnob;

			found = true;
			break;
		}
	}

	lnet_net_unlock(*cpt_iter);

	return found ? 0 : -ENOENT;
}
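
/*
 * This is the legacy single-NI query: the caller passes a running
 * peer_index and the CPT table to examine in *cpt_iter, and gets
 * -ENOENT back once the index walks past the last peer NI in that
 * table (or the CPT number is out of range).
 */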
/* ln_api_mutex is held, which keeps the peer list stable */
int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
{
	struct lnet_ioctl_element_stats *lpni_stats;
	struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
	struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
	struct lnet_peer_ni_credit_info *lpni_info;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp;
	lnet_nid_t nid;
	__u32 size;
	int rc;

	lp = lnet_find_peer(cfg->prcfg_prim_nid);
	if (!lp) {
		rc = -ENOENT;
		goto out;
	}

	size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
		+ sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
	size *= lp->lp_nnis;
	if (size > cfg->prcfg_size) {
		cfg->prcfg_size = size;
		rc = -E2BIG;
		goto out_lp_decref;
	}

	cfg->prcfg_prim_nid = lp->lp_primary_nid;
	cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
	cfg->prcfg_cfg_nid = lp->lp_primary_nid;
	cfg->prcfg_count = lp->lp_nnis;
	cfg->prcfg_size = size;
	cfg->prcfg_state = lp->lp_state;

	/* Allocate helper buffers. */
	rc = -ENOMEM;
	LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
	if (!lpni_info)
		goto out_lp_decref;
	LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
	if (!lpni_stats)
		goto out_free_info;
	LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
	if (!lpni_msg_stats)
		goto out_free_stats;
	LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
	if (!lpni_hstats)
		goto out_free_msg_stats;

	rc = -EFAULT;

	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
		nid = lpni->lpni_nid;
		if (copy_to_user(bulk, &nid, sizeof(nid)))
			goto out_free_hstats;
		bulk += sizeof(nid);

		memset(lpni_info, 0, sizeof(*lpni_info));
		snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
		if (lnet_isrouter(lpni) || lnet_peer_aliveness_enabled(lpni))
			snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
				 lnet_is_peer_ni_alive(lpni) ? "up" : "down");

		lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
		lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
			lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
		lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
		lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
		lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
		lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
		lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
		if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_info);

		memset(lpni_stats, 0, sizeof(*lpni_stats));
		lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_SEND);
		lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_RECV);
		lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_DROP);
		if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_stats);

		lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
		if (copy_to_user(bulk, lpni_msg_stats,
				 sizeof(*lpni_msg_stats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_msg_stats);

		lpni_hstats->hlpni_network_timeout =
			atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
		lpni_hstats->hlpni_remote_dropped =
			atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
		lpni_hstats->hlpni_remote_timeout =
			atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
		lpni_hstats->hlpni_remote_error =
			atomic_read(&lpni->lpni_hstats.hlt_remote_error);
		lpni_hstats->hlpni_health_value =
			atomic_read(&lpni->lpni_healthv);
		if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_hstats);
	}

	rc = 0;

out_free_hstats:
	LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
out_free_msg_stats:
	LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
out_free_stats:
	LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
out_free_info:
	LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
out_lp_decref:
	lnet_peer_decref_locked(lp);
out:
	return rc;
}
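
/*
 * For each peer NI the records written to the user-space 'bulk'
 * buffer above are, in order: the NID itself, the credit/aliveness
 * info, the aggregate send/recv/drop counts, the per-message-type
 * stats, and the health stats. The 'size' computed at the top must
 * match this layout exactly, since userspace steps through the
 * buffer with the same record sizes.
 */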
void
lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
{
	/* the mt could've shutdown and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return;

	if (list_empty(&lpni->lpni_recovery) &&
	    atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
		CERROR("lpni %s added to recovery queue. Health = %d\n",
		       libcfs_nid2str(lpni->lpni_nid),
		       atomic_read(&lpni->lpni_healthv));
		list_add_tail(&lpni->lpni_recovery,
			      &the_lnet.ln_mt_peerNIRecovq);
		lnet_peer_ni_addref_locked(lpni);
	}
}
/* Call with the ln_api_mutex held */
void
lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int lncpt;
	int cpt;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return;

	if (!all) {
		lnet_net_lock(LNET_LOCK_EX);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			lnet_net_unlock(LNET_LOCK_EX);
			return;
		}
		atomic_set(&lpni->lpni_healthv, value);
		lnet_peer_ni_add_to_recoveryq_locked(lpni);
		lnet_peer_ni_decref_locked(lpni);
		lnet_net_unlock(LNET_LOCK_EX);
		return;
	}

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Walk all the peers and reset the health value for each one
	 * to the specified value.
	 */
	lnet_net_lock(LNET_LOCK_EX);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			list_for_each_entry(lpn, &lp->lp_peer_nets,
					    lpn_peer_nets) {
				list_for_each_entry(lpni, &lpn->lpn_peer_nis,
						    lpni_peer_nis) {
					atomic_set(&lpni->lpni_healthv,
						   value);
					lnet_peer_ni_add_to_recoveryq_locked(
						lpni);
				}
			}
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
}