/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>
/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}
void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}
int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		atomic_inc(&lpni->lpni_refcount);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}
void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}
static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;

	/*
	 * All peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for the loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * myself.
	 */
	if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}
void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}
/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held.
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid));
}
/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	LASSERT(ptable->pt_number > 0);
	ptable->pt_number--;

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}
static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	int i = 3;

	spin_lock(&ptable->pt_zombie_lock);
	while (ptable->pt_zombies) {
		spin_unlock(&ptable->pt_zombie_lock);

		if (is_power_of_2(i)) {
			CDEBUG(D_WARNING,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) >> 1);
		spin_lock(&ptable->pt_zombie_lock);
	}
	spin_unlock(&ptable->pt_zombie_lock);
}
static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	lnet_nid_t gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NIDNET(LNET_NID_ANY), gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}
void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}
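/*
 * Note: cleanup thus proceeds in three passes. First drop any routes
 * that run through the affected gateways, then delete the peer entries
 * themselves, and finally wait for the zombie peer_nis to drain.
 */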
static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}
struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}
struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}
struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
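/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * callers walk a peer's NIs by feeding the previous return value back
 * in, starting from NULL, exactly as lnet_peer_del_locked() does
 * above. The wrapper function below is hypothetical.
 */
#if 0
static void
lnet_example_print_peer_nis(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL;

	/* A NULL peer_net iterates across all of the peer's nets. */
	while ((lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni)) != NULL)
		CDEBUG(D_NET, "peer NI %s\n",
		       libcfs_nid2str(lpni->lpni_nid));
}
#endif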
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt;
	int cpt;
	u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g. whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
static bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int i;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	for (i = 0; i < lpni->lpni_pref_nnids; i++) {
		if (lpni->lpni_pref.nids[i] == nid)
			return true;
	}
	return false;
}
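/*
 * Note on storage: lpni_pref is a union. With exactly one preferred
 * NID the value lives inline in lpni_pref.nid; with more than one,
 * lpni_pref.nids points to an array of lpni_pref_nnids entries, as
 * managed by lnet_peer_add_pref_nid() and lnet_peer_del_pref_nid()
 * below.
 */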
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);

	return rc;
}
/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);

	return rc;
}
/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}
static int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			rc = -EPERM;
			goto out;
		}
	}

	if (lpni->lpni_pref_nnids != 0) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] == nid) {
				LIBCFS_FREE(nids, size);
				rc = -EEXIST;
				goto out;
			}
			nids[i] = lpni->lpni_pref.nids[i];
		}
		nids[i] = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);
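	/*
	 * The old array was unhooked while holding both lnet_net_lock/EX
	 * and the lpni spinlock, so it can be freed below without any
	 * lock held: no reader can still reach it.
	 */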
	if (oldnids) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
static int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	lnet_nid_t *nids = NULL;
	lnet_nid_t *oldnids = NULL;
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	int size;
	int i, j;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else if (lpni->lpni_pref_nnids == 2) {
		if (lpni->lpni_pref.nids[0] != nid &&
		    lpni->lpni_pref.nids[1] != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
		LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
		if (!nids) {
			rc = -ENOMEM;
			goto out;
		}
		for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
			if (lpni->lpni_pref.nids[i] != nid)
				nids[j++] = lpni->lpni_pref.nids[i];
		}
		/* Check if we actually removed a nid. */
		if (j == lpni->lpni_pref_nnids) {
			LIBCFS_FREE(nids, size);
			rc = -ENOENT;
			goto out;
		}
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1) {
		lpni->lpni_pref.nid = LNET_NID_ANY;
	} else if (lpni->lpni_pref_nnids == 2) {
		oldnids = lpni->lpni_pref.nids;
		if (oldnids[0] == nid)
			lpni->lpni_pref.nid = oldnids[1];
		else
			lpni->lpni_pref.nid = oldnids[0];
	} else {
		oldnids = lpni->lpni_pref.nids;
		lpni->lpni_pref.nids = nids;
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (oldnids) {
		size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
		LIBCFS_FREE(oldnids, size);
	}
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}
lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}
lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;
	while (!lnet_peer_is_uptodate(lp)) {
		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);
	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
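/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * resolve the primary NID for an arbitrary NID of a peer, letting
 * LNetPrimaryNID() run discovery if the peer is not yet up to date.
 */
#if 0
static void
lnet_example_resolve_primary(lnet_nid_t nid)
{
	lnet_nid_t prim = LNetPrimaryNID(nid);

	CDEBUG(D_NET, "%s is primary for %s\n",
	       libcfs_nid2str(prim), libcfs_nid2str(nid));
}
#endif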
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}
	return NULL;
}
/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * Always returns 0. This is the last function called from functions
 * that do return an int, so returning 0 here allows the compiler to
 * do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		ptable->pt_number++;
		/* This is the 1st refcount on lpni. */
		atomic_inc(&lpni->lpni_refcount);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		lpn->lpn_peer = lp;
		list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;
	lnet_net_unlock(LNET_LOCK_EX);

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);

	return 0;
}
/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (lp->lp_primary_nid != nid)
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		}
		/* Delete and recreate as a configured peer. */
		lnet_peer_del(lp);
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nid2str(nid), flags, rc);
	return rc;
}
/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid != LNET_NID_ANY);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		lnet_peer_ni_decref_locked(lpni);
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *rtr_lp =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = rtr_lp->lp_rtr_refcount;
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(rtr_lp, lp);
			}
			lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	/* If the peer_ni was allocated above its peer_net pointer is NULL */
	if (!lpni->lpni_peer_net)
		LIBCFS_FREE(lpni, sizeof(*lpni));
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
	       flags, rc);
	return rc;
}
/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	lnet_nid_t old = lp->lp_primary_nid;
	int rc = 0;

	if (lp->lp_primary_nid == nid)
		goto out;
	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc)
		goto out;
	lp->lp_primary_nid = nid;
out:
	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nid2str(old), libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * lpni creation initiated due to traffic either sending or receiving.
 */
static int
lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	/*
	 * Assume peer is Multi-Rail capable and let discovery find out
	 * otherwise.
	 */
	unsigned flags = LNET_PEER_MULTI_RAIL;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	/* lnet_net_lock is not needed here because ln_api_mutex is held */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;
	if (pref != LNET_NID_ANY)
		lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
	return rc;
}
/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
int
lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	/* The prim_nid must always be specified */
	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	flags = LNET_PEER_CONFIGURED;
	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (nid == LNET_NID_ANY)
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	/* Peer must have been configured. */
	if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nid2str(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (lp->lp_primary_nid != prim_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nid2str(prim_nid));
		return -EPERM;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}
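/*
 * Usage sketch (illustrative only, kept out of the build with #if 0):
 * the two IOC_LIBCFS_ADD_PEER_NI combinations described above, for a
 * hypothetical Multi-Rail peer. The caller must hold ln_api_mutex.
 */
#if 0
static int
lnet_example_configure_peer(lnet_nid_t prim_nid, lnet_nid_t second_nid)
{
	int rc;

	/* Create the peer with prim_nid as its primary NID. */
	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
	if (rc)
		return rc;

	/* Add a second NID to the now-existing peer. */
	return lnet_add_peer_ni(prim_nid, second_nid, true);
}
#endif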
/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned flags;

	if (prim_nid == LNET_NID_ANY)
		return -EINVAL;

	lpni = lnet_find_peer_ni_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lnet_peer_ni_decref_locked(lpni);
	lp = lpni->lpni_peer_net->lpn_peer;

	if (prim_nid != lp->lp_primary_nid) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nid2str(prim_nid),
		       libcfs_nid2str(lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nid2str(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
		return lnet_peer_del(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}
void
lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	/* remove the peer ni from the zombie list */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
	spin_lock(&ptable->pt_zombie_lock);
	list_del_init(&lpni->lpni_hashlist);
	ptable->pt_zombies--;
	spin_unlock(&ptable->pt_zombie_lock);

	if (lpni->lpni_pref_nnids > 1) {
		LIBCFS_FREE(lpni->lpni_pref.nids,
			    sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	lnet_peer_net_decref_locked(lpn);
}
struct lnet_peer_ni *
lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(cpt);

	rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_net_relock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_net_relock:
	lnet_net_lock(cpt);

	return lpni;
}
/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;
	int rc;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * Shutdown is only set under the ln_api_mutex, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lpni = ERR_PTR(-ESHUTDOWN);
		goto out_mutex_unlock;
	}

	rc = lnet_peer_ni_traffic_add(nid, pref);
	if (rc) {
		lpni = ERR_PTR(rc);
		goto out_mutex_unlock;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	LASSERT(lpni);

out_mutex_unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}
bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}
/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}
	spin_unlock(&lp->lp_lock);

	return rc;
}
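/*
 * In short: a peer is not uptodate while discovery is active or forced
 * (DISCOVERING, FORCE_PING, FORCE_PUSH), while it is flagged for
 * REDISCOVER, or while it needs a push; it is uptodate only when both
 * DISCOVERED and NIDS_UPTODATE are set.
 */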
/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), rc);

	return rc;
}
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_msg *msg, *tmp;
	int rc = 0;
	struct list_head pending_msgs;

	INIT_LIST_HEAD(&pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	list_del_init(&lp->lp_dc_list);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	wake_up_all(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (lp->lp_dc_error) {
			lnet_finalize(msg, lp->lp_dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_id2str(msg->msg_target));
		rc = lnet_send(msg->msg_src_nid_param, msg,
			       msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_id2str(msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}
/*
 * Handle inbound push.
 * Like any event handler, called with lnet_res_lock/CPT held.
 */
void lnet_peer_push_event(struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
	struct lnet_peer *lp;

	/* lnet_find_peer() adds a refcount */
	lp = lnet_find_peer(ev->source.nid);
	if (!lp) {
		CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
		       libcfs_nid2str(ev->initiator.nid),
		       libcfs_nid2str(ev->source.nid));
		return;
	}

	/* Ensure peer state remains consistent while we modify it. */
	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Clear the NIDS_UPTODATE and set the
	 * FORCE_PING flag to trigger a ping.
	 */
	if (ev->status) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	/*
	 * A push with invalid or corrupted info. Clear the UPTODATE
	 * flag to trigger a ping.
	 */
	if (lnet_ping_info_validate(&pbuf->pb_info)) {
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Corrupted Push from %s\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * A non-Multi-Rail peer is not supposed to be capable of
	 * sending a push.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
		CERROR("Push from non-Multi-Rail peer %s dropped\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		goto out;
	}

	/*
	 * Check the MULTIRAIL flag. Complain if the peer was DLC
	 * configured without it.
	 */
	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CERROR("Push says %s is Multi-Rail, DLC says not\n",
			       libcfs_nid2str(lp->lp_primary_nid));
		} else {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	} else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Check for truncation of the Put message. Clear the
	 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
	 * and tell discovery to allocate a bigger buffer.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/* always assume new data */
	lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	/*
	 * If there is data present that hasn't been processed yet,
	 * we'll replace it if the Put contained newer data and it
	 * fits. We're racing with a Ping or earlier Push in this
	 * case.
	 */
	if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) >
			LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
		    pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
			memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
			       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
			CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       LNET_PING_BUFFER_SEQNO(lp->lp_data));
		}
		goto out;
	}

	/*
	 * Allocate a buffer to copy the data. On a failure we drop
	 * the Push and set FORCE_PING to force the discovery
	 * thread to fix the problem by pinging the peer.
	 */
	lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
	if (!lp->lp_data) {
		lp->lp_state |= LNET_PEER_FORCE_PING;
		CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       LNET_PING_BUFFER_SEQNO(pbuf));
		goto out;
	}

	memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
	       LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	CDEBUG(D_NET, "Received Push %s %u\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       LNET_PING_BUFFER_SEQNO(pbuf));

out:
	/*
	 * Queue the peer for discovery if not done, force it on the request
	 * queue and wake the discovery thread if the peer was already queued,
	 * because its status changed.
	 */
	spin_unlock(&lp->lp_lock);
	lnet_net_lock(LNET_LOCK_EX);
	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}
/*
 * Clear the discovery error state, unless we're already discovering
 * this peer, in which case the error is current.
 */
static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_dc_error = 0;
	spin_unlock(&lp->lp_lock);
}
/*
 * Peer discovery slow path. The ln_api_mutex is held on entry, and
 * dropped/retaken within this function. An lnet_peer_ni is passed in
 * because discovery could tear down an lnet_peer.
 */
static int
lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
{
	DEFINE_WAIT(wait);
	struct lnet_peer *lp;
	int rc = 0;

again:
	lnet_net_unlock(cpt);
	lnet_net_lock(LNET_LOCK_EX);
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_clear_discovery_error(lp);

	/*
	 * We're willing to be interrupted. The lpni can become a
	 * zombie if we race with DLC, so we must check for that.
	 */
	for (;;) {
		prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;
		if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
			break;
		if (lp->lp_dc_error)
			break;
		if (lnet_peer_is_uptodate(lp))
			break;
		lnet_peer_queue_for_discovery(lp);

		if (lnet_is_discovery_disabled(lp))
			break;
		/*
		 * If caller requested a non-blocking operation then
		 * return immediately. Once discovery is complete then the
		 * peer ref will be decremented and any pending messages
		 * that were stopped due to discovery will be transmitted.
		 */
		if (!block)
			break;

		lnet_peer_addref_locked(lp);
		lnet_net_unlock(LNET_LOCK_EX);
		schedule();
		finish_wait(&lp->lp_dc_waitq, &wait);
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_decref_locked(lp);
		/* Peer may have changed */
		lp = lpni->lpni_peer_net->lpn_peer;
	}
	finish_wait(&lp->lp_dc_waitq, &wait);

	lnet_net_unlock(LNET_LOCK_EX);
	lnet_net_lock(cpt);

	/*
	 * If the peer has changed after we've discovered the older peer,
	 * then we need to discover the new peer to make sure the
	 * interface information is up to date.
	 */
	if (lp != lpni->lpni_peer_net->lpn_peer)
		goto again;

	if (signal_pending(current))
		rc = -EINTR;
	else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		rc = -ESHUTDOWN;
	else if (lp->lp_dc_error)
		rc = lp->lp_dc_error;
	else if (!block)
		CDEBUG(D_NET, "non-blocking discovery\n");
	else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
		goto again;

	CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
	       (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
	       libcfs_nid2str(lpni->lpni_nid), rc,
	       (!block) ? "pending discovery" : "discovery complete");

	return rc;
}
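/*
 * See LNetPrimaryNID() above for the canonical blocking call pattern:
 * loop on lnet_discover_peer_locked() until lnet_peer_is_uptodate()
 * returns true, re-fetching the peer from the lpni on each pass.
 */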
/* Handle an incoming ack for a push. */
static void
lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_PUSH_SENT;
	lp->lp_push_error = ev->status;
	if (ev->status)
		lp->lp_state |= LNET_PEER_PUSH_FAILED;
	else
		lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	spin_unlock(&lp->lp_lock);

	CDEBUG(D_NET, "peer %s ev->status %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), ev->status);
}
/* Handle a Reply message. This is the reply to a Ping message. */
static void
lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
{
	struct lnet_ping_buffer *pbuf;
	int rc;

	spin_lock(&lp->lp_lock);

	/*
	 * If some kind of error happened the contents of the message
	 * cannot be used. Set PING_FAILED to trigger a retry.
	 */
	if (ev->status) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = ev->status;
		CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
		       ev->status,
		       libcfs_nid2str(lp->lp_primary_nid),
		       libcfs_nid2str(ev->source.nid));
		goto out;
	}

	pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
	if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
		lnet_swap_pinginfo(pbuf);

	/*
	 * A reply with invalid or corrupted info. Set PING_FAILED to
	 * trigger a retry.
	 */
	rc = lnet_ping_info_validate(&pbuf->pb_info);
	if (rc) {
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
		       libcfs_nid2str(lp->lp_primary_nid), rc);
		goto out;
	}

	/*
	 * Only enable the multi-rail feature on the peer if both sides of
	 * the connection have discovery on.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
		CDEBUG(D_NET, "Peer %s has Multi-Rail feature enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state |= LNET_PEER_MULTI_RAIL;
	} else {
		CDEBUG(D_NET, "Peer %s has Multi-Rail feature disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
	}

	/*
	 * The peer may have discovery disabled at its end. Set
	 * NO_DISCOVERY as appropriate.
	 */
	if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
	    !lnet_peer_discovery_disabled) {
		CDEBUG(D_NET, "Peer %s has discovery enabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
	} else {
		CDEBUG(D_NET, "Peer %s has discovery disabled\n",
		       libcfs_nid2str(lp->lp_primary_nid));
		lp->lp_state |= LNET_PEER_NO_DISCOVERY;
	}

	/*
	 * Update the MULTI_RAIL flag based on the reply. If the peer
	 * was configured with DLC then the setting should match what
	 * DLC put in.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
			/* Everything's fine */
		} else if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("Reply says %s is Multi-Rail, DLC says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		} else {
			/*
			 * if discovery is disabled then we don't want to
			 * update the state of the peer. All we'll do is
			 * update the peer_nis which were reported back in
			 * the initial ping
			 */
			if (!lnet_is_discovery_disabled_locked(lp)) {
				lp->lp_state |= LNET_PEER_MULTI_RAIL;
				lnet_peer_clr_non_mr_pref_nids(lp);
			}
		}
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			CWARN("DLC says %s is Multi-Rail, Reply says not\n",
			      libcfs_nid2str(lp->lp_primary_nid));
		} else {
			CERROR("Multi-Rail state vanished from %s\n",
			       libcfs_nid2str(lp->lp_primary_nid));
			lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
		}
	}

	/*
	 * Make sure we'll allocate the correct size ping buffer when
	 * pinging the peer.
	 */
	if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
		lp->lp_data_nnis = pbuf->pb_info.pi_nnis;

	/*
	 * Check for truncation of the Reply. Clear PING_SENT and set
	 * PING_FAILED to trigger a retry.
	 */
	if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
		if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
			the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
		lp->lp_state |= LNET_PEER_PING_FAILED;
		lp->lp_ping_error = 0;
		CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
		       libcfs_nid2str(lp->lp_primary_nid),
		       pbuf->pb_info.pi_nnis);
		goto out;
	}

	/*
	 * Check the sequence numbers in the reply. These are only
	 * available if the reply came from a Multi-Rail peer.
	 */
	if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
	    pbuf->pb_info.pi_nnis > 1 &&
	    lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
		if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
			CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
			       libcfs_nid2str(lp->lp_primary_nid),
			       LNET_PING_BUFFER_SEQNO(pbuf),
			       lp->lp_peer_seqno);

		lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
	}

	/* We're happy with the state of the data in the buffer. */
	CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
	       libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
	if (lp->lp_state & LNET_PEER_DATA_PRESENT)
		lnet_ping_buffer_decref(lp->lp_data);
	lp->lp_data = pbuf;
	lp->lp_state |= LNET_PEER_DATA_PRESENT;
	lnet_ping_buffer_addref(pbuf);

out:
	lp->lp_state &= ~LNET_PEER_PING_SENT;
	spin_unlock(&lp->lp_lock);
}
2368 * Send event handling. Only matters for error cases, where we clean
2369 * up state on the peer and peer_ni that would otherwise be updated in
2370 * the REPLY event handler for a successful Ping, and the ACK event
2371 * handler for a successful Push.
2373 static int
2374 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2375 {
2376 int rc = 0;
2378 if (!ev->status)
2379 goto out;
2381 spin_lock(&lp->lp_lock);
2382 if (ev->msg_type == LNET_MSG_GET) {
2383 lp->lp_state &= ~LNET_PEER_PING_SENT;
2384 lp->lp_state |= LNET_PEER_PING_FAILED;
2385 lp->lp_ping_error = ev->status;
2386 } else { /* ev->msg_type == LNET_MSG_PUT */
2387 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2388 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2389 lp->lp_push_error = ev->status;
2391 spin_unlock(&lp->lp_lock);
2392 rc = LNET_REDISCOVER_PEER;
2393 out:
2394 CDEBUG(D_NET, "%s Send to %s: %d\n",
2395 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2396 libcfs_nid2str(ev->target.nid), rc);
2397 return rc;
2398 }
2401 * Unlink event handling. This event is only seen if a call to
2402 * LNetMDUnlink() caused the event to be unlinked. If this call was
2403 * made after the event was set up in LNetGet() or LNetPut() then we
2404 * assume the Ping or Push timed out.
2407 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2409 spin_lock(&lp->lp_lock);
2410 /* We've passed through LNetGet() */
2411 if (lp->lp_state & LNET_PEER_PING_SENT) {
2412 lp->lp_state &= ~LNET_PEER_PING_SENT;
2413 lp->lp_state |= LNET_PEER_PING_FAILED;
2414 lp->lp_ping_error = -ETIMEDOUT;
2415 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2416 libcfs_nid2str(lp->lp_primary_nid));
2417 }
2418 /* We've passed through LNetPut() */
2419 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2420 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2421 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2422 lp->lp_push_error = -ETIMEDOUT;
2423 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2424 libcfs_nid2str(lp->lp_primary_nid));
2425 }
2426 spin_unlock(&lp->lp_lock);
2430 * Event handler for the discovery EQ.
2432 * Called with lnet_res_lock(cpt) held. The cpt is the
2433 * lnet_cpt_of_cookie() of the md handle cookie.
2435 static void lnet_discovery_event_handler(struct lnet_event *event)
2436 {
2437 struct lnet_peer *lp = event->md.user_ptr;
2438 struct lnet_ping_buffer *pbuf;
2439 int rc;
2441 /* discovery needs to take another look */
2442 rc = LNET_REDISCOVER_PEER;
2444 CDEBUG(D_NET, "Received event: %d\n", event->type);
2446 switch (event->type) {
2447 case LNET_EVENT_ACK:
2448 lnet_discovery_event_ack(lp, event);
2449 break;
2450 case LNET_EVENT_REPLY:
2451 lnet_discovery_event_reply(lp, event);
2452 break;
2453 case LNET_EVENT_SEND:
2454 /* Only send failure triggers a retry. */
2455 rc = lnet_discovery_event_send(lp, event);
2456 break;
2457 case LNET_EVENT_UNLINK:
2458 /* LNetMDUnlink() was called */
2459 lnet_discovery_event_unlink(lp, event);
2460 break;
2461 default:
2462 /* Invalid events. */
2463 LBUG();
2464 }
2465 lnet_net_lock(LNET_LOCK_EX);
2466 if (event->unlinked) {
2467 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
2468 lnet_ping_buffer_decref(pbuf);
2469 lnet_peer_decref_locked(lp);
2470 }
2472 /* put peer back at end of request queue, if discovery not already
2473 * complete */
2474 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2475 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2476 wake_up(&the_lnet.ln_dc_waitq);
2478 lnet_net_unlock(LNET_LOCK_EX);
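/*
 * Sketch of the handoff implemented above: the event handler only
 * records the outcome in lp_state under lp_lock, releases the MD
 * resources once the event is unlinked, and then re-queues the peer
 * on ln_dc_request and wakes the discovery thread, which does all
 * further processing in thread context.
 */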
2482 * Build a peer from incoming data.
2484 * The NIDs in the incoming data are supposed to be structured as follows:
2485 * - loopback
2486 * - primary NID
2487 * - other NIDs in same net
2488 * - NIDs in second net
2489 * - NIDs in third net
2490 * - etc.
2491 * This is due to the way the list of NIDs in the data is created.
2493 * Note that this function will mark the peer uptodate unless an
2494 * ENOMEM is encountered. All other errors are due to a conflict
2495 * between the DLC configuration and what discovery sees. We treat DLC
2496 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2497 * peer from becoming stuck in discovery.
2499 static int lnet_peer_merge_data(struct lnet_peer *lp,
2500 struct lnet_ping_buffer *pbuf)
2502 struct lnet_peer_ni *lpni;
2503 lnet_nid_t *curnis = NULL;
2504 struct lnet_ni_status *addnis = NULL;
2505 lnet_nid_t *delnis = NULL;
2506 unsigned int flags;
2507 int ncurnis = 0;
2508 int naddnis = 0;
2509 int ndelnis = 0;
2510 int nnis = 0;
2511 int rc = 0;
2512 int i;
2513 int j;
2515 flags = LNET_PEER_DISCOVERED;
2516 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2517 flags |= LNET_PEER_MULTI_RAIL;
2520 * Cache the routing feature for the peer; whether it is enabled
2521 * or disabled, as reported by the remote peer.
2522 */
2523 spin_lock(&lp->lp_lock);
2524 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2525 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2527 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2528 spin_unlock(&lp->lp_lock);
2530 nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
2531 LIBCFS_ALLOC(curnis, nnis * sizeof(*curnis));
2532 LIBCFS_ALLOC(addnis, nnis * sizeof(*addnis));
2533 LIBCFS_ALLOC(delnis, nnis * sizeof(*delnis));
2534 if (!curnis || !addnis || !delnis) {
2535 rc = -ENOMEM;
2536 goto out;
2537 }
2542 /* Construct the list of NIDs present in peer. */
2543 lpni = NULL;
2544 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2545 curnis[ncurnis++] = lpni->lpni_nid;
2548 * Check for NIDs in pbuf not present in curnis[].
2549 * The loop starts at 1 to skip the loopback NID.
2551 for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2552 for (j = 0; j < ncurnis; j++)
2553 if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2554 break;
2555 if (j == ncurnis)
2556 addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2559 * Check for NIDs in curnis[] not present in pbuf.
2560 * The nested loop starts at 1 to skip the loopback NID.
2562 * But never add the loopback NID to delnis[]: if it is
2563 * present in curnis[] then this peer is for this node.
2565 for (i = 0; i < ncurnis; i++) {
2566 if (LNET_NETTYP(LNET_NIDNET(curnis[i])) == LOLND)
2567 continue;
2568 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2569 if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2571 * update the information we cache for the
2572 * peer with the latest information we
2573 * received
2574 */
2575 lpni = lnet_find_peer_ni_locked(curnis[i]);
2576 if (lpni) {
2577 lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2578 lnet_peer_ni_decref_locked(lpni);
2579 }
2580 break;
2581 }
2582 }
2583 if (j == pbuf->pb_info.pi_nnis)
2584 delnis[ndelnis++] = curnis[i];
2588 * If we get here and the discovery is disabled then we don't want
2589 * to add or delete any NIs. We just update the ones we have
2590 * information on and call it a day.
2591 */
2593 if (lnet_is_discovery_disabled(lp))
2594 goto out;
2596 for (i = 0; i < naddnis; i++) {
2597 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2598 if (rc) {
2599 CERROR("Error adding NID %s to peer %s: %d\n",
2600 libcfs_nid2str(addnis[i].ns_nid),
2601 libcfs_nid2str(lp->lp_primary_nid), rc);
2602 if (rc == -ENOMEM)
2603 goto out;
2604 }
2605 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2606 if (lpni) {
2607 lpni->lpni_ns_status = addnis[i].ns_status;
2608 lnet_peer_ni_decref_locked(lpni);
2609 }
2610 }
2612 for (i = 0; i < ndelnis; i++) {
2614 * for routers it's okay to delete the primary_nid because
2615 * the upper layers don't really rely on it. So if we're
2616 * being told that the router changed its primary_nid
2617 * then it's okay to delete it.
2619 if (lp->lp_rtr_refcount > 0)
2620 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2621 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2622 if (rc) {
2623 CERROR("Error deleting NID %s from peer %s: %d\n",
2624 libcfs_nid2str(delnis[i]),
2625 libcfs_nid2str(lp->lp_primary_nid), rc);
2626 if (rc == -ENOMEM)
2627 goto out;
2628 }
2629 }
2631 * Errors other than -ENOMEM are due to peers having been
2632 * configured with DLC. Ignore these because DLC overrides
2633 * Discovery.
2634 */
2635 rc = 0;
2636 out:
2637 LIBCFS_FREE(curnis, nnis * sizeof(*curnis));
2638 LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
2639 LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
2640 lnet_ping_buffer_decref(pbuf);
2641 CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2643 if (rc) {
2644 spin_lock(&lp->lp_lock);
2645 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2646 lp->lp_state |= LNET_PEER_FORCE_PING;
2647 spin_unlock(&lp->lp_lock);
2648 }
2649 return rc;
2650 }
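/*
 * Worked example of the merge above, with hypothetical NIDs for
 * illustration only: if curnis[] = { 10.0.0.1@tcp, 10.0.0.2@tcp }
 * and the ping buffer reports { 0@lo, 10.0.0.1@tcp, 10.0.0.3@tcp },
 * then addnis[] = { 10.0.0.3@tcp } and delnis[] = { 10.0.0.2@tcp }.
 * A NID present in both lists only has its ns_status refreshed, the
 * loopback NID in slot 0 is never considered, and the add/delete
 * phases are skipped entirely when discovery is disabled.
 */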
2653 * The data in pbuf says lp is its primary peer, but the data was
2654 * received by a different peer. Try to update lp with the data.
2657 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2659 struct lnet_handle_md mdh;
2661 /* Queue lp for discovery, and force it on the request queue. */
2662 lnet_net_lock(LNET_LOCK_EX);
2663 if (lnet_peer_queue_for_discovery(lp))
2664 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2665 lnet_net_unlock(LNET_LOCK_EX);
2667 LNetInvalidateMDHandle(&mdh);
2670 * Decide whether we can move the peer to the DATA_PRESENT state.
2672 * We replace stale data for a multi-rail peer, repair PING_FAILED
2673 * status, and preempt FORCE_PING.
2675 * If after that we have DATA_PRESENT, we merge it into this peer.
2677 spin_lock(&lp->lp_lock);
2678 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2679 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2680 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2681 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2682 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2683 lnet_ping_buffer_decref(pbuf);
2688 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2689 lnet_ping_buffer_decref(lp->lp_data);
2690 lp->lp_data = NULL;
2691 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2692 }
2693 if (lp->lp_state & LNET_PEER_PING_FAILED) {
2694 mdh = lp->lp_ping_mdh;
2695 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2696 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2697 lp->lp_ping_error = 0;
2699 if (lp->lp_state & LNET_PEER_FORCE_PING)
2700 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2701 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2702 spin_unlock(&lp->lp_lock);
2704 if (!LNetMDHandleIsInvalid(mdh))
2705 LNetMDUnlink(mdh);
2707 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2708 return lnet_peer_merge_data(lp, pbuf);
2710 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2711 return 0;
2712 }
2714 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2715 {
2716 int i;
2718 for (i = 0; i < pinfo->pi_nnis; i++) {
2719 if (pinfo->pi_ni[i].ns_nid == nid)
2720 return true;
2721 }
2723 return false;
2724 }
2727 * Update a peer using the data received.
2729 static int lnet_peer_data_present(struct lnet_peer *lp)
2730 __must_hold(&lp->lp_lock)
2732 struct lnet_ping_buffer *pbuf;
2733 struct lnet_peer_ni *lpni;
2734 lnet_nid_t nid = LNET_NID_ANY;
2735 unsigned int flags;
2736 int rc = 0;
2738 pbuf = lp->lp_data;
2739 lp->lp_data = NULL;
2740 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2741 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2742 spin_unlock(&lp->lp_lock);
2745 * Modifications of peer structures are done while holding the
2746 * ln_api_mutex. A global lock is required because we may be
2747 * modifying multiple peer structures, and a mutex greatly
2748 * simplifies memory management.
2750 * The actual changes to the data structures must also protect
2751 * against concurrent lookups, for which the lnet_net_lock in
2752 * LNET_LOCK_EX mode is used.
2754 mutex_lock(&the_lnet.ln_api_mutex);
2755 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2756 rc = -ESHUTDOWN;
2757 goto out;
2758 }
2761 * If this peer is not on the peer list then it is being torn
2762 * down, and our reference count may be all that is keeping it
2763 * alive. Don't do any work on it.
2765 if (list_empty(&lp->lp_peer_list))
2766 goto out;
2768 flags = LNET_PEER_DISCOVERED;
2769 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2770 flags |= LNET_PEER_MULTI_RAIL;
2773 * Check whether the primary NID in the message matches the
2774 * primary NID of the peer. If it does, update the peer; if
2775 * it does not, check whether there is already a peer with
2776 * that primary NID. If no such peer exists, try to update
2777 * the primary NID of the current peer (allowed if it was
2778 * created due to message traffic) and complete the update.
2779 * If the peer did exist, hand off the data to it.
2781 * The peer for the loopback interface is a special case: this
2782 * is the peer for the local node, and we want to set its
2783 * primary NID to the correct value here. Moreover, this peer
2784 * can show up with only the loopback NID in the ping buffer.
2786 if (pbuf->pb_info.pi_nnis <= 1)
2787 goto out;
2788 nid = pbuf->pb_info.pi_ni[1].ns_nid;
2789 if (LNET_NETTYP(LNET_NIDNET(lp->lp_primary_nid)) == LOLND) {
2790 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2791 if (!rc)
2792 rc = lnet_peer_merge_data(lp, pbuf);
2794 * if the primary NID of the peer is present in the ping info returned
2795 * from the peer, but it's not the primary NID we have
2796 * cached, and discovery is disabled, then we don't want to update
2797 * our local peer info by adding or removing NIDs; we just want
2798 * to update the status of the NIDs that we currently have
2799 * recorded in that peer.
2800 */
2801 } else if (lp->lp_primary_nid == nid ||
2802 (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
2803 lnet_is_discovery_disabled(lp))) {
2804 rc = lnet_peer_merge_data(lp, pbuf);
2806 lpni = lnet_find_peer_ni_locked(nid);
2807 if (!lpni) {
2808 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2809 if (rc) {
2810 CERROR("Primary NID error %s versus %s: %d\n",
2811 libcfs_nid2str(lp->lp_primary_nid),
2812 libcfs_nid2str(nid), rc);
2813 } else {
2814 rc = lnet_peer_merge_data(lp, pbuf);
2815 }
2816 } else {
2817 struct lnet_peer *new_lp;
2818 new_lp = lpni->lpni_peer_net->lpn_peer;
2820 * if lp has discovery/MR enabled that means new_lp
2821 * should have discovery/MR enabled as well, since
2822 * it's the same peer, which we're about to merge
2824 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
2825 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2826 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2827 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
2829 rc = lnet_peer_set_primary_data(new_lp, pbuf);
2830 lnet_consolidate_routes_locked(lp, new_lp);
2831 lnet_peer_ni_decref_locked(lpni);
2832 }
2834 out:
2835 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
2836 lp->lp_state);
2837 mutex_unlock(&the_lnet.ln_api_mutex);
2839 spin_lock(&lp->lp_lock);
2840 /* Tell discovery to re-check the peer immediately. */
2841 if (!rc)
2842 rc = LNET_REDISCOVER_PEER;
2844 return rc;
2845 }
2847 * A ping failed. Clear the PING_FAILED state and set the
2848 * FORCE_PING state, to ensure a retry even if discovery is
2849 * disabled. This avoids being left with incorrect state.
2851 static int lnet_peer_ping_failed(struct lnet_peer *lp)
2852 __must_hold(&lp->lp_lock)
2853 {
2854 struct lnet_handle_md mdh;
2855 int rc;
2857 mdh = lp->lp_ping_mdh;
2858 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2859 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2860 lp->lp_state |= LNET_PEER_FORCE_PING;
2861 rc = lp->lp_ping_error;
2862 lp->lp_ping_error = 0;
2863 spin_unlock(&lp->lp_lock);
2865 if (!LNetMDHandleIsInvalid(mdh))
2866 LNetMDUnlink(mdh);
2868 CDEBUG(D_NET, "peer %s:%d\n",
2869 libcfs_nid2str(lp->lp_primary_nid), rc);
2871 spin_lock(&lp->lp_lock);
2872 return rc ? rc : LNET_REDISCOVER_PEER;
2876 * Select NID to send a Ping or Push to.
2878 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
2880 struct lnet_peer_ni *lpni;
2882 /* Look for a direct-connected NID for this peer. */
2883 lpni = NULL;
2884 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2885 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
2886 continue;
2887 break;
2888 }
2889 if (lpni)
2890 return lpni->lpni_nid;
2892 /* Look for a routed-connected NID for this peer. */
2893 lpni = NULL;
2894 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2895 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
2896 continue;
2897 break;
2898 }
2899 if (lpni)
2900 return lpni->lpni_nid;
2902 return LNET_NID_ANY;
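/*
 * Note the selection order above: a NID on a directly connected net
 * is preferred over one that is only reachable through a router, and
 * LNET_NID_ANY tells the caller that no path to the peer is known.
 */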
2905 /* Active side of ping. */
2906 static int lnet_peer_send_ping(struct lnet_peer *lp)
2907 __must_hold(&lp->lp_lock)
2908 {
2909 lnet_nid_t pnid = LNET_NID_ANY;
2910 int nnis;
2911 int rc;
2912 int cpt;
2914 lp->lp_state |= LNET_PEER_PING_SENT;
2915 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2916 spin_unlock(&lp->lp_lock);
2918 cpt = lnet_net_lock_current();
2919 /* Refcount for MD. */
2920 lnet_peer_addref_locked(lp);
2921 pnid = lnet_peer_select_nid(lp);
2922 lnet_net_unlock(cpt);
2924 nnis = MAX(lp->lp_data_nnis, LNET_INTERFACES_MIN);
2926 rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
2927 the_lnet.ln_dc_eqh, false);
2930 * if LNetMDBind in lnet_send_ping fails we need to decrement the
2931 * refcount on the peer, otherwise LNetMDUnlink will be called
2932 * which will eventually do that.
2933 */
2934 if (rc > 0) {
2935 lnet_net_lock(cpt);
2936 lnet_peer_decref_locked(lp);
2937 lnet_net_unlock(cpt);
2938 rc = -rc; /* change the rc to a negative value */
2939 goto fail_error;
2940 } else if (rc < 0) {
2941 goto fail_error;
2942 }
2944 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2946 spin_lock(&lp->lp_lock);
2947 return 0;
2949 fail_error:
2950 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2952 * The errors that get us here are considered hard errors and
2953 * cause Discovery to terminate. So we clear PING_SENT, but do
2954 * not set either PING_FAILED or FORCE_PING. In fact we need
2955 * to clear PING_FAILED, because the unlink event handler will
2956 * have set it if we called LNetMDUnlink() above.
2958 spin_lock(&lp->lp_lock);
2959 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
2960 return rc;
2961 }
2963 /*
2964 * This function exists because you cannot call LNetMDUnlink() from an
2965 * event handler.
2966 */
2967 static int lnet_peer_push_failed(struct lnet_peer *lp)
2968 __must_hold(&lp->lp_lock)
2969 {
2970 struct lnet_handle_md mdh;
2971 int rc;
2973 mdh = lp->lp_push_mdh;
2974 LNetInvalidateMDHandle(&lp->lp_push_mdh);
2975 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
2976 rc = lp->lp_push_error;
2977 lp->lp_push_error = 0;
2978 spin_unlock(&lp->lp_lock);
2980 if (!LNetMDHandleIsInvalid(mdh))
2981 LNetMDUnlink(mdh);
2983 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2984 spin_lock(&lp->lp_lock);
2985 return rc ? rc : LNET_REDISCOVER_PEER;
2988 /* Active side of push. */
2989 static int lnet_peer_send_push(struct lnet_peer *lp)
2990 __must_hold(&lp->lp_lock)
2991 {
2992 struct lnet_ping_buffer *pbuf;
2993 struct lnet_process_id id;
2994 struct lnet_md md;
2995 int cpt;
2996 int rc;
2998 /* Don't push to a non-multi-rail peer. */
2999 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3000 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3001 return 0;
3002 }
3004 lp->lp_state |= LNET_PEER_PUSH_SENT;
3005 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3006 spin_unlock(&lp->lp_lock);
3008 cpt = lnet_net_lock_current();
3009 pbuf = the_lnet.ln_ping_target;
3010 lnet_ping_buffer_addref(pbuf);
3011 lnet_net_unlock(cpt);
3013 /* Push source MD */
3014 md.start = &pbuf->pb_info;
3015 md.length = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3016 md.threshold = 2; /* Put/Ack */
3017 md.max_size = 0;
3018 md.options = 0;
3019 md.eq_handle = the_lnet.ln_dc_eqh;
3020 md.user_ptr = lp;
3022 rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
3023 if (rc) {
3024 lnet_ping_buffer_decref(pbuf);
3025 CERROR("Can't bind push source MD: %d\n", rc);
3026 goto fail_error;
3027 }
3028 cpt = lnet_net_lock_current();
3029 /* Refcount for MD. */
3030 lnet_peer_addref_locked(lp);
3031 id.pid = LNET_PID_LUSTRE;
3032 id.nid = lnet_peer_select_nid(lp);
3033 lnet_net_unlock(cpt);
3035 if (id.nid == LNET_NID_ANY) {
3036 rc = -EHOSTUNREACH;
3037 goto fail_unlink;
3038 }
3040 rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
3041 LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3042 LNET_PROTO_PING_MATCHBITS, 0, 0);
3044 if (rc)
3045 goto fail_unlink;
3047 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3049 spin_lock(&lp->lp_lock);
3050 return 0;
3052 fail_unlink:
3053 LNetMDUnlink(lp->lp_push_mdh);
3054 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3055 fail_error:
3056 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3058 * The errors that get us here are considered hard errors and
3059 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3060 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3061 * because the unlink event handler will have set it if we
3062 * called LNetMDUnlink() above.
3064 spin_lock(&lp->lp_lock);
3065 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3066 return rc;
3067 }
3070 * An unrecoverable error was encountered during discovery.
3071 * Set error status in peer and abort discovery.
3073 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3075 CDEBUG(D_NET, "Discovery error %s: %d\n",
3076 libcfs_nid2str(lp->lp_primary_nid), error);
3078 spin_lock(&lp->lp_lock);
3079 lp->lp_dc_error = error;
3080 lp->lp_state &= ~LNET_PEER_DISCOVERING;
3081 lp->lp_state |= LNET_PEER_REDISCOVER;
3082 spin_unlock(&lp->lp_lock);
3086 * Mark the peer as discovered.
3088 static int lnet_peer_discovered(struct lnet_peer *lp)
3089 __must_hold(&lp->lp_lock)
3091 lp->lp_state |= LNET_PEER_DISCOVERED;
3092 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3093 LNET_PEER_REDISCOVER);
3095 CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3096 return 0;
3097 }
3102 * Discovering this peer is taking too long. Cancel any Ping or Push
3103 * that discovery is waiting on by unlinking the relevant MDs. The
3104 * lnet_discovery_event_handler() will proceed from here and complete
3105 * the cleanup.
3106 */
3107 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3109 struct lnet_handle_md ping_mdh;
3110 struct lnet_handle_md push_mdh;
3112 LNetInvalidateMDHandle(&ping_mdh);
3113 LNetInvalidateMDHandle(&push_mdh);
3115 spin_lock(&lp->lp_lock);
3116 if (lp->lp_state & LNET_PEER_PING_SENT) {
3117 ping_mdh = lp->lp_ping_mdh;
3118 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3119 }
3120 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3121 push_mdh = lp->lp_push_mdh;
3122 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3123 }
3124 spin_unlock(&lp->lp_lock);
3126 if (!LNetMDHandleIsInvalid(ping_mdh))
3127 LNetMDUnlink(ping_mdh);
3128 if (!LNetMDHandleIsInvalid(push_mdh))
3129 LNetMDUnlink(push_mdh);
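/*
 * Note the pattern used here and in the ping/push failure paths: the
 * MD handles are copied and invalidated while holding lp_lock, and
 * LNetMDUnlink() is only called after the lock is dropped, presumably
 * because unlinking can fire the event handlers, which take lp_lock
 * themselves.
 */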
3133 * Wait for work to be queued or some other change that must be
3134 * attended to. Returns non-zero if the discovery thread should shut
3135 * down.
3136 */
3137 static int lnet_peer_discovery_wait_for_work(void)
3138 {
3139 int cpt;
3140 int rc = 0;
3142 DEFINE_WAIT(wait);
3144 cpt = lnet_net_lock_current();
3145 for (;;) {
3146 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3147 TASK_INTERRUPTIBLE);
3148 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3149 break;
3150 if (lnet_push_target_resize_needed())
3151 break;
3152 if (!list_empty(&the_lnet.ln_dc_request))
3153 break;
3154 if (!list_empty(&the_lnet.ln_msg_resend))
3155 break;
3156 lnet_net_unlock(cpt);
3159 * wakeup max every second to check if there are peers that
3160 * have been stuck on the working queue for greater than
3161 * the peer timeout.
3162 */
3163 schedule_timeout(cfs_time_seconds(1));
3164 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3165 cpt = lnet_net_lock_current();
3166 }
3167 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3169 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3170 rc = 1;
3172 lnet_net_unlock(cpt);
3174 CDEBUG(D_NET, "woken: %d\n", rc);
3176 return rc;
3177 }
3180 * Messages that were pending on a destroyed peer will be put on a global
3181 * resend list. The message resend list will be checked by
3182 * the discovery thread when it wakes up, and will resend messages. These
3183 * messages can still be sendable in the case where the lpni which was the
3184 * initial cause of the message re-queue was transferred to another peer.
3186 * It is possible that LNet could be shutdown while we're iterating
3187 * through the list. lnet_shutdown_lndnets() will attempt to access the
3188 * resend list, but will have to wait until the spinlock is released, by
3189 * which time there shouldn't be any more messages on the resend list.
3190 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3191 * for the messages so they can be released. The other case is that
3192 * lnet_shutdown_lndnets() can finalize all the messages before this
3193 * function can visit the resend list, in which case this function will be
3194 * a no-op.
3195 */
3196 static void lnet_resend_msgs(void)
3198 struct lnet_msg *msg, *tmp;
3199 struct list_head resend;
3202 INIT_LIST_HEAD(&resend);
3204 spin_lock(&the_lnet.ln_msg_resend_lock);
3205 list_splice(&the_lnet.ln_msg_resend, &resend);
3206 spin_unlock(&the_lnet.ln_msg_resend_lock);
3208 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3209 list_del_init(&msg->msg_list);
3210 rc = lnet_send(msg->msg_src_nid_param, msg,
3211 msg->msg_rtr_nid_param);
3212 if (rc < 0) {
3213 CNETERR("Error sending %s to %s: %d\n",
3214 lnet_msgtyp2str(msg->msg_type),
3215 libcfs_id2str(msg->msg_target), rc);
3216 lnet_finalize(msg, rc);
3217 }
3218 }
3219 }
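/*
 * The list is spliced out under ln_msg_resend_lock, so messages that
 * are queued for resend while this pass is running simply wait for
 * the next wakeup of the discovery thread.
 */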
3221 /* The discovery thread. */
3222 static int lnet_peer_discovery(void *arg)
3223 {
3224 struct lnet_peer *lp;
3225 int rc;
3227 CDEBUG(D_NET, "started\n");
3228 cfs_block_allsigs();
3230 for (;;) {
3231 if (lnet_peer_discovery_wait_for_work())
3232 break;
3236 if (lnet_push_target_resize_needed())
3237 lnet_push_target_resize();
3239 lnet_net_lock(LNET_LOCK_EX);
3240 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3241 break;
3244 * Process all incoming discovery work requests. When
3245 * discovery must wait on a peer to change state, it
3246 * is added to the tail of the ln_dc_working queue. A
3247 * timestamp keeps track of when the peer was added,
3248 * so we can time out discovery requests that take too
3251 while (!list_empty(&the_lnet.ln_dc_request)) {
3252 lp = list_first_entry(&the_lnet.ln_dc_request,
3253 struct lnet_peer, lp_dc_list);
3254 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3256 * set the time the peer was put on the dc_working
3257 * queue. It shouldn't remain on the queue
3258 * forever, in case the GET message (for ping)
3259 * doesn't get a REPLY or the PUT message (for
3260 * push) doesn't get an ACK.
3262 lp->lp_last_queued = ktime_get_real_seconds();
3263 lnet_net_unlock(LNET_LOCK_EX);
3266 * Select an action depending on the state of
3267 * the peer and whether discovery is disabled.
3268 * The check whether discovery is disabled is
3269 * done after the code that handles processing
3270 * for arrived data, cleanup for failures, and
3271 * forcing a Ping or Push.
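*
* The resulting precedence, as coded below: arrived data is merged
* first, then Ping/Push failures are cleaned up, then a forced Ping
* or Push is sent, then a Ping if the NIDs are not up to date, then
* a Push if the peer needs one; only when none of these apply is the
* peer marked discovered.
*/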
3273 spin_lock(&lp->lp_lock);
3274 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3275 libcfs_nid2str(lp->lp_primary_nid), lp,
3276 lp->lp_state);
3277 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3278 rc = lnet_peer_data_present(lp);
3279 else if (lp->lp_state & LNET_PEER_PING_FAILED)
3280 rc = lnet_peer_ping_failed(lp);
3281 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3282 rc = lnet_peer_push_failed(lp);
3283 else if (lp->lp_state & LNET_PEER_FORCE_PING)
3284 rc = lnet_peer_send_ping(lp);
3285 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3286 rc = lnet_peer_send_push(lp);
3287 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3288 rc = lnet_peer_send_ping(lp);
3289 else if (lnet_peer_needs_push(lp))
3290 rc = lnet_peer_send_push(lp);
3292 rc = lnet_peer_discovered(lp);
3293 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3294 libcfs_nid2str(lp->lp_primary_nid), lp,
3295 lp->lp_state, rc);
3296 spin_unlock(&lp->lp_lock);
3298 lnet_net_lock(LNET_LOCK_EX);
3299 if (rc == LNET_REDISCOVER_PEER) {
3300 list_move(&lp->lp_dc_list,
3301 &the_lnet.ln_dc_request);
3302 } else if (rc) {
3303 lnet_peer_discovery_error(lp, rc);
3304 }
3305 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3306 lnet_peer_discovery_complete(lp);
3307 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3308 break;
3309 }
3311 lnet_net_unlock(LNET_LOCK_EX);
3312 }
3314 CDEBUG(D_NET, "stopping\n");
3316 * Clean up before telling lnet_peer_discovery_stop() that
3317 * we're done. Use wake_up() below to somewhat reduce the
3318 * size of the thundering herd if there are multiple threads
3319 * waiting on discovery of a single peer.
3322 /* Queue cleanup 1: stop all pending pings and pushes. */
3323 lnet_net_lock(LNET_LOCK_EX);
3324 while (!list_empty(&the_lnet.ln_dc_working)) {
3325 lp = list_first_entry(&the_lnet.ln_dc_working,
3326 struct lnet_peer, lp_dc_list);
3327 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3328 lnet_net_unlock(LNET_LOCK_EX);
3329 lnet_peer_cancel_discovery(lp);
3330 lnet_net_lock(LNET_LOCK_EX);
3332 lnet_net_unlock(LNET_LOCK_EX);
3334 /* Queue cleanup 2: wait for the expired queue to clear. */
3335 while (!list_empty(&the_lnet.ln_dc_expired))
3336 schedule_timeout(cfs_time_seconds(1));
3338 /* Queue cleanup 3: clear the request queue. */
3339 lnet_net_lock(LNET_LOCK_EX);
3340 while (!list_empty(&the_lnet.ln_dc_request)) {
3341 lp = list_first_entry(&the_lnet.ln_dc_request,
3342 struct lnet_peer, lp_dc_list);
3343 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3344 lnet_peer_discovery_complete(lp);
3345 }
3346 lnet_net_unlock(LNET_LOCK_EX);
3348 LNetEQFree(the_lnet.ln_dc_eqh);
3349 LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3351 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3352 wake_up(&the_lnet.ln_dc_waitq);
3354 CDEBUG(D_NET, "stopped\n");
3356 return 0;
3357 }
3359 /* ln_api_mutex is held on entry. */
3360 int lnet_peer_discovery_start(void)
3362 struct task_struct *task;
3363 int rc;
3365 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3366 return -EALREADY;
3368 rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
3369 if (rc != 0) {
3370 CERROR("Can't allocate discovery EQ: %d\n", rc);
3371 return rc;
3372 }
3374 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3375 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3376 if (IS_ERR(task)) {
3377 rc = PTR_ERR(task);
3378 CERROR("Can't start peer discovery thread: %d\n", rc);
3380 LNetEQFree(the_lnet.ln_dc_eqh);
3381 LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3383 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3384 }
3386 CDEBUG(D_NET, "discovery start: %d\n", rc);
3388 return rc;
3389 }
3391 /* ln_api_mutex is held on entry. */
3392 void lnet_peer_discovery_stop(void)
3394 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3395 return;
3397 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3398 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3399 wake_up(&the_lnet.ln_dc_waitq);
3401 wait_event(the_lnet.ln_dc_waitq,
3402 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3404 LASSERT(list_empty(&the_lnet.ln_dc_request));
3405 LASSERT(list_empty(&the_lnet.ln_dc_working));
3406 LASSERT(list_empty(&the_lnet.ln_dc_expired));
3408 CDEBUG(D_NET, "discovery stopped\n");
3409 }
3413 void
3414 lnet_debug_peer(lnet_nid_t nid)
3415 {
3416 char *aliveness = "NA";
3417 struct lnet_peer_ni *lp;
3418 int cpt;
3420 cpt = lnet_cpt_of_nid(nid, NULL);
3421 lnet_net_lock(cpt);
3423 lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3424 if (IS_ERR(lp)) {
3425 lnet_net_unlock(cpt);
3426 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3427 return;
3428 }
3430 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3431 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
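/*
 * Columns: NID, refcount, aliveness, configured tx credits, rtr
 * credits, min rtr credits, tx credits, min tx credits, tx queue
 * bytes.
 */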
3433 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3434 libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3435 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3436 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3437 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3439 lnet_peer_ni_decref_locked(lp);
3441 lnet_net_unlock(cpt);
3442 }
3444 /* Gathering information for userspace. */
3446 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3447 char aliveness[LNET_MAX_STR_LEN],
3448 __u32 *cpt_iter, __u32 *refcount,
3449 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3450 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3451 __u32 *peer_tx_qnob)
3452 {
3453 struct lnet_peer_table *peer_table;
3454 struct lnet_peer_ni *lp;
3455 int j;
3456 int lncpt;
3457 bool found = false;
3459 /* get the number of CPTs */
3460 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3462 /* if the cpt number to be examined is >= the number of cpts in
3463 * the system then indicate that there are no more cpts to examine
3464 */
3465 if (*cpt_iter >= lncpt)
3466 return -ENOENT;
3468 /* get the current table */
3469 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3470 /* if the ptable is NULL then there are no more cpts to examine */
3471 if (peer_table == NULL)
3472 return -ENOENT;
3474 lnet_net_lock(*cpt_iter);
3476 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3477 struct list_head *peers = &peer_table->pt_hash[j];
3479 list_for_each_entry(lp, peers, lpni_hashlist) {
3480 if (peer_index-- > 0)
3481 continue;
3483 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3484 if (lnet_isrouter(lp) ||
3485 lnet_peer_aliveness_enabled(lp))
3486 snprintf(aliveness, LNET_MAX_STR_LEN,
3487 lnet_is_peer_ni_alive(lp) ? "up" : "down");
3489 *nid = lp->lpni_nid;
3490 *refcount = atomic_read(&lp->lpni_refcount);
3491 *ni_peer_tx_credits =
3492 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3493 *peer_tx_credits = lp->lpni_txcredits;
3494 *peer_rtr_credits = lp->lpni_rtrcredits;
3495 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3496 *peer_tx_qnob = lp->lpni_txqnob;
3497 found = true;
3498 break;
3502 lnet_net_unlock(*cpt_iter);
3506 return found ? 0 : -ENOENT;
3509 /* ln_api_mutex is held, which keeps the peer list stable */
3510 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3512 struct lnet_ioctl_element_stats *lpni_stats;
3513 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3514 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3515 struct lnet_peer_ni_credit_info *lpni_info;
3516 struct lnet_peer_ni *lpni;
3517 struct lnet_peer *lp;
3518 lnet_nid_t nid;
3519 __u32 size;
3520 int rc;
3522 lp = lnet_find_peer(cfg->prcfg_prim_nid);
3523 if (!lp) {
3524 rc = -ENOENT;
3525 goto out;
3526 }
3529 size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3530 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3531 size *= lp->lp_nnis;
3532 if (size > cfg->prcfg_size) {
3533 cfg->prcfg_size = size;
3534 rc = -E2BIG;
3535 goto out_lp_decref;
3536 }
3538 cfg->prcfg_prim_nid = lp->lp_primary_nid;
3539 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3540 cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3541 cfg->prcfg_count = lp->lp_nnis;
3542 cfg->prcfg_size = size;
3543 cfg->prcfg_state = lp->lp_state;
3545 /* Allocate helper buffers. */
3546 rc = -ENOMEM;
3547 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3548 if (!lpni_info)
3549 goto out_lp_decref;
3550 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3551 if (!lpni_stats)
3552 goto out_free_info;
3553 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3554 if (!lpni_msg_stats)
3555 goto out_free_stats;
3556 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3557 if (!lpni_hstats)
3558 goto out_free_msg_stats;
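/*
 * The loop below emits one record per peer NI into the user buffer:
 * NID, then credit info, aggregate stats, per-message-type stats and
 * health stats, in exactly the layout that the size computed above
 * accounts for.
 */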
3560 rc = -EFAULT;
3561 lpni = NULL;
3563 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3564 nid = lpni->lpni_nid;
3565 if (copy_to_user(bulk, &nid, sizeof(nid)))
3566 goto out_free_hstats;
3567 bulk += sizeof(nid);
3569 memset(lpni_info, 0, sizeof(*lpni_info));
3570 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3571 if (lnet_isrouter(lpni) ||
3572 lnet_peer_aliveness_enabled(lpni))
3573 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3574 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3576 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3577 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3578 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3579 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3580 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3581 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3582 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3583 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3584 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3585 goto out_free_hstats;
3586 bulk += sizeof(*lpni_info);
3588 memset(lpni_stats, 0, sizeof(*lpni_stats));
3589 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3590 LNET_STATS_TYPE_SEND);
3591 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3592 LNET_STATS_TYPE_RECV);
3593 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3594 LNET_STATS_TYPE_DROP);
3595 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3596 goto out_free_hstats;
3597 bulk += sizeof(*lpni_stats);
3598 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3599 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3600 goto out_free_hstats;
3601 bulk += sizeof(*lpni_msg_stats);
3602 lpni_hstats->hlpni_network_timeout =
3603 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3604 lpni_hstats->hlpni_remote_dropped =
3605 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3606 lpni_hstats->hlpni_remote_timeout =
3607 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3608 lpni_hstats->hlpni_remote_error =
3609 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3610 lpni_hstats->hlpni_health_value =
3611 atomic_read(&lpni->lpni_healthv);
3612 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3613 goto out_free_hstats;
3614 bulk += sizeof(*lpni_hstats);
3615 }
3617 rc = 0;
3618 out_free_hstats:
3619 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3620 out_free_msg_stats:
3621 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3622 out_free_stats:
3623 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3624 out_free_info:
3625 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3626 out_lp_decref:
3627 lnet_peer_decref_locked(lp);
3628 out:
3629 return rc;
3632 void
3633 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3634 {
3635 /* the monitor thread could've shut down and cleaned up the queues */
3636 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3637 return;
3639 if (list_empty(&lpni->lpni_recovery) &&
3640 atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3641 CERROR("lpni %s added to recovery queue. Health = %d\n",
3642 libcfs_nid2str(lpni->lpni_nid),
3643 atomic_read(&lpni->lpni_healthv));
3644 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3645 lnet_peer_ni_addref_locked(lpni);
3646 }
3647 }
3649 /* Call with the ln_api_mutex held */
3650 void
3651 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3652 {
3653 struct lnet_peer_table *ptable;
3654 struct lnet_peer *lp;
3655 struct lnet_peer_net *lpn;
3656 struct lnet_peer_ni *lpni;
3657 int lncpt;
3658 int cpt;
3660 if (the_lnet.ln_state != LNET_STATE_RUNNING)
3661 return;
3663 if (!all) {
3664 lnet_net_lock(LNET_LOCK_EX);
3665 lpni = lnet_find_peer_ni_locked(nid);
3666 if (!lpni) {
3667 lnet_net_unlock(LNET_LOCK_EX);
3668 return;
3669 }
3670 atomic_set(&lpni->lpni_healthv, value);
3671 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3672 lnet_peer_ni_decref_locked(lpni);
3673 lnet_net_unlock(LNET_LOCK_EX);
3674 return;
3675 }
3677 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3679 /*
3680 * Walk all the peers and reset the health value for each one to the
3681 * specified value.
3682 */
3683 lnet_net_lock(LNET_LOCK_EX);
3684 for (cpt = 0; cpt < lncpt; cpt++) {
3685 ptable = the_lnet.ln_peer_tables[cpt];
3686 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3687 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3688 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3689 lpni_on_peer_net_list) {
3690 atomic_set(&lpni->lpni_healthv, value);
3691 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3692 }
3693 }
3694 }
3695 }
3696 lnet_net_unlock(LNET_LOCK_EX);
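/*
 * lnet_peer_ni_add_to_recoveryq_locked() above only queues a peer NI
 * whose health value is below LNET_MAX_HEALTH_VALUE, so setting a
 * reduced value here effectively schedules the NI for recovery.
 */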