 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
			    unsigned int flags);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}

void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {

		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}

void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			continue;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}

static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = *nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = *nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * loopback peer.
	 */
	if (nid_is_lo0(nid))
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected the lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}

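/*
 * Illustrative teardown sketch (not part of the original source): a
 * deleted peer_ni parks on its CPT's zombie list until the last
 * reference is dropped; shutdown then blocks until pt_zombies reaches
 * zero, which is the pattern lnet_peer_tables_cleanup() below uses:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	lnet_peer_ni_del_locked(lpni, false);
 *	lnet_net_unlock(LNET_LOCK_EX);
 *	lnet_peer_ni_finalize_wait(ptable);
 */
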
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		lpni = lpni2;
	}

	return rc;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}

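/*
 * Illustrative note (not part of the original source): each handle is
 * copied out and invalidated under lp_lock, and LNetMDUnlink() is only
 * called after the lock is dropped, so the MD unlink path never runs
 * with the peer spinlock held:
 *
 *	spin_lock(&lp->lp_lock);
 *	mdh = lp->lp_ping_mdh;
 *	LNetInvalidateMDHandle(&lp->lp_ping_mdh);
 *	spin_unlock(&lp->lp_lock);
 *	if (!LNetMDHandleIsInvalid(mdh))
 *		LNetMDUnlink(mdh);
 */
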
static int
lnet_peer_del(struct lnet_peer *peer)
{
	int rc;

	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	rc = lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
{
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    nid_same(&primary_nid, nid)) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}

	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);
	lnet_peer_ni_decref_locked(lpni);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
	       flags, rc);

	return rc;
}

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				      &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	struct lnet_nid gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, &gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (nid_same(&lpni->lpni_nid, nid))
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}

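/*
 * Illustrative caller pattern (not part of the original source):
 * lnet_find_peer() returns the peer with a reference held, so callers
 * pair it with a decref, as LNetAddPeer() below does; use_the_peer()
 * is a placeholder:
 *
 *	struct lnet_peer *lp;
 *
 *	lp = lnet_find_peer(nid);
 *	if (lp) {
 *		use_the_peer(lp);
 *		lnet_peer_decref_locked(lp);
 *	}
 */
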
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_first_entry(&peer->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		}
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
				       struct lnet_peer_net,
				       lpn_peer_nets);
		/* get the ni on it */
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_first_entry(&prev->lpni_peer_nis,
				struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}

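/*
 * Illustrative usage sketch (not part of the original source): feeding
 * the previous return value back in walks every NI of the peer, which
 * is exactly how lnet_peer_clr_non_mr_pref_nids() below iterates;
 * visit() is a placeholder:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		visit(lpni);
 */
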
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	int lncpt, cpt, rc;
	__u32 i;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;
			if (i >= count)
				goto done;
			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;

done:
	*countp = count;
	*sizep = size;
	return rc;
}

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     struct lnet_nid *gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(gw_nid));
		if (nid_same(&ne->nl_nid, gw_nid))
			return true;
	}

	return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       struct lnet_nid *gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list cannot be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return nid_same(&lpni->lpni_pref.nid, nid);
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (nid_same(&ne->nl_nid, nid))
			return true;
	}

	return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
static int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
				 struct lnet_nid *nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (!nid || LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);

	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);

	return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	struct lnet_nid *tmp_nid = NULL;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 &&
	    nid_same(&lpni->lpni_pref.nid, nid)) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = &lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne1->nl_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				LIBCFS_FREE(ne1, alloc_size);
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = *tmp_nid;
		}

		ne1->nl_nid = *nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (!nid_same(&lpni->lpni_pref.nid, nid)) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne->nl_nid, nid))
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);

	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

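/*
 * Note on the storage used above (illustrative, inferred from this
 * file): lpni_pref is a union, so a single preferred NID is stored
 * inline and the lnet_nid_list is only used once there are two or
 * more entries:
 *
 *	if (lpni->lpni_pref_nnids == 1)
 *		nid = lpni->lpni_pref.nid;		(single NID inline)
 *	else if (lpni->lpni_pref_nnids > 1)
 *		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list)
 *			nid = ne->nl_nid;		(list of NIDs)
 */
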
void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
	struct lnet_peer_ni *lpni;

	*result = *nid;
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		*result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

int
LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
{
	struct lnet_nid pnid = LNET_ANY_NID;
	bool mr;
	int i, rc;
	int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	rc = 0;
	for (i = 0; i < num_nids; i++) {
		if (nid_is_lo0(&nids[i]))
			continue;

		if (LNET_NID_IS_ANY(&pnid)) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
			if (rc == -EALREADY) {
				struct lnet_peer *lp;

				CDEBUG(D_NET, "A peer exists for NID %s\n",
				       libcfs_nidstr(&pnid));
				rc = 0;
				/* Adds a refcount */
				lp = lnet_find_peer(&pnid);
				LASSERT(lp);
				pnid = lp->lp_primary_nid;
				/* Drop refcount from lookup */
				lnet_peer_decref_locked(lp);
			}
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
					      flags);
		} else {
			rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
					      flags);
		}

		if (rc && rc != -EEXIST)
			break;
	}

	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

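/*
 * Illustrative usage sketch for LNetAddPeer() (hypothetical caller,
 * not part of the original source): the first non-loopback NID in the
 * array is treated as the primary NID unless a peer for it already
 * exists:
 *
 *	struct lnet_nid nids[2];	(populated by the caller)
 *	int rc;
 *
 *	rc = LNetAddPeer(nids, 2);
 *	if (rc)
 *		CERROR("Failed to add peer: %d\n", rc);
 */
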
void LNetPrimaryNID(struct lnet_nid *nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	struct lnet_nid orig;
	int rc = 0;
	int cpt;

	if (!nid || nid_is_lo0(nid))
		return;

	orig = *nid;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
again:
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}

	/* DD disabled, nothing to do */
	if (lnet_peer_discovery_disabled) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}

	/* Peer already up to date, nothing to do */
	if (lnet_peer_is_uptodate_locked(lp)) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}
	spin_unlock(&lp->lp_lock);

	/* If primary nid locking is enabled, discovery is performed
	 * in the background.
	 * If primary nid locking is disabled, discovery blocks here.
	 * Messages to the peer will not go through until the discovery is
	 * complete.
	 */
	if (lock_prim_nid)
		rc = lnet_discover_peer_locked(lpni, cpt, false);
	else
		rc = lnet_discover_peer_locked(lpni, cpt, true);
	if (rc)
		goto out_decref;

	/* The lpni (or lp) for this NID may have changed and our ref is
	 * the only thing keeping the old one around. Release the ref
	 * and lookup the lpni again
	 */
	lnet_peer_ni_decref_locked(lpni);
	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
		goto again;

	*nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
	       libcfs_nidstr(nid), rc);
}
EXPORT_SYMBOL(LNetPrimaryNID);

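/*
 * Illustrative usage sketch (hypothetical caller, not part of the
 * original source): resolve whichever NID a configuration names into
 * the peer's primary NID before using it as a lookup key:
 *
 *	struct lnet_nid nid;	(any NID of the peer, set by the caller)
 *
 *	LNetPrimaryNID(&nid);
 *	(on success, nid now holds the peer's primary NID)
 */
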
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (!nid_same(&lp->lp_primary_nid, nid))
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
			if (nid_same(&lp->lp_primary_nid, nid))
				goto out;
			/* we're trying to recreate an existing peer which
			 * has already been created and its primary
			 * locked. This is likely due to two servers
			 * existing on the same node. So we'll just refer
			 * to that node with the primary NID which was
			 * first added by Lustre
			 */
			rc = -EALREADY;
			goto out;
		} else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
			/* if not recreating peer as configured and
			 * not locking primary nid, no need to
			 * do anything if primary nid is not being changed
			 */
			if (nid_same(&lp->lp_primary_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate the peer.
		 * We can get here:
		 * 1. If the peer is being recreated as a configured NID
		 * 2. if there already exists a peer which
		 *    was discovered manually, but is recreated via Lustre
		 *    with the primary NID locked.
		 */
		rc = lnet_peer_del(lp);
		if (rc)
			goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nidstr(nid), flags, rc);
	return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *lp2 =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = lp2->lp_rtr_refcount;
			unsigned int peer2_state;
			__u64 peer2_prim_lock_ts;

			/* If there's another peer that this NID belongs to
			 * and the primary NID for that peer is locked,
			 * then, unless it is the only NID, we don't want
			 * to mess with it.
			 * But the configuration is wrong at this point,
			 * so we should flag both of these peers as in a bad
			 * state
			 */
			spin_lock(&lp2->lp_lock);
			if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
			    lp2->lp_nnis > 1) {
				lp2->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp2->lp_lock);
				spin_lock(&lp->lp_lock);
				lp->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp->lp_lock);
				CERROR("Peer %s NID %s is already locked with peer %s\n",
				       libcfs_nidstr(&lp->lp_primary_nid),
				       libcfs_nidstr(nid),
				       libcfs_nidstr(&lp2->lp_primary_nid));
				rc = -EEXIST;
				goto out_free_lpni;
			}
			peer2_state = lp2->lp_state;
			peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
			spin_unlock(&lp2->lp_lock);

			/* NID which got locked the earliest should be
			 * kept as primary. In case if the peers were
			 * created by Lustre, this allows the
			 * first listed NID to stay primary as intended
			 * for the purpose of communicating with Lustre
			 * even if peer discovery succeeded using
			 * a different NID of MR peer.
			 */
			spin_lock(&lp->lp_lock);
			if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
			    ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
			      peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
			     !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
				lp->lp_prim_lock_ts = peer2_prim_lock_ts;
				lp->lp_primary_nid = *nid;
				lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
			}
			spin_unlock(&lp->lp_lock);
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(lp2, lp);
			}
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
	       flags, rc);
	return rc;
}

/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
			  unsigned int flags)
{
	struct lnet_nid old = lp->lp_primary_nid;
	int rc = 0;

	if (nid_same(&lp->lp_primary_nid, nid))
		goto out;

	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
		lp->lp_primary_nid = *nid;

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc) {
		lp->lp_primary_nid = old;
		goto out;
	}
out:
	/* if this is a configured peer or the primary for that peer has
	 * been locked, then we don't want to flag this scenario as
	 * a failure
	 */
	if (lp->lp_state & LNET_PEER_CONFIGURED ||
	    lp->lp_state & LNET_PEER_LOCK_PRIMARY)
		return 0;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nidstr(&old), libcfs_nidstr(nid), rc);

	return rc;
}

/*
 * lpni creation initiated due to traffic either sending or receiving.
 * Callers must hold ln_api_mutex
 * Ref taken on lnet_peer_ni returned by this function
 */
static struct lnet_peer_ni *
lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_net *lpn = NULL;
	struct lnet_peer_ni *lpni;
	unsigned flags = 0;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out_err;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out_err;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_err;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_err;
	lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	/* lnet_peer_attach_peer_ni() always returns 0 */
	rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

	lnet_peer_ni_addref_locked(lpni);
	goto out;

out_err:
	if (lpn)
		LIBCFS_FREE(lpn, sizeof(*lpn));
	if (lp)
		LIBCFS_FREE(lp, sizeof(*lp));
	lpni = ERR_PTR(rc);
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
	return lpni;
}

/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
static int
lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
		 unsigned int flags)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;

	/* The prim_nid must always be specified */
	if (LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (LNET_NID_IS_ANY(nid))
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_ni_decref_locked(lpni);

	/* Peer must have been configured. */
	if ((flags & LNET_PEER_CONFIGURED) &&
	    !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nidstr(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nidstr(prim_nid));
		return -EPERM;
	}

	if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET,
		       "Don't add temporary peer NI for uptodate peer %s\n",
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -EINVAL;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}

int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
			  bool mr, bool lock_prim)
{
	int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);

	return lnet_add_peer_ni(prim_nid, nid, mr, fl);
}

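/*
 * Illustrative note (not part of the original source): the flag
 * arithmetic above relies on bool promoting to 0 or 1, so it is
 * equivalent to:
 *
 *	int fl = LNET_PEER_CONFIGURED |
 *		 (lock_prim ? LNET_PEER_LOCK_PRIMARY : 0);
 */
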
static int
lnet_reset_peer(struct lnet_peer *lp)
{
	struct lnet_peer_net *lpn, *lpntmp;
	struct lnet_peer_ni *lpni, *lpnitmp;
	unsigned int flags;
	int rc;

	lnet_peer_cancel_discovery(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
		list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
					 lpni_peer_nis) {
			if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
				continue;

			rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
			if (rc)
				CERROR("Failed to delete %s from peer %s\n",
				       libcfs_nidstr(&lpni->lpni_nid),
				       libcfs_nidstr(&lp->lp_primary_nid));
		}
	}

	/* mark it for discovery the next time we use it */
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	return 0;
}

/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
		 int force)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_ni_decref_locked(lpni);

	if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nidstr(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
		if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
			CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
			       libcfs_nidstr(&lp->lp_primary_nid));
			return lnet_reset_peer(lp);
		}
		return lnet_peer_del(lp);
	}

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}

void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}

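/*
 * Illustrative sketch (inferred, not part of the original source):
 * this is the kref release callback paired with the kref_init() in
 * lnet_peer_ni_alloc(), so the final reference drop takes the form
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 *
 * which is presumably what lnet_peer_ni_decref_locked() wraps.
 */
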
struct lnet_peer_ni *
lnet_nid2peerni_ex(struct lnet_nid *nid)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(LNET_LOCK_EX);

	lpni = lnet_peer_ni_traffic_add(nid, NULL);

	lnet_net_lock(LNET_LOCK_EX);

	return lpni;
}

/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_peerni_by_nid_locked(struct lnet_nid *nid,
			  struct lnet_nid *pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * the_lnet.ln_state is only modified under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state == LNET_STATE_RUNNING)
		lpni = lnet_peer_ni_traffic_add(nid, pref);

	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR_OR_NULL(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}

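/*
 * Illustrative caller pattern (not part of the original source):
 * lookup-or-create with the current CPT lock held, as LNetPrimaryNID()
 * above does:
 *
 *	cpt = lnet_net_lock_current();
 *	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
 *	if (IS_ERR(lpni)) {
 *		lnet_net_unlock(cpt);
 *		return PTR_ERR(lpni);
 *	}
 *	...
 *	lnet_peer_ni_decref_locked(lpni);
 *	lnet_net_unlock(cpt);
 */
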
bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}

/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}

/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), rc);

	return rc;
}

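/*
 * Illustrative call site (not part of the original source): as the
 * comment in lnet_peer_queue_message() above notes, callers holding
 * lnet_net_lock/EX may simply ignore the -EALREADY case:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	(void)lnet_peer_queue_for_discovery(lp);
 *	lnet_net_unlock(LNET_LOCK_EX);
 */
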
/*
 * Discovery of a peer is complete. Wake all waiters on the peer.
 * Call with lnet_net_lock/EX held.
 */
static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
{
	struct lnet_msg *msg, *tmp;
	int rc;
	LIST_HEAD(pending_msgs);

	CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid));

	spin_lock(&lp->lp_lock);
	/* Our caller dropped lp_lock which may have allowed another thread to
	 * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
	 * Ensure it is cleared.
	 */
	lp->lp_state &= ~LNET_PEER_DISCOVERING;
	if (dc_error) {
		lp->lp_dc_error = dc_error;
		lp->lp_state |= LNET_PEER_REDISCOVER;
	}
	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
	spin_unlock(&lp->lp_lock);
	list_del_init(&lp->lp_dc_list);
	wake_up(&lp->lp_dc_waitq);

	if (lp->lp_rtr_refcount > 0)
		lnet_router_discovery_complete(lp);

	lnet_net_unlock(LNET_LOCK_EX);

	/* iterate through all pending messages and send them again */
	list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
		list_del_init(&msg->msg_list);
		if (dc_error) {
			lnet_finalize(msg, dc_error);
			continue;
		}

		CDEBUG(D_NET, "sending pending message %s to target %s\n",
		       lnet_msgtyp2str(msg->msg_type),
		       libcfs_idstr(&msg->msg_target));
		rc = lnet_send(&msg->msg_src_nid_param, msg,
			       &msg->msg_rtr_nid_param);
		if (rc < 0) {
			CNETERR("Error sending %s to %s: %d\n",
				lnet_msgtyp2str(msg->msg_type),
				libcfs_idstr(&msg->msg_target), rc);
			lnet_finalize(msg, rc);
		}
	}
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);
}

2445 * Handle inbound push.
2446 * Like any event handler, called with lnet_res_lock/CPT held.
2448 void lnet_peer_push_event(struct lnet_event *ev)
2450 struct lnet_ping_buffer *pbuf;
2451 struct lnet_peer *lp;
2454 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2456 /* lnet_find_peer() adds a refcount */
2457 lp = lnet_find_peer(&ev->source.nid);
2459 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2460 libcfs_nidstr(&ev->initiator.nid),
2461 libcfs_nidstr(&ev->source.nid));
2462 pbuf->pb_needs_post = true;
2466 /* Ensure peer state remains consistent while we modify it. */
2467 spin_lock(&lp->lp_lock);
2470 * If some kind of error happened the contents of the message
2471 * cannot be used. Clear the NIDS_UPTODATE and set the
2472 * FORCE_PING flag to trigger a ping.
2475 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2476 lp->lp_state |= LNET_PEER_FORCE_PING;
2477 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2479 libcfs_nidstr(&lp->lp_primary_nid),
2480 libcfs_nidstr(&ev->source.nid));
2485 * A push with invalid or corrupted info. Clear the UPTODATE
2486 * flag to trigger a ping.
2488 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2489 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2490 lp->lp_state |= LNET_PEER_FORCE_PING;
2491 CDEBUG(D_NET, "Corrupted Push from %s\n",
2492 libcfs_nidstr(&lp->lp_primary_nid));
2496 /* Make sure we'll allocate the correct size ping buffer when
2497 * pinging the peer.
2499 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2500 if (lp->lp_data_bytes < infobytes)
2501 lp->lp_data_bytes = infobytes;
2504 * A non-Multi-Rail peer is not supposed to be capable of
2505 * receiving a Push.
2507 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2508 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2509 libcfs_nidstr(&lp->lp_primary_nid));
2514 * The peer may have discovery disabled at its end. Set
2515 * NO_DISCOVERY as appropriate.
2517 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2518 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2519 libcfs_nidstr(&lp->lp_primary_nid));
2521 * Mark the peer for deletion if we already know about it
2522 * and it is transitioning from discovery enabled to discovery disabled
2524 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2525 LNET_PEER_DISCOVERING)) &&
2526 lp->lp_state & LNET_PEER_DISCOVERED) {
2527 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2528 libcfs_nidstr(&lp->lp_primary_nid),
2530 lp->lp_state |= LNET_PEER_MARK_DELETION;
2532 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2533 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2534 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2535 libcfs_nidstr(&lp->lp_primary_nid));
2536 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2540 * Update the MULTI_RAIL flag based on the push. If the peer
2541 * was configured with DLC then the setting should match what
2542 * DLC put in.
2543 * NB: We verified above that the MR feature bit is set in pi_features
2545 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2546 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2547 libcfs_nidstr(&lp->lp_primary_nid), lp);
2548 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2549 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2550 libcfs_nidstr(&lp->lp_primary_nid));
2551 } else if (lnet_peer_discovery_disabled) {
2552 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2553 libcfs_nidstr(&lp->lp_primary_nid), lp);
2554 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2555 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2556 libcfs_nidstr(&lp->lp_primary_nid), lp);
2558 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2559 libcfs_nidstr(&lp->lp_primary_nid), lp);
2560 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2561 lnet_peer_clr_non_mr_pref_nids(lp);
2564 /* Check for truncation of the Put message. Clear the
2565 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2566 * and tell discovery to allocate a bigger buffer.
2568 if (ev->mlength < ev->rlength) {
2569 if (the_lnet.ln_push_target_nbytes < infobytes)
2570 the_lnet.ln_push_target_nbytes = infobytes;
2571 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2572 lp->lp_state |= LNET_PEER_FORCE_PING;
2573 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2574 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2578 /* always assume new data */
2579 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2580 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2582 /* If there is data present that hasn't been processed yet,
2583 * we'll replace it if the Put contained newer data and it
2584 * fits. We're racing with a Ping or earlier Push in this
2585 * case.
2587 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2588 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2589 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2590 infobytes <= lp->lp_data->pb_nbytes) {
2591 memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2593 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2594 libcfs_nidstr(&lp->lp_primary_nid),
2595 LNET_PING_BUFFER_SEQNO(pbuf),
2596 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2602 * Allocate a buffer to copy the data. On a failure we drop
2603 * the Push and set FORCE_PING to force the discovery
2604 * thread to fix the problem by pinging the peer.
2606 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2608 lp->lp_state |= LNET_PEER_FORCE_PING;
2609 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2610 libcfs_nidstr(&lp->lp_primary_nid),
2611 LNET_PING_BUFFER_SEQNO(pbuf));
2616 unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2618 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2619 CDEBUG(D_NET, "Received Push %s %u\n",
2620 libcfs_nidstr(&lp->lp_primary_nid),
2621 LNET_PING_BUFFER_SEQNO(pbuf));
2624 /* We've processed this buffer. It can be reposted */
2625 pbuf->pb_needs_post = true;
2628 * Queue the peer for discovery if it isn't queued yet. If the peer
2629 * was already queued, force it onto the request queue and wake the
2630 * discovery thread, because its status changed.
2632 spin_unlock(&lp->lp_lock);
2633 lnet_net_lock(LNET_LOCK_EX);
2634 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2635 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2636 wake_up(&the_lnet.ln_dc_waitq);
2638 /* Drop refcount from lookup */
2639 lnet_peer_decref_locked(lp);
2640 lnet_net_unlock(LNET_LOCK_EX);
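/* To summarize the failure handling above: an errored, corrupted, or
* truncated Push converges on one recovery path - clear NIDS_UPTODATE and
* set FORCE_PING so the discovery thread re-pings the peer (a truncated
* Push also grows ln_push_target_nbytes first). Only a valid, complete
* Push leaves DATA_PRESENT set with the new data attached to lp_data.
*/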
2644 * Clear the discovery error state, unless we're already discovering
2645 * this peer, in which case the error is current.
2647 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2649 spin_lock(&lp->lp_lock);
2650 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2651 lp->lp_dc_error = 0;
2652 spin_unlock(&lp->lp_lock);
2656 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2657 * dropped/retaken within this function. An lnet_peer_ni is passed in
2658 * because discovery could tear down an lnet_peer.
2661 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2664 struct lnet_peer *lp = NULL;
2670 lnet_peer_decref_locked(lp);
2671 lnet_net_unlock(cpt);
2672 lnet_net_lock(LNET_LOCK_EX);
2673 lp = lpni->lpni_peer_net->lpn_peer;
2674 lnet_peer_clear_discovery_error(lp);
2677 * We're willing to be interrupted. The lpni can become a
2678 * zombie if we race with DLC, so we must check for that.
2681 /* Keep lp alive when the lnet_net_lock is unlocked */
2682 lnet_peer_addref_locked(lp);
2683 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2684 if (signal_pending(current))
2686 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2689 * Don't repeat discovery if discovery is disabled. This is
2690 * done to ensure we can use discovery as a standard ping as
2691 * well, for backwards compatibility with routers which do not
2692 * have discovery or have it disabled.
2694 if (lnet_is_discovery_disabled(lp) && count > 0)
2696 if (lp->lp_dc_error)
2698 if (lnet_peer_is_uptodate(lp))
2700 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2702 lnet_peer_queue_for_discovery(lp);
2704 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2707 * If caller requested a non-blocking operation then
2708 * return immediately. Once discovery is complete any
2709 * pending messages that were stopped due to discovery
2710 * will be transmitted.
2715 lnet_net_unlock(LNET_LOCK_EX);
2717 finish_wait(&lp->lp_dc_waitq, &wait);
2718 lnet_net_lock(LNET_LOCK_EX);
2719 lnet_peer_decref_locked(lp);
2720 /* Peer may have changed */
2721 lp = lpni->lpni_peer_net->lpn_peer;
2723 finish_wait(&lp->lp_dc_waitq, &wait);
2725 lnet_net_unlock(LNET_LOCK_EX);
2728 * The peer may have changed, so re-check and rediscover if that turns
2729 * out to have been the case. The reference count on lp ensured that
2730 * even if it was unlinked from lpni the memory could not be recycled.
2731 * Thus the check below is sufficient to determine whether the peer
2732 * changed. If the peer changed, then lp must not be dereferenced.
2734 if (lp != lpni->lpni_peer_net->lpn_peer)
2737 if (signal_pending(current))
2739 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2741 else if (lp->lp_dc_error)
2742 rc = lp->lp_dc_error;
2744 CDEBUG(D_NET, "non-blocking discovery\n");
2745 else if (!lnet_peer_is_uptodate(lp) &&
2746 !(lnet_is_discovery_disabled(lp) ||
2747 (lp->lp_state & LNET_PEER_MARK_DELETED)))
2750 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2751 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2752 libcfs_nidstr(&lpni->lpni_nid), rc,
2753 (!block) ? "pending discovery" : "discovery complete");
2754 lnet_peer_decref_locked(lp);
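/* Caller sketch (illustrative, not an actual call site): with the
* ln_api_mutex held and lnet_net_lock(cpt) taken, a send path could kick
* off discovery without blocking:
*
*	rc = lnet_discover_peer_locked(lpni, cpt, false);
*	if (rc < 0)
*		return rc;
*
* Messages parked on lp_dc_pendq in the meantime are resent by
* lnet_peer_discovery_complete() once discovery finishes.
*/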
2759 /* Handle an incoming ack for a push. */
2761 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2763 struct lnet_ping_buffer *pbuf;
2765 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2766 spin_lock(&lp->lp_lock);
2767 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2768 lp->lp_push_error = ev->status;
2770 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2772 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2773 spin_unlock(&lp->lp_lock);
2775 CDEBUG(D_NET, "peer %s ev->status %d\n",
2776 libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2779 static bool find_primary(struct lnet_nid *nid,
2780 struct lnet_ping_buffer *pbuf)
2782 struct lnet_ping_info *pi = &pbuf->pb_info;
2783 struct lnet_ping_iter piter;
2786 if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2787 /* First large nid is primary */
2788 for (stp = ping_iter_first(&piter, pbuf, nid);
2790 stp = ping_iter_next(&piter, nid)) {
2791 if (nid_is_nid4(nid))
2793 /* nid has already been copied in */
2796 /* no large nids ... weird ... ignore the flag
2797 * and use first nid.
2800 /* pi_nids[1] is primary */
2801 if (pi->pi_nnis < 2)
2803 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
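/* Layout assumed by find_primary() above: pi_ni[0] is the loopback NID
* and pi_ni[1] is the primary NID of a small-address peer, while a peer
* advertising LNET_PING_FEAT_PRIMARY_LARGE promotes its first
* large-address NID to primary instead.
*/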
2807 /* Handle a Reply message. This is the reply to a Ping message. */
2809 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2811 struct lnet_ping_buffer *pbuf;
2812 struct lnet_nid primary;
2815 bool ping_feat_disc;
2817 spin_lock(&lp->lp_lock);
2819 lp->lp_disc_src_nid = ev->target.nid;
2820 lp->lp_disc_dst_nid = ev->source.nid;
2823 * If some kind of error happened the contents of the message
2824 * cannot be used. Set PING_FAILED to trigger a retry.
2827 lp->lp_state |= LNET_PEER_PING_FAILED;
2828 lp->lp_ping_error = ev->status;
2829 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2831 libcfs_nidstr(&lp->lp_primary_nid),
2832 libcfs_nidstr(&ev->source.nid));
2836 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2837 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2838 lnet_swap_pinginfo(pbuf);
2841 * A reply with invalid or corrupted info. Set PING_FAILED to
2842 * trigger a retry.
2844 rc = lnet_ping_info_validate(&pbuf->pb_info);
2846 lp->lp_state |= LNET_PEER_PING_FAILED;
2847 lp->lp_ping_error = 0;
2848 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2849 libcfs_nidstr(&lp->lp_primary_nid), rc);
2854 * The peer may have discovery disabled at its end. Set
2855 * NO_DISCOVERY as appropriate.
2857 ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2858 if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2859 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2860 libcfs_nidstr(&lp->lp_primary_nid),
2861 ping_feat_disc ? "enabled" : "disabled",
2862 lnet_peer_discovery_disabled ? "disabled" : "enabled");
2864 /* Detect whether this peer has toggled discovery from on to
2865 * off and whether we can delete and re-create the peer. Peers
2866 * that were manually configured cannot be deleted by discovery.
2867 * We need to delete this peer and re-create it if the peer was
2868 * not configured manually, is currently considered DD capable,
2869 * and either:
2870 * 1. We've already discovered the peer (the peer has toggled
2871 * the discovery feature from on to off), or
2872 * 2. The peer is considered MR, but it was not user configured
2873 * (this was a "temporary" peer created via the kernel APIs
2874 * that we're discovering for the first time)
2876 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2877 LNET_PEER_NO_DISCOVERY)) &&
2878 (lp->lp_state & (LNET_PEER_DISCOVERED |
2879 LNET_PEER_MULTI_RAIL))) {
2880 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2881 libcfs_nidstr(&lp->lp_primary_nid),
2883 lp->lp_state |= LNET_PEER_MARK_DELETION;
2885 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2887 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2888 libcfs_nidstr(&lp->lp_primary_nid));
2889 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2893 * Update the MULTI_RAIL flag based on the reply. If the peer
2894 * was configured with DLC then the setting should match what
2895 * DLC put in.
2897 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2898 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2899 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2900 libcfs_nidstr(&lp->lp_primary_nid), lp);
2901 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2902 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2903 libcfs_nidstr(&lp->lp_primary_nid));
2904 } else if (lnet_peer_discovery_disabled) {
2906 "peer %s(%p) not MR: DD disabled locally\n",
2907 libcfs_nidstr(&lp->lp_primary_nid), lp);
2908 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2910 "peer %s(%p) not MR: DD disabled remotely\n",
2911 libcfs_nidstr(&lp->lp_primary_nid), lp);
2913 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2914 libcfs_nidstr(&lp->lp_primary_nid), lp);
2915 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2916 lnet_peer_clr_non_mr_pref_nids(lp);
2918 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2919 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2920 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2921 libcfs_nidstr(&lp->lp_primary_nid));
2923 CERROR("Multi-Rail state vanished from %s\n",
2924 libcfs_nidstr(&lp->lp_primary_nid));
2925 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2929 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2931 * Make sure we'll allocate the correct size ping buffer when
2932 * pinging the peer.
2934 if (lp->lp_data_bytes < infobytes)
2935 lp->lp_data_bytes = infobytes;
2937 /* Check for truncation of the Reply. Clear PING_SENT and set
2938 * PING_FAILED to trigger a retry.
2940 if (pbuf->pb_nbytes < infobytes) {
2941 if (the_lnet.ln_push_target_nbytes < infobytes)
2942 the_lnet.ln_push_target_nbytes = infobytes;
2943 lp->lp_state |= LNET_PEER_PING_FAILED;
2944 lp->lp_ping_error = 0;
2945 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2946 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2951 * Check the sequence numbers in the reply. These are only
2952 * available if the reply came from a Multi-Rail peer.
2954 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2955 find_primary(&primary, pbuf) &&
2956 nid_same(&lp->lp_primary_nid, &primary)) {
2957 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2958 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2959 libcfs_nidstr(&lp->lp_primary_nid),
2960 LNET_PING_BUFFER_SEQNO(pbuf),
2963 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2966 /* We're happy with the state of the data in the buffer. */
2967 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2968 libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2970 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2971 lnet_ping_buffer_decref(lp->lp_data);
2973 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2974 lnet_ping_buffer_addref(pbuf);
2977 lp->lp_state &= ~LNET_PEER_PING_SENT;
2978 spin_unlock(&lp->lp_lock);
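/* To summarize the sequence number handling above: seqnos are only
* trusted when the Reply came from a Multi-Rail peer whose advertised
* primary NID matches the one we have recorded; a seqno lower than
* lp_peer_seqno is logged as a probable peer reboot, while a newer one
* replaces the cached value before the buffer is handed to the
* DATA_PRESENT path.
*/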
2982 * Send event handling. Only matters for error cases, where we clean
2983 * up state on the peer and peer_ni that would otherwise be updated in
2984 * the REPLY event handler for a successful Ping, and the ACK event
2985 * handler for a successful Push.
2988 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2995 spin_lock(&lp->lp_lock);
2996 if (ev->msg_type == LNET_MSG_GET) {
2997 lp->lp_state &= ~LNET_PEER_PING_SENT;
2998 lp->lp_state |= LNET_PEER_PING_FAILED;
2999 lp->lp_ping_error = ev->status;
3000 } else { /* ev->msg_type == LNET_MSG_PUT */
3001 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3002 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3003 lp->lp_push_error = ev->status;
3005 spin_unlock(&lp->lp_lock);
3006 rc = LNET_REDISCOVER_PEER;
3008 CDEBUG(D_NET, "%s Send to %s: %d\n",
3009 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3010 libcfs_nidstr(&ev->target.nid), rc);
3015 * Unlink event handling. This event is only seen if a call to
3016 * LNetMDUnlink() caused the MD to be unlinked. If this call was
3017 * made after the event was set up in LNetGet() or LNetPut() then we
3018 * assume the Ping or Push timed out.
3021 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3023 spin_lock(&lp->lp_lock);
3024 /* We've passed through LNetGet() */
3025 if (lp->lp_state & LNET_PEER_PING_SENT) {
3026 lp->lp_state &= ~LNET_PEER_PING_SENT;
3027 lp->lp_state |= LNET_PEER_PING_FAILED;
3028 lp->lp_ping_error = -ETIMEDOUT;
3029 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3030 libcfs_nidstr(&lp->lp_primary_nid));
3032 /* We've passed through LNetPut() */
3033 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3034 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3035 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3036 lp->lp_push_error = -ETIMEDOUT;
3037 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3038 libcfs_nidstr(&lp->lp_primary_nid));
3040 spin_unlock(&lp->lp_lock);
3044 * Event handler for the discovery EQ.
3046 * Called with lnet_res_lock(cpt) held. The cpt is the
3047 * lnet_cpt_of_cookie() of the md handle cookie.
3049 static void lnet_discovery_event_handler(struct lnet_event *event)
3051 struct lnet_peer *lp = event->md_user_ptr;
3052 struct lnet_ping_buffer *pbuf;
3055 /* discovery needs to take another look */
3056 rc = LNET_REDISCOVER_PEER;
3058 CDEBUG(D_NET, "Received event: %d\n", event->type);
3060 switch (event->type) {
3061 case LNET_EVENT_ACK:
3062 lnet_discovery_event_ack(lp, event);
3064 case LNET_EVENT_REPLY:
3065 lnet_discovery_event_reply(lp, event);
3067 case LNET_EVENT_SEND:
3068 /* Only send failure triggers a retry. */
3069 rc = lnet_discovery_event_send(lp, event);
3071 case LNET_EVENT_UNLINK:
3072 /* LNetMDUnlink() was called */
3073 lnet_discovery_event_unlink(lp, event);
3076 /* Invalid events. */
3079 lnet_net_lock(LNET_LOCK_EX);
3081 /* put peer back at end of request queue, if discovery not already
3082 * complete */
3083 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3084 lnet_peer_queue_for_discovery(lp)) {
3085 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3086 wake_up(&the_lnet.ln_dc_waitq);
3088 if (event->unlinked) {
3089 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3090 lnet_ping_buffer_decref(pbuf);
3091 lnet_peer_decref_locked(lp);
3093 lnet_net_unlock(LNET_LOCK_EX);
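/* Dispatch summary for the handler above: ACK completes a Push, REPLY
* completes a Ping, a failed SEND requeues the peer for rediscovery, and
* UNLINK means a Ping or Push timed out. Any other event type is invalid
* for the discovery MDs. The final unlinked event drops the refcounts
* held on the ping buffer and the peer for that MD.
*/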
3096 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3097 struct lnet_ping_buffer *pbuf,
3098 struct lnet_nid *nid)
3100 pi->pinfo = &pbuf->pb_info;
3101 pi->pos = &pbuf->pb_info.pi_ni;
3102 pi->end = (void *)pi->pinfo +
3103 min_t(int, pbuf->pb_nbytes,
3104 lnet_ping_info_size(pi->pinfo));
3105 /* lnet_ping_info_validate ensures there will be one
3106 * lnet_ni_status at the start
3109 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3111 pi->pos += sizeof(struct lnet_ni_status);
3112 return &pbuf->pb_info.pi_ni[0].ns_status;
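/* Usage sketch for the iterator pair (illustrative; handle_status() is a
* hypothetical callback): walk every NID advertised in a ping buffer,
* whatever mix of small and large address formats it carries:
*
*	struct lnet_ping_iter piter;
*	struct lnet_nid nid;
*	u32 *stp;
*
*	for (stp = ping_iter_first(&piter, pbuf, &nid); stp;
*	     stp = ping_iter_next(&piter, &nid))
*		handle_status(&nid, *stp);
*/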
3115 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3117 int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3119 if (pi->pos < ((void *)pi->pinfo + off)) {
3120 struct lnet_ni_status *ns = pi->pos;
3123 if (pi->pos > pi->end)
3126 lnet_nid4_to_nid(ns->ns_nid, nid);
3127 return &ns->ns_status;
3130 while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3131 struct lnet_ni_large_status *lns = pi->pos;
3133 if (pi->pos + 8 > pi->end)
3134 /* Not safe to examine next */
3136 pi->pos = lnet_ping_sts_next(lns);
3137 if (pi->pos > pi->end)
3139 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3143 return &lns->ns_status;
3148 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3150 struct lnet_ping_iter pi;
3154 for (st = ping_iter_first(&pi, pbuf, NULL); st;
3155 st = ping_iter_next(&pi, NULL))
3161 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3163 if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN)
3164 lnet_handle_remote_failure_locked(lpni);
3165 else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3166 !lpni->lpni_last_alive)
3167 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3171 * Build a peer from incoming data.
3173 * The NIDs in the incoming data are supposed to be structured as follows:
3174 * - loopback
3175 * - primary NID
3176 * - other NIDs in same net
3177 * - NIDs in second net
3178 * - NIDs in third net
3180 * This is due to the way the list of NIDs in the data is created.
3182 * Note that this function will mark the peer uptodate unless an
3183 * ENOMEM is encountered. All other errors are due to a conflict
3184 * between the DLC configuration and what discovery sees. We treat DLC
3185 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3186 * peer from becoming stuck in discovery.
3188 static int lnet_peer_merge_data(struct lnet_peer *lp,
3189 struct lnet_ping_buffer *pbuf)
3191 struct lnet_peer_net *lpn;
3192 struct lnet_peer_ni *lpni;
3193 struct lnet_nid *curnis = NULL;
3194 struct lnet_ni_large_status *addnis = NULL;
3195 struct lnet_nid *delnis = NULL;
3196 struct lnet_ping_iter pi;
3197 struct lnet_nid nid;
3199 struct lnet_nid primary = {};
3200 bool want_large_primary;
3211 flags = LNET_PEER_DISCOVERED;
3212 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3213 flags |= LNET_PEER_MULTI_RAIL;
3216 * Cache the routing feature for the peer; whether it is enabled
3217 * or disabled as reported by the remote peer.
3219 spin_lock(&lp->lp_lock);
3220 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3221 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3223 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3224 spin_unlock(&lp->lp_lock);
3226 nnis = ping_info_count_entries(pbuf);
3227 nnis = max_t(int, lp->lp_nnis, nnis);
3228 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3229 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3230 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3231 if (!curnis || !addnis || !delnis) {
3239 /* Construct the list of NIDs present in peer. */
3241 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3242 curnis[ncurnis++] = lpni->lpni_nid;
3244 /* Check for NIDs in pbuf not present in curnis[].
3245 * Skip the first, which is loop-back. Take second as
3246 * primary, unless a large primary is found.
3248 ping_iter_first(&pi, pbuf, NULL);
3249 stp = ping_iter_next(&pi, &nid);
3252 want_large_primary = (pbuf->pb_info.pi_features &
3253 LNET_PING_FEAT_PRIMARY_LARGE);
3254 for (; stp; stp = ping_iter_next(&pi, &nid)) {
3255 for (j = 0; j < ncurnis; j++)
3256 if (nid_same(&nid, &curnis[j]))
3259 addnis[naddnis].ns_nid = nid;
3260 addnis[naddnis].ns_status = *stp;
3263 if (want_large_primary && nid.nid_size) {
3265 want_large_primary = false;
3269 * Check for NIDs in curnis[] not present in pbuf.
3270 * The nested loop starts at 1 to skip the loopback NID.
3272 * But never add the loopback NID to delnis[]: if it is
3273 * present in curnis[] then this peer is for this node.
3275 for (i = 0; i < ncurnis; i++) {
3276 if (nid_is_lo0(&curnis[i]))
3278 ping_iter_first(&pi, pbuf, NULL);
3279 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3280 if (nid_same(&curnis[i], &nid)) {
3282 * update the information we cache for the
3283 * peer with the latest information we
3284 * received from the peer.
3286 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3288 old_st = lpni->lpni_ns_status;
3289 lpni->lpni_ns_status = *stp;
3290 if (old_st != lpni->lpni_ns_status)
3291 handle_disc_lpni_health(lpni);
3292 lnet_peer_ni_decref_locked(lpni);
3298 delnis[ndelnis++] = curnis[i];
3302 * If we get here and the discovery is disabled then we don't want
3303 * to add or delete any NIs. We just updated the ones we have some
3304 * information on, and call it a day
3307 if (lnet_is_discovery_disabled(lp))
3310 for (i = 0; i < naddnis; i++) {
3311 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3313 CERROR("Error adding NID %s to peer %s: %d\n",
3314 libcfs_nidstr(&addnis[i].ns_nid),
3315 libcfs_nidstr(&lp->lp_primary_nid), rc);
3319 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3321 lpni->lpni_ns_status = addnis[i].ns_status;
3322 handle_disc_lpni_health(lpni);
3323 lnet_peer_ni_decref_locked(lpni);
3327 for (i = 0; i < ndelnis; i++) {
3329 * for routers it's okay to delete the primary_nid because
3330 * the upper layers don't really rely on it. So if we're
3331 * being told that the router changed its primary_nid
3332 * then it's okay to delete it.
3334 if (lp->lp_rtr_refcount > 0)
3335 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3336 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3338 CERROR("Error deleting NID %s from peer %s: %d\n",
3339 libcfs_nidstr(&delnis[i]),
3340 libcfs_nidstr(&lp->lp_primary_nid), rc);
3346 /* The peer net for the primary NID should be the first entry in the
3347 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3348 * be the first entry in its peer net's lpn_peer_nis list.
3350 find_primary(&nid, pbuf);
3351 lpni = lnet_peer_ni_find_locked(&nid);
3353 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3354 libcfs_nidstr(&nid));
3358 lpn = lpni->lpni_peer_net;
3359 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3360 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3362 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3363 list_move(&lpni->lpni_peer_nis,
3364 &lpni->lpni_peer_net->lpn_peer_nis);
3366 lnet_peer_ni_decref_locked(lpni);
3368 * Errors other than -ENOMEM are due to peers having been
3369 * configured with DLC. Ignore these because DLC overrides
3370 * Discovery.
3374 /* If this peer is a gateway, invoke the routing callback to update
3375 * the associated route status
3377 if (lp->lp_rtr_refcount > 0)
3378 lnet_router_discovery_ping_reply(lp, pbuf);
3380 CFS_FREE_PTR_ARRAY(curnis, nnis);
3381 CFS_FREE_PTR_ARRAY(addnis, nnis);
3382 CFS_FREE_PTR_ARRAY(delnis, nnis);
3383 lnet_ping_buffer_decref(pbuf);
3384 CDEBUG(D_NET, "peer %s (%p): %d\n",
3385 libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3388 spin_lock(&lp->lp_lock);
3389 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3390 lp->lp_state |= LNET_PEER_FORCE_PING;
3391 spin_unlock(&lp->lp_lock);
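/* Shape of the merge above - a three-way set difference, sketched with
* the arrays built at the top of the function:
*
*	curnis[] = NIDs currently attached to the peer
*	addnis[] = NIDs in pbuf but not in curnis[] -> lnet_peer_add_nid()
*	delnis[] = NIDs in curnis[] but not in pbuf -> lnet_peer_del_nid()
*
* NIDs present in both sets only get their cached ns_status refreshed,
* and when discovery is disabled the add/del passes are skipped entirely.
*/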
3397 * The data in pbuf says lp is its primary peer, but the data was
3398 * received by a different peer. Try to update lp with the data.
3401 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3403 struct lnet_handle_md mdh;
3405 /* Queue lp for discovery, and force it on the request queue. */
3406 lnet_net_lock(LNET_LOCK_EX);
3407 if (lnet_peer_queue_for_discovery(lp))
3408 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3409 lnet_net_unlock(LNET_LOCK_EX);
3411 LNetInvalidateMDHandle(&mdh);
3414 * Decide whether we can move the peer to the DATA_PRESENT state.
3416 * We replace stale data for a multi-rail peer, repair PING_FAILED
3417 * status, and preempt FORCE_PING.
3419 * If after that we have DATA_PRESENT, we merge it into this peer.
3421 spin_lock(&lp->lp_lock);
3422 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3423 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3424 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3425 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3426 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3427 lnet_ping_buffer_decref(pbuf);
3432 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3433 lnet_ping_buffer_decref(lp->lp_data);
3435 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3437 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3438 mdh = lp->lp_ping_mdh;
3439 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3440 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3441 lp->lp_ping_error = 0;
3443 if (lp->lp_state & LNET_PEER_FORCE_PING)
3444 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3445 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3446 spin_unlock(&lp->lp_lock);
3448 if (!LNetMDHandleIsInvalid(mdh))
3452 return lnet_peer_merge_data(lp, pbuf);
3454 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3458 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3459 struct lnet_ping_buffer *pbuf)
3461 struct lnet_ping_iter pi;
3462 struct lnet_nid pnid;
3465 for (st = ping_iter_first(&pi, pbuf, &pnid);
3467 st = ping_iter_next(&pi, &pnid))
3468 if (nid_same(nid, &pnid))
3473 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3474 * to the discovery queue a reference was taken that will prevent the peer from
3475 * actually being freed by this function. After this function exits the
3476 * discovery thread should call lnet_peer_discovery_complete() which will
3477 * drop that reference as well as wake any waiters that may also be holding a
3478 * ref on the peer.
3480 static int lnet_peer_deletion(struct lnet_peer *lp)
3481 __must_hold(&lp->lp_lock)
3483 struct list_head rlist;
3484 struct lnet_route *route, *tmp;
3485 int sensitivity = lp->lp_health_sensitivity;
3488 INIT_LIST_HEAD(&rlist);
3490 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3491 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3493 /* no-op if lnet_peer_del() has already been called on this peer */
3494 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3495 goto clear_discovering;
3497 spin_unlock(&lp->lp_lock);
3499 mutex_lock(&the_lnet.ln_api_mutex);
3500 if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3501 the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3502 mutex_unlock(&the_lnet.ln_api_mutex);
3503 spin_lock(&lp->lp_lock);
3505 goto clear_discovering;
3508 lnet_peer_cancel_discovery(lp);
3509 lnet_net_lock(LNET_LOCK_EX);
3510 list_for_each_entry_safe(route, tmp,
3513 lnet_move_route(route, NULL, &rlist);
3515 /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3516 rc = lnet_peer_del_locked(lp);
3518 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3519 libcfs_nidstr(&lp->lp_primary_nid), rc);
3521 lnet_net_unlock(LNET_LOCK_EX);
3523 list_for_each_entry_safe(route, tmp,
3525 /* re-add these routes */
3526 lnet_add_route(route->lr_net,
3531 LIBCFS_FREE(route, sizeof(*route));
3534 mutex_unlock(&the_lnet.ln_api_mutex);
3536 spin_lock(&lp->lp_lock);
3541 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3542 LNET_PEER_FORCE_PUSH);
3548 * Update a peer using the data received.
3550 static int lnet_peer_data_present(struct lnet_peer *lp)
3551 __must_hold(&lp->lp_lock)
3553 struct lnet_ping_buffer *pbuf;
3554 struct lnet_peer_ni *lpni;
3555 struct lnet_nid nid;
3561 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3562 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3563 spin_unlock(&lp->lp_lock);
3566 * Modifications of peer structures are done while holding the
3567 * ln_api_mutex. A global lock is required because we may be
3568 * modifying multiple peer structures, and a mutex greatly
3569 * simplifies memory management.
3571 * The actual changes to the data structures must also protect
3572 * against concurrent lookups, for which the lnet_net_lock in
3573 * LNET_LOCK_EX mode is used.
3575 mutex_lock(&the_lnet.ln_api_mutex);
3576 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3582 * If this peer is not on the peer list then it is being torn
3583 * down, and our reference count may be all that is keeping it
3584 * alive. Don't do any work on it.
3586 if (list_empty(&lp->lp_peer_list)) {
3587 lnet_ping_buffer_decref(pbuf);
3591 flags = LNET_PEER_DISCOVERED;
3592 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3593 flags |= LNET_PEER_MULTI_RAIL;
3596 * Check whether the primary NID in the message matches the
3597 * primary NID of the peer. If it does, update the peer, if
3598 * it does not, check whether there is already a peer with
3599 * that primary NID. If no such peer exists, try to update
3600 * the primary NID of the current peer (allowed if it was
3601 * created due to message traffic) and complete the update.
3602 * If the peer did exist, hand off the data to it.
3604 * The peer for the loopback interface is a special case: this
3605 * is the peer for the local node, and we want to set its
3606 * primary NID to the correct value here. Moreover, this peer
3607 * can show up with only the loopback NID in the ping buffer.
3609 if (!find_primary(&nid, pbuf)) {
3610 lnet_ping_buffer_decref(pbuf);
3613 if (nid_is_lo0(&lp->lp_primary_nid)) {
3614 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3616 lnet_ping_buffer_decref(pbuf);
3618 rc = lnet_peer_merge_data(lp, pbuf);
3620 * if the primary nid we have cached for the peer is present in the
3621 * ping info returned from the peer, even though it is not the
3622 * primary nid the peer itself declared, and discovery is disabled,
3623 * then we don't want to update our local peer info by adding or
3624 * removing NIDs; we just want to update the status of the nids
3625 * that we currently have recorded in that peer.
3627 } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3628 (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3629 lnet_is_discovery_disabled(lp))) {
3630 rc = lnet_peer_merge_data(lp, pbuf);
3632 lpni = lnet_peer_ni_find_locked(&nid);
3633 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3634 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3636 CERROR("Primary NID error %s versus %s: %d\n",
3637 libcfs_nidstr(&lp->lp_primary_nid),
3638 libcfs_nidstr(&nid), rc);
3639 lnet_ping_buffer_decref(pbuf);
3641 rc = lnet_peer_merge_data(lp, pbuf);
3644 lnet_peer_ni_decref_locked(lpni);
3646 struct lnet_peer *new_lp;
3647 new_lp = lpni->lpni_peer_net->lpn_peer;
3649 * if lp has discovery/MR enabled that means new_lp
3650 * should have discovery/MR enabled as well, since
3651 * it's the same peer, which we're about to merge
3653 spin_lock(&lp->lp_lock);
3654 spin_lock(&new_lp->lp_lock);
3655 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3656 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3657 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3658 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3659 /* If we're processing a ping reply then we may be
3660 * about to send a push to the peer that we ping'd.
3661 * Since the ping reply that we're processing was
3662 * received by lp, we need to set the discovery source
3663 * NID for new_lp to the NID stored in lp.
3665 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3666 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3667 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3669 spin_unlock(&new_lp->lp_lock);
3670 spin_unlock(&lp->lp_lock);
3672 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3673 lnet_consolidate_routes_locked(lp, new_lp);
3674 lnet_peer_ni_decref_locked(lpni);
3678 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3679 libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3681 mutex_unlock(&the_lnet.ln_api_mutex);
3683 spin_lock(&lp->lp_lock);
3684 /* Tell discovery to re-check the peer immediately. */
3686 rc = LNET_REDISCOVER_PEER;
3691 * A ping failed. Clear the PING_FAILED state and set the
3692 * FORCE_PING state, to ensure a retry even if discovery is
3693 * disabled. This avoids being left with incorrect state.
3695 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3696 __must_hold(&lp->lp_lock)
3698 struct lnet_handle_md mdh;
3701 mdh = lp->lp_ping_mdh;
3702 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3703 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3704 lp->lp_state |= LNET_PEER_FORCE_PING;
3705 rc = lp->lp_ping_error;
3706 lp->lp_ping_error = 0;
3707 spin_unlock(&lp->lp_lock);
3709 if (!LNetMDHandleIsInvalid(mdh))
3712 CDEBUG(D_NET, "peer %s:%d\n",
3713 libcfs_nidstr(&lp->lp_primary_nid), rc);
3715 spin_lock(&lp->lp_lock);
3716 return rc ? rc : LNET_REDISCOVER_PEER;
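/* The pattern above - detach the MD handle under lp_lock, invalidate the
* cached copy, then call LNetMDUnlink() only after dropping the lock -
* is presumably needed because the unlink event handler takes lp_lock
* itself (see lnet_discovery_event_unlink() above), so unlinking with the
* lock held could deadlock. lnet_peer_push_failed() below follows the
* same pattern for the push MD.
*/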
3719 /* Active side of ping. */
3720 static int lnet_peer_send_ping(struct lnet_peer *lp)
3721 __must_hold(&lp->lp_lock)
3727 lp->lp_state |= LNET_PEER_PING_SENT;
3728 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3729 spin_unlock(&lp->lp_lock);
3731 cpt = lnet_net_lock_current();
3732 /* Refcount for MD. */
3733 lnet_peer_addref_locked(lp);
3734 lnet_net_unlock(cpt);
3736 bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3738 rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3739 the_lnet.ln_dc_handler, false);
3740 /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3741 * refcount on the peer, otherwise LNetMDUnlink will be called
3742 * which will eventually do that.
3746 lnet_peer_decref_locked(lp);
3747 lnet_net_unlock(cpt);
3748 rc = -rc; /* change the rc to negative value */
3750 } else if (rc < 0) {
3754 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3756 spin_lock(&lp->lp_lock);
3760 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3762 * The errors that get us here are considered hard errors and
3763 * cause Discovery to terminate. So we clear PING_SENT, but do
3764 * not set either PING_FAILED or FORCE_PING. In fact we need
3765 * to clear PING_FAILED, because the unlink event handler will
3766 * have set it if we called LNetMDUnlink() above.
3768 spin_lock(&lp->lp_lock);
3769 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3774 * This function exists because you cannot call LNetMDUnlink() from an
3775 * event handler.
3777 static int lnet_peer_push_failed(struct lnet_peer *lp)
3778 __must_hold(&lp->lp_lock)
3780 struct lnet_handle_md mdh;
3783 mdh = lp->lp_push_mdh;
3784 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3785 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3786 rc = lp->lp_push_error;
3787 lp->lp_push_error = 0;
3788 spin_unlock(&lp->lp_lock);
3790 if (!LNetMDHandleIsInvalid(mdh))
3793 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3794 spin_lock(&lp->lp_lock);
3795 return rc ? rc : LNET_REDISCOVER_PEER;
3799 * Mark the peer as discovered.
3801 static int lnet_peer_discovered(struct lnet_peer *lp)
3802 __must_hold(&lp->lp_lock)
3804 lp->lp_state |= LNET_PEER_DISCOVERED;
3805 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3806 LNET_PEER_REDISCOVER);
3808 lp->lp_dc_error = 0;
3810 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3815 /* Active side of push. */
3816 static int lnet_peer_send_push(struct lnet_peer *lp)
3817 __must_hold(&lp->lp_lock)
3819 struct lnet_ping_buffer *pbuf;
3820 struct lnet_processid id;
3825 /* Don't push to a non-multi-rail peer. */
3826 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3827 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3828 /* if peer's NIDs are uptodate then peer is discovered */
3829 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3830 rc = lnet_peer_discovered(lp);
3837 lp->lp_state |= LNET_PEER_PUSH_SENT;
3838 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3839 spin_unlock(&lp->lp_lock);
3841 cpt = lnet_net_lock_current();
3842 pbuf = the_lnet.ln_ping_target;
3843 lnet_ping_buffer_addref(pbuf);
3844 lnet_net_unlock(cpt);
3846 /* Push source MD */
3847 md.start = &pbuf->pb_info;
3848 md.length = pbuf->pb_nbytes;
3849 md.threshold = 2; /* Put/Ack */
3851 md.options = LNET_MD_TRACK_RESPONSE;
3852 md.handler = the_lnet.ln_dc_handler;
3855 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3857 lnet_ping_buffer_decref(pbuf);
3858 CERROR("Can't bind push source MD: %d\n", rc);
3862 cpt = lnet_net_lock_current();
3863 /* Refcount for MD. */
3864 lnet_peer_addref_locked(lp);
3865 id.pid = LNET_PID_LUSTRE;
3866 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3867 id.nid = lp->lp_disc_dst_nid;
3869 id.nid = lp->lp_primary_nid;
3870 lnet_net_unlock(cpt);
3872 rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3873 LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3874 LNET_PROTO_PING_MATCHBITS, 0, 0);
3877 * reset the discovery nid. There is no need to restrict sending
3878 * from that source, if we call lnet_push_update_to_peers(). It'll
3879 * get set to a specific NID, if we initiate discovery from the
3880 * scratch.
3882 lp->lp_disc_src_nid = LNET_ANY_NID;
3883 lp->lp_disc_dst_nid = LNET_ANY_NID;
3888 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3890 spin_lock(&lp->lp_lock);
3894 LNetMDUnlink(lp->lp_push_mdh);
3895 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3897 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3900 * The errors that get us here are considered hard errors and
3901 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3902 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3903 * because the unlink event handler will have set it if we
3904 * called LNetMDUnlink() above.
3906 spin_lock(&lp->lp_lock);
3907 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3912 * Wait for work to be queued or some other change that must be
3913 * attended to. Returns non-zero if the discovery thread should shut
3914 * down.
3916 static int lnet_peer_discovery_wait_for_work(void)
3923 cpt = lnet_net_lock_current();
3925 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3926 TASK_INTERRUPTIBLE);
3927 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3929 if (lnet_push_target_resize_needed() ||
3930 the_lnet.ln_push_target->pb_needs_post)
3932 if (!list_empty(&the_lnet.ln_dc_request))
3934 if (!list_empty(&the_lnet.ln_msg_resend))
3936 lnet_net_unlock(cpt);
3939 * wake up at most every second to check if there are peers that
3940 * have been stuck on the working queue for greater than
3941 * the peer timeout.
3943 schedule_timeout(cfs_time_seconds(1));
3944 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3945 cpt = lnet_net_lock_current();
3947 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3949 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3952 lnet_net_unlock(cpt);
3954 CDEBUG(D_NET, "woken: %d\n", rc);
3960 * Messages that were pending on a destroyed peer will be put on a global
3961 * resend list. The message resend list will be checked by
3962 * the discovery thread when it wakes up, and will resend messages. These
3963 * messages can still be sendable in the case where the lpni which was the
3964 * initial cause of the message re-queue was transferred to another peer.
3966 * It is possible that LNet could be shut down while we're iterating
3967 * through the list. lnet_shutdown_lndnets() will attempt to access the
3968 * resend list, but will have to wait until the spinlock is released, by
3969 * which time there shouldn't be any more messages on the resend list.
3970 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3971 * for the messages so they can be released. The other case is that
3972 * lnet_shutdown_lndnets() can finalize all the messages before this
3973 * function can visit the resend list, in which case this function will be
3974 * a no-op.
3976 static void lnet_resend_msgs(void)
3978 struct lnet_msg *msg, *tmp;
3982 spin_lock(&the_lnet.ln_msg_resend_lock);
3983 list_splice(&the_lnet.ln_msg_resend, &resend);
3984 spin_unlock(&the_lnet.ln_msg_resend_lock);
3986 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3987 list_del_init(&msg->msg_list);
3988 rc = lnet_send(&msg->msg_src_nid_param, msg,
3989 &msg->msg_rtr_nid_param);
3991 CNETERR("Error sending %s to %s: %d\n",
3992 lnet_msgtyp2str(msg->msg_type),
3993 libcfs_idstr(&msg->msg_target), rc);
3994 lnet_finalize(msg, rc);
3999 /* The discovery thread. */
4000 static int lnet_peer_discovery(void *arg)
4002 struct lnet_peer *lp;
4005 wait_for_completion(&the_lnet.ln_started);
4007 CDEBUG(D_NET, "started\n");
4010 if (lnet_peer_discovery_wait_for_work())
4013 if (lnet_push_target_resize_needed())
4014 lnet_push_target_resize();
4015 else if (the_lnet.ln_push_target->pb_needs_post)
4016 lnet_push_target_post(the_lnet.ln_push_target,
4017 &the_lnet.ln_push_target_md);
4021 lnet_net_lock(LNET_LOCK_EX);
4022 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4023 lnet_net_unlock(LNET_LOCK_EX);
4028 * Process all incoming discovery work requests. When
4029 * discovery must wait on a peer to change state, it
4030 * is added to the tail of the ln_dc_working queue. A
4031 * timestamp keeps track of when the peer was added,
4032 * so we can time out discovery requests that take too
4033 * long.
4035 while (!list_empty(&the_lnet.ln_dc_request)) {
4036 lp = list_first_entry(&the_lnet.ln_dc_request,
4037 struct lnet_peer, lp_dc_list);
4038 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4040 * set the time the peer was put on the dc_working
4041 * queue. It shouldn't remain on the queue
4042 * forever, in case the GET message (for ping)
4043 * doesn't get a REPLY or the PUT message (for
4044 * push) doesn't get an ACK.
4046 lp->lp_last_queued = ktime_get_real_seconds();
4047 lnet_net_unlock(LNET_LOCK_EX);
4049 if (lnet_push_target_resize_needed())
4050 lnet_push_target_resize();
4051 else if (the_lnet.ln_push_target->pb_needs_post)
4052 lnet_push_target_post(the_lnet.ln_push_target,
4053 &the_lnet.ln_push_target_md);
4056 * Select an action depending on the state of
4057 * the peer and whether discovery is disabled.
4058 * The check whether discovery is disabled is
4059 * done after the code that handles processing
4060 * for arrived data, cleanup for failures, and
4061 * forcing a Ping or Push.
4063 spin_lock(&lp->lp_lock);
4064 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4065 libcfs_nidstr(&lp->lp_primary_nid), lp,
4067 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4068 LNET_PEER_MARK_DELETED))
4069 rc = lnet_peer_deletion(lp);
4070 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4071 rc = lnet_peer_data_present(lp);
4072 else if (lp->lp_state & LNET_PEER_PING_FAILED)
4073 rc = lnet_peer_ping_failed(lp);
4074 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4075 rc = lnet_peer_push_failed(lp);
4076 else if (lp->lp_state & LNET_PEER_FORCE_PING)
4077 rc = lnet_peer_send_ping(lp);
4078 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4079 rc = lnet_peer_send_push(lp);
4080 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4081 rc = lnet_peer_send_ping(lp);
4082 else if (lnet_peer_needs_push(lp))
4083 rc = lnet_peer_send_push(lp);
4085 rc = lnet_peer_discovered(lp);
4086 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4087 libcfs_nidstr(&lp->lp_primary_nid), lp,
4090 if (rc == LNET_REDISCOVER_PEER) {
4091 spin_unlock(&lp->lp_lock);
4092 lnet_net_lock(LNET_LOCK_EX);
4093 list_move(&lp->lp_dc_list,
4094 &the_lnet.ln_dc_request);
4096 !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4097 spin_unlock(&lp->lp_lock);
4098 lnet_net_lock(LNET_LOCK_EX);
4099 lnet_peer_discovery_complete(lp, rc);
4101 spin_unlock(&lp->lp_lock);
4102 lnet_net_lock(LNET_LOCK_EX);
4105 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4110 lnet_net_unlock(LNET_LOCK_EX);
4113 CDEBUG(D_NET, "stopping\n");
4115 * Clean up before telling lnet_peer_discovery_stop() that
4116 * we're done. Use wake_up() below to somewhat reduce the
4117 * size of the thundering herd if there are multiple threads
4118 * waiting on discovery of a single peer.
4121 /* Queue cleanup 1: stop all pending pings and pushes. */
4122 lnet_net_lock(LNET_LOCK_EX);
4123 while (!list_empty(&the_lnet.ln_dc_working)) {
4124 lp = list_first_entry(&the_lnet.ln_dc_working,
4125 struct lnet_peer, lp_dc_list);
4126 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4127 lnet_net_unlock(LNET_LOCK_EX);
4128 lnet_peer_cancel_discovery(lp);
4129 lnet_net_lock(LNET_LOCK_EX);
4131 lnet_net_unlock(LNET_LOCK_EX);
4133 /* Queue cleanup 2: wait for the expired queue to clear. */
4134 while (!list_empty(&the_lnet.ln_dc_expired))
4135 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4137 /* Queue cleanup 3: clear the request queue. */
4138 lnet_net_lock(LNET_LOCK_EX);
4139 while (!list_empty(&the_lnet.ln_dc_request)) {
4140 lp = list_first_entry(&the_lnet.ln_dc_request,
4141 struct lnet_peer, lp_dc_list);
4142 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4144 lnet_net_unlock(LNET_LOCK_EX);
4146 lnet_assert_handler_unused(the_lnet.ln_dc_handler);
4147 the_lnet.ln_dc_handler = NULL;
4149 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4150 wake_up(&the_lnet.ln_dc_waitq);
4152 CDEBUG(D_NET, "stopped\n");
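/* Per-peer action precedence used in the loop above, highest first:
* deletion, arrived ping data, ping/push failure cleanup, a forced Ping
* or Push, a plain ping when the NIDs are stale, a push when the peer
* needs one, and finally marking the peer discovered. A
* LNET_REDISCOVER_PEER return from any step puts the peer back on the
* request queue.
*/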
4157 /* ln_api_mutex is held on entry. */
4158 int lnet_peer_discovery_start(void)
4160 struct task_struct *task;
4163 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4166 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4167 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4168 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4171 CERROR("Can't start peer discovery thread: %d\n", rc);
4173 the_lnet.ln_dc_handler = NULL;
4175 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4178 CDEBUG(D_NET, "discovery start: %d\n", rc);
4183 /* ln_api_mutex is held on entry. */
4184 void lnet_peer_discovery_stop(void)
4186 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4189 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4190 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4192 /* In the LNetNIInit() path we may be stopping discovery before it
4193 * entered its work loop
4195 if (!completion_done(&the_lnet.ln_started))
4196 complete(&the_lnet.ln_started);
4198 wake_up(&the_lnet.ln_dc_waitq);
4200 mutex_unlock(&the_lnet.ln_api_mutex);
4201 wait_event(the_lnet.ln_dc_waitq,
4202 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4203 mutex_lock(&the_lnet.ln_api_mutex);
4205 LASSERT(list_empty(&the_lnet.ln_dc_request));
4206 LASSERT(list_empty(&the_lnet.ln_dc_working));
4207 LASSERT(list_empty(&the_lnet.ln_dc_expired));
4209 CDEBUG(D_NET, "discovery stopped\n");
4215 lnet_debug_peer(struct lnet_nid *nid)
4217 char *aliveness = "NA";
4218 struct lnet_peer_ni *lp;
4221 cpt = lnet_nid2cpt(nid, NULL);
4224 lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4226 lnet_net_unlock(cpt);
4227 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4231 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4232 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4234 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4235 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4236 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4237 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4238 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4240 lnet_peer_ni_decref_locked(lp);
4242 lnet_net_unlock(cpt);
4245 /* Gathering information for userspace. */
4247 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4248 char aliveness[LNET_MAX_STR_LEN],
4249 __u32 *cpt_iter, __u32 *refcount,
4250 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4251 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4252 __u32 *peer_tx_qnob)
4254 struct lnet_peer_table *peer_table;
4255 struct lnet_peer_ni *lp;
4260 /* get the number of CPTs */
4261 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4263 /* if the cpt number to be examined is >= the number of cpts in
4264 * the system then indicate that there are no more cpts to examine
4266 if (*cpt_iter >= lncpt)
4269 /* get the current table */
4270 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4271 /* if the ptable is NULL then there are no more cpts to examine */
4272 if (peer_table == NULL)
4275 lnet_net_lock(*cpt_iter);
4277 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4278 struct list_head *peers = &peer_table->pt_hash[j];
4280 list_for_each_entry(lp, peers, lpni_hashlist) {
4281 if (!nid_is_nid4(&lp->lpni_nid))
4283 if (peer_index-- > 0)
4286 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4287 if (lnet_isrouter(lp) ||
4288 lnet_peer_aliveness_enabled(lp))
4289 snprintf(aliveness, LNET_MAX_STR_LEN,
4290 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4292 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4293 *refcount = kref_read(&lp->lpni_kref);
4294 *ni_peer_tx_credits =
4295 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4296 *peer_tx_credits = lp->lpni_txcredits;
4297 *peer_rtr_credits = lp->lpni_rtrcredits;
4298 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4299 *peer_tx_qnob = lp->lpni_txqnob;
4305 lnet_net_unlock(*cpt_iter);
4309 return found ? 0 : -ENOENT;
4312 /* ln_api_mutex is held, which keeps the peer list stable */
4313 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4315 struct lnet_ioctl_element_stats *lpni_stats;
4316 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4317 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4318 struct lnet_peer_ni_credit_info *lpni_info;
4319 struct lnet_peer_ni *lpni;
4320 struct lnet_peer *lp;
4322 struct lnet_nid nid;
4326 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4327 lp = lnet_find_peer(&nid);
4333 size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4334 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4335 size *= lp->lp_nnis;
4336 if (size > cfg->prcfg_size) {
4337 cfg->prcfg_size = size;
4342 cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4343 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4344 cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4345 cfg->prcfg_count = lp->lp_nnis;
4346 cfg->prcfg_size = size;
4347 cfg->prcfg_state = lp->lp_state;
4349 /* Allocate helper buffers. */
4351 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4354 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4357 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4358 if (!lpni_msg_stats)
4359 goto out_free_stats;
4360 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4362 goto out_free_msg_stats;
4367 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4368 if (!nid_is_nid4(&lpni->lpni_nid))
4370 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4371 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4372 goto out_free_hstats;
4373 bulk += sizeof(nid4);
4375 memset(lpni_info, 0, sizeof(*lpni_info));
4376 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4377 if (lnet_isrouter(lpni) ||
4378 lnet_peer_aliveness_enabled(lpni))
4379 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4380 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4382 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4383 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4384 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4385 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4386 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4387 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4388 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4389 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4390 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4391 goto out_free_hstats;
4392 bulk += sizeof(*lpni_info);
4394 memset(lpni_stats, 0, sizeof(*lpni_stats));
4395 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4396 LNET_STATS_TYPE_SEND);
4397 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4398 LNET_STATS_TYPE_RECV);
4399 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4400 LNET_STATS_TYPE_DROP);
4401 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4402 goto out_free_hstats;
4403 bulk += sizeof(*lpni_stats);
4404 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4405 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4406 goto out_free_hstats;
4407 bulk += sizeof(*lpni_msg_stats);
4408 lpni_hstats->hlpni_network_timeout =
4409 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4410 lpni_hstats->hlpni_remote_dropped =
4411 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4412 lpni_hstats->hlpni_remote_timeout =
4413 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4414 lpni_hstats->hlpni_remote_error =
4415 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4416 lpni_hstats->hlpni_health_value =
4417 atomic_read(&lpni->lpni_healthv);
4418 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4419 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4420 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4421 goto out_free_hstats;
4422 bulk += sizeof(*lpni_hstats);
4427 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4429 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4431 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4433 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4435 lnet_peer_decref_locked(lp);
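/* Bulk record layout produced above, repeated lp_nnis times (only NID4
* peer NIs are emitted):
*
*	__u64 nid4
*	struct lnet_peer_ni_credit_info
*	struct lnet_ioctl_element_stats
*	struct lnet_ioctl_element_msg_stats
*	struct lnet_ioctl_peer_ni_hstats
*
* matching the per-NI size used to compute cfg->prcfg_size.
*/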
4440 /* must hold net_lock/0 */
4442 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4443 struct list_head *recovery_queue,
4446 /* the monitor thread could've shut down and cleaned up the queues */
4447 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4450 if (!list_empty(&lpni->lpni_recovery))
4453 if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4456 if (!lpni->lpni_last_alive) {
4458 "lpni %s(%p) not eligible for recovery last alive %lld\n",
4459 libcfs_nidstr(&lpni->lpni_nid), lpni,
4460 lpni->lpni_last_alive);
4464 if (lnet_recovery_limit &&
4465 now > lpni->lpni_last_alive + lnet_recovery_limit) {
4466 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4467 libcfs_nidstr(&lpni->lpni_nid),
4468 lpni->lpni_last_alive);
4469 /* Reset the ping count so that if this peer NI is added back to
4470 * the recovery queue we will send the first ping right away.
4472 lpni->lpni_ping_count = 0;
4476 /* This peer NI is going on the recovery queue, so take a ref on it */
4477 lnet_peer_ni_addref_locked(lpni);
4479 lnet_peer_ni_set_next_ping(lpni, now);
4481 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4482 libcfs_nidstr(&lpni->lpni_nid),
4483 lpni->lpni_ping_count,
4484 lpni->lpni_next_ping,
4485 lpni->lpni_last_alive,
4486 atomic_read(&lpni->lpni_healthv));
4488 list_add_tail(&lpni->lpni_recovery, recovery_queue);
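/* Eligibility summary for the function above: a peer NI is queued for
* recovery only if the monitor thread is running, it is not already
* queued, its health is below LNET_MAX_HEALTH_VALUE, it has been alive at
* some point (lpni_last_alive != 0), and lnet_recovery_limit has not aged
* it out. Queueing takes a ref on the lpni and schedules its next ping.
*/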
4491 /* Call with the ln_api_mutex held */
4493 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4495 struct lnet_peer_table *ptable;
4496 struct lnet_peer *lp;
4497 struct lnet_peer_net *lpn;
4498 struct lnet_peer_ni *lpni;
4503 if (the_lnet.ln_state != LNET_STATE_RUNNING)
4506 now = ktime_get_seconds();
4509 lnet_net_lock(LNET_LOCK_EX);
4510 lpni = lnet_peer_ni_find_locked(nid);
4512 lnet_net_unlock(LNET_LOCK_EX);
4515 lnet_set_lpni_healthv_locked(lpni, value);
4516 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4517 &the_lnet.ln_mt_peerNIRecovq, now);
4518 lnet_peer_ni_decref_locked(lpni);
4519 lnet_net_unlock(LNET_LOCK_EX);
4523 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4526 * Walk all the peers and reset the health value for each one to the
4527 * specified value.
4529 lnet_net_lock(LNET_LOCK_EX);
4530 for (cpt = 0; cpt < lncpt; cpt++) {
4531 ptable = the_lnet.ln_peer_tables[cpt];
4532 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4533 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4534 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4536 lnet_set_lpni_healthv_locked(lpni,
4538 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4539 &the_lnet.ln_mt_peerNIRecovq, now);
4544 lnet_net_unlock(LNET_LOCK_EX);