// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */

/* This file is part of Lustre, http://www.lustre.org/ */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
			    unsigned int flags);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}

void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {
		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}

void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table *ptable;
	struct list_head *hash;
	int i;
	int j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
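
/*
 * Lookup sketch (illustrative, not part of the original file): peers are
 * found by hashing the NID to a CPT and then to a chain of the per-CPT
 * hash table created above, as lnet_peer_ni_find_locked() does below:
 *
 *	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
 *	ptable = the_lnet.ln_peer_tables[cpt];
 *	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
 */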

static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = *nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
}
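
/*
 * Construction sketch (illustrative): the usual sequence, as in
 * lnet_peer_add() below, is to allocate all three objects and hand them
 * to lnet_peer_attach_peer_ni(), which links them atomically and
 * consumes the lpni reference:
 *
 *	lp = lnet_peer_alloc(nid);
 *	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
 *	lpni = lnet_peer_ni_alloc(nid);
 *	rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
 */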

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = *nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for loopback peer. If you're creating a peer
	 * for the loopback interface then that was initiated when we
	 * attempted to send a message over the loopback. There is no need
	 * to ever use a different interface when sending messages to
	 * the loopback peer.
	 */
	if (nid_is_lo0(nid))
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * if there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on the list.
	 * We cannot resend them now because we're holding the cpt lock.
	 * Releasing the lock can cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
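
/*
 * Zombie lifecycle note (derived from the code above and below): after
 * lnet_peer_ni_del_locked() the lpni sits on ptable->pt_zombie_list
 * (reusing lpni_hashlist) until the final kref is dropped, at which
 * point lnet_destroy_peer_ni_locked() takes pt_zombie_lock, unlinks it
 * and decrements pt_zombies. lnet_peer_ni_finalize_wait() below blocks
 * until pt_zombies reaches zero.
 */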

void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}

int
lnet_peer_del(struct lnet_peer *peer)
{
	int rc;

	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	rc = lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
{
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    nid_same(&primary_nid, nid)) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		lnet_peer_ni_decref_locked(lpni);
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);
	lnet_peer_ni_decref_locked(lpni);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
	       flags, rc);

	return rc;
}
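
/*
 * Usage sketch (illustrative): deleting a secondary NID from a
 * DLC-configured peer mirrors the flag handling at the bottom of
 * lnet_del_peer_ni() later in this file:
 *
 *	unsigned int flags = LNET_PEER_CONFIGURED;
 *
 *	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
 *		flags |= LNET_PEER_MULTI_RAIL;
 *	rc = lnet_peer_del_nid(lp, nid, flags);
 */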

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int i;
	struct lnet_peer_ni *next;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				      &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni *lp;
	struct lnet_peer_ni *tmp;
	struct lnet_nid gw_nid;
	int i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, &gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for.
	 */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
	struct list_head *peers;
	struct lnet_peer_ni *lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (nid_same(&lpni->lpni_nid, nid))
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}
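
/*
 * Refcounting sketch (illustrative): lnet_find_peer() returns the peer
 * with a reference held, so callers pair it with a locked decref, as
 * LNetPeerDiscovered() does below:
 *
 *	lp = lnet_find_peer(nid);
 *	if (lp) {
 *		cpt = lnet_net_lock_current();
 *		... use lp ...
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(cpt);
 *	}
 */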

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_first_entry(&peer->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		}
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net.
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
				       struct lnet_peer_net,
				       lpn_peer_nets);
		/* get the ni on it */
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_first_entry(&prev->lpni_peer_nis,
				struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
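
/*
 * Iteration sketch (illustrative): passing a NULL peer_net visits every
 * NI of the peer across all of its nets, the idiom used throughout this
 * file, e.g. by lnet_peer_clr_non_mr_pref_nids():
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		... visit lpni ...
 */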

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	u32 count = 0;
	u32 size = 0;
	u32 i = 0;
	int lncpt;
	int cpt;
	int rc = -E2BIG;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return -ESHUTDOWN;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;

	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;
			if (i >= count)
				goto done;
			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;

done:
	*countp = count;
	*sizep = size;
	return rc;
}
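
/*
 * Calling convention note (illustrative): on -E2BIG the desired buffer
 * size is still reported through *sizep, so the expected ioctl usage is
 * to call once to learn the size, then call again with a buffer large
 * enough to hold all primary NIDs.
 */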

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
		return;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     struct lnet_nid *gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(gw_nid));
		if (nid_same(&ne->nl_nid, gw_nid))
			return true;
	}

	return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       struct lnet_nid *gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with api_mutex held. When the api_mutex
	 * is held the list cannot be modified, as it is only modified as
	 * a result of applying a UDSP and that happens under api_mutex
	 * lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode held.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return nid_same(&lpni->lpni_pref.nid, nid);
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (nid_same(&ne->nl_nid, nid))
			return true;
	}

	return false;
}
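
/*
 * Note on the encoding used above: lpni_pref is a union. With
 * lpni_pref_nnids == 1 the single preferred NID is stored inline in
 * lpni_pref.nid; with more than one, the NIDs live on the
 * lpni_pref.nids list as struct lnet_nid_list entries.
 * lnet_peer_add_pref_nid() below migrates between the two forms.
 */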

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
				 struct lnet_nid *nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);

	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static void
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	struct lnet_nid *tmp_nid = NULL;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 &&
	    nid_same(&lpni->lpni_pref.nid, nid)) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = &lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne1->nl_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
					 lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				LIBCFS_FREE(ne1, alloc_size);
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = *tmp_nid;
		}

		ne1->nl_nid = *nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
	return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (!nid_same(&lpni->lpni_pref.nid, nid)) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne->nl_nid, nid))
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
	struct lnet_peer_ni *lpni;

	*result = *nid;
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		*result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

int
LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
{
	struct lnet_nid pnid = LNET_ANY_NID;
	bool mr;
	int rc = 0;
	int i;
	int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	rc = 0;
	for (i = 0; i < num_nids; i++) {
		if (nid_is_lo0(&nids[i]))
			continue;

		if (LNET_NID_IS_ANY(&pnid)) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
			if (rc == -EALREADY) {
				struct lnet_peer *lp;

				CDEBUG(D_NET, "A peer exists for NID %s\n",
				       libcfs_nidstr(&pnid));
				rc = 0;
				/* Adds a refcount */
				lp = lnet_find_peer(&pnid);
				LASSERT(lp);
				pnid = lp->lp_primary_nid;
				/* Drop refcount from lookup */
				lnet_peer_decref_locked(lp);
			}
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
					      flags);
		} else {
			rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
					      flags);
		}

		if (rc && rc != -EEXIST)
			goto unlock;
	}

unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);
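
/*
 * Usage sketch (hypothetical caller): a Lustre-level user that knows
 * several NIDs of one server passes them in a single call; the first
 * usable NID becomes, or resolves to, the primary NID:
 *
 *	struct lnet_nid nids[2];	(filled in by the caller)
 *	int rc = LNetAddPeer(nids, 2);
 */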

void LNetPrimaryNID(struct lnet_nid *nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	struct lnet_nid orig;
	int rc = 0;
	int cpt;

	if (!nid || nid_is_lo0(nid))
		return;
	orig = *nid;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
again:
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}

	/* DD disabled, nothing to do */
	if (lnet_peer_discovery_disabled) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}

	/* Peer already up to date, nothing to do */
	if (lnet_peer_is_uptodate_locked(lp)) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}
	spin_unlock(&lp->lp_lock);

	/* If primary nid locking is enabled, discovery is performed
	 * in the background.
	 * If primary nid locking is disabled, discovery blocks here.
	 * Messages to the peer will not go through until the discovery is
	 * complete.
	 */
	if (lock_prim_nid)
		rc = lnet_discover_peer_locked(lpni, cpt, false);
	else
		rc = lnet_discover_peer_locked(lpni, cpt, true);
	if (rc)
		goto out_decref;

	/* The lpni (or lp) for this NID may have changed and our ref is
	 * the only thing keeping the old one around. Release the ref
	 * and lookup the lpni again
	 */
	lnet_peer_ni_decref_locked(lpni);
	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
		goto again;

	*nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
	       libcfs_nidstr(nid), rc);
}
EXPORT_SYMBOL(LNetPrimaryNID);
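
/*
 * Usage sketch (illustrative): the NID is rewritten in place, so a
 * caller holding any NID of a peer can normalize it to that peer's
 * primary NID:
 *
 *	struct lnet_nid nid;	(any NID of the peer, filled in by caller)
 *
 *	LNetPrimaryNID(&nid);	(nid now holds the primary NID)
 */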

bool
LNetPeerDiscovered(struct lnet_nid *nid)
{
	int cpt, disc = false;
	struct lnet_peer *lp;

	lp = lnet_find_peer(nid);
	if (!lp)
		goto out;

	cpt = lnet_net_lock_current();
	spin_lock(&lp->lp_lock);
	if (((lp->lp_state & LNET_PEER_DISCOVERED) &&
	     (lp->lp_state & LNET_PEER_NIDS_UPTODATE)) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY))
		disc = true;
	spin_unlock(&lp->lp_lock);

	/* Drop refcount from lookup */
	lnet_peer_decref_locked(lp);
	lnet_net_unlock(cpt);
out:
	CDEBUG(D_NET, "Peer NID %s discovered: %d\n", libcfs_nidstr(nid),
	       disc);
	return disc;
}
EXPORT_SYMBOL(LNetPeerDiscovered);

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}

	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
{
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(nid);

	/*
	 * No need for the lnet_net_lock here, because the
	 * lnet_api_mutex is held.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/* A peer with this NID already exists. */
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_ni_decref_locked(lpni);
		/*
		 * This is an error if the peer was configured and the
		 * primary NID differs or an attempt is made to change
		 * the Multi-Rail flag. Otherwise the assumption is
		 * that an existing peer is being modified.
		 */
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			if (!nid_same(&lp->lp_primary_nid, nid))
				rc = -EEXIST;
			else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
				rc = -EPERM;
			goto out;
		} else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
			if (nid_same(&lp->lp_primary_nid, nid))
				rc = -EEXIST;
			/* we're trying to recreate an existing peer which
			 * has already been created and its primary
			 * locked. This is likely due to two servers
			 * existing on the same node. So we'll just refer
			 * to that node with the primary NID which was
			 * first added by Lustre
			 */
			else
				rc = -EALREADY;
			goto out;
		} else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
			/* if not recreating peer as configured and
			 * not locking primary nid, no need to
			 * do anything if primary nid is not being changed
			 */
			if (nid_same(&lp->lp_primary_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}
		/* Delete and recreate the peer.
		 * We can get here:
		 * 1. If the peer is being recreated as a configured NID
		 * 2. if there already exists a peer which
		 *    was discovered manually, but is recreated via Lustre
		 *    with PRIMARY_lock
		 */
		rc = lnet_peer_del(lp);
		if (rc)
			goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_free_lp;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_free_lpn;

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
	LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
	LIBCFS_FREE(lp, sizeof(*lp));
out:
	CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
	       libcfs_nidstr(nid), flags, rc);
	return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int rc = 0;

	LASSERT(lp);
	LASSERT(nid);

	/* A configured peer can only be updated through configuration. */
	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/*
	 * The MULTI_RAIL flag can be set but not cleared, because
	 * that would leave the peer struct in an invalid state.
	 */
	if (flags & LNET_PEER_MULTI_RAIL) {
		spin_lock(&lp->lp_lock);
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
		spin_unlock(&lp->lp_lock);
	} else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * A peer_ni already exists. This is only a problem if
		 * it is not connected to this peer and was configured
		 * by DLC.
		 */
		if (lpni->lpni_peer_net->lpn_peer == lp)
			goto out_free_lpni;
		if (lnet_peer_ni_is_configured(lpni)) {
			rc = -EEXIST;
			goto out_free_lpni;
		}
		/* If this is the primary NID, destroy the peer. */
		if (lnet_peer_ni_is_primary(lpni)) {
			struct lnet_peer *lp2 =
				lpni->lpni_peer_net->lpn_peer;
			int rtr_refcount = lp2->lp_rtr_refcount;
			unsigned int peer2_state;
			__u64 peer2_prim_lock_ts;

			/* If there's another peer that this NID belongs to
			 * and the primary NID for that peer is locked,
			 * then, unless it is the only NID, we don't want
			 * to mess with it.
			 * But the configuration is wrong at this point,
			 * so we should flag both of these peers as in a bad
			 * state
			 */
			spin_lock(&lp2->lp_lock);
			if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
			    lp2->lp_nnis > 1) {
				lp2->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp2->lp_lock);
				spin_lock(&lp->lp_lock);
				lp->lp_state |= LNET_PEER_BAD_CONFIG;
				spin_unlock(&lp->lp_lock);
				CERROR("Peer %s NID %s is already locked with peer %s\n",
				       libcfs_nidstr(&lp->lp_primary_nid),
				       libcfs_nidstr(nid),
				       libcfs_nidstr(&lp2->lp_primary_nid));
				rc = -EALREADY;
				goto out_free_lpni;
			}
			peer2_state = lp2->lp_state;
			peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
			spin_unlock(&lp2->lp_lock);

			/* NID which got locked the earliest should be
			 * kept as primary. In case if the peers were
			 * created by Lustre, this allows the
			 * first listed NID to stay primary as intended
			 * for the purpose of communicating with Lustre
			 * even if peer discovery succeeded using
			 * a different NID of MR peer.
			 */
			spin_lock(&lp->lp_lock);
			if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
			    ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
			      peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
			     !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
				lp->lp_prim_lock_ts = peer2_prim_lock_ts;
				lp->lp_primary_nid = *nid;
				lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
			}
			spin_unlock(&lp->lp_lock);
			/*
			 * if we're trying to delete a router it means
			 * we're moving this peer NI to a new peer so must
			 * transfer router properties to the new peer
			 */
			if (rtr_refcount > 0) {
				flags |= LNET_PEER_RTR_NI_FORCE_DEL;
				lnet_rtr_transfer_to_peer(lp2, lp);
			}
			lnet_peer_del(lp2);
			lnet_peer_ni_decref_locked(lpni);
			lpni = lnet_peer_ni_alloc(nid);
			if (!lpni) {
				rc = -ENOMEM;
				goto out;
			}
		}
	} else {
		lpni = lnet_peer_ni_alloc(nid);
		if (!lpni) {
			rc = -ENOMEM;
			goto out;
		}
	}

	/*
	 * Get the peer_net. Check that we're not adding a second
	 * peer_ni on a peer_net of a non-multi-rail peer.
	 */
	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn) {
		lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
		if (!lpn) {
			rc = -ENOMEM;
			goto out_free_lpni;
		}
	} else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -ENOTUNIQ;
		goto out_free_lpni;
	}

	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
	lnet_peer_ni_decref_locked(lpni);
out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
	       flags, rc);
	return rc;
}

/*
 * Update the primary NID of a peer, if possible.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
			  unsigned int flags)
{
	struct lnet_nid old = lp->lp_primary_nid;
	int rc = 0;

	if (nid_same(&lp->lp_primary_nid, nid))
		goto out;

	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
		lp->lp_primary_nid = *nid;

	rc = lnet_peer_add_nid(lp, nid, flags);
	if (rc) {
		lp->lp_primary_nid = old;
		goto out;
	}
out:
	/* if this is a configured peer or the primary for that peer has
	 * been locked, then we don't want to flag this scenario as
	 * a failure
	 */
	if (lp->lp_state & LNET_PEER_CONFIGURED ||
	    lp->lp_state & LNET_PEER_LOCK_PRIMARY)
		return 0;

	CDEBUG(D_NET, "peer %s NID %s: %d\n",
	       libcfs_nidstr(&old), libcfs_nidstr(nid), rc);

	return rc;
}

/*
 * lpni creation initiated due to traffic either sending or receiving.
 * Callers must hold ln_api_mutex.
 * A reference is taken on the lnet_peer_ni returned by this function.
 */
static struct lnet_peer_ni *
lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_net *lpn = NULL;
	struct lnet_peer_ni *lpni;
	unsigned int flags = 0;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out_err;
	}

	/* lnet_net_lock is not needed here because ln_api_lock is held */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		/*
		 * We must have raced with another thread. Since we
		 * know next to nothing about a peer_ni created by
		 * traffic, we just assume everything is ok and
		 * return.
		 */
		goto out;
	}

	/* Create peer, peer_net, and peer_ni. */
	rc = -ENOMEM;
	lp = lnet_peer_alloc(nid);
	if (!lp)
		goto out_err;
	lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
	if (!lpn)
		goto out_err;
	lpni = lnet_peer_ni_alloc(nid);
	if (!lpni)
		goto out_err;
	lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);

	/* lnet_peer_attach_peer_ni() always returns 0 */
	rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

	lnet_peer_ni_addref_locked(lpni);

	goto out;

out_err:
	if (lpn)
		LIBCFS_FREE(lpn, sizeof(*lpn));
	if (lp)
		LIBCFS_FREE(lp, sizeof(*lp));
	lpni = ERR_PTR(rc);
out:
	CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
	return lpni;
}

/*
 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
 *
 * This API handles the following combinations:
 *   Create a peer with its primary NI if only the prim_nid is provided
 *   Add a NID to a peer identified by the prim_nid. The peer identified
 *   by the prim_nid must already exist.
 *   The peer being created may be non-MR.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being created/modified/deleted by a different thread.
 */
static int
lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
		 unsigned int flags)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer *lp = NULL;
	struct lnet_peer_ni *lpni;

	/* The prim_nid must always be specified */
	if (LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	if (mr)
		flags |= LNET_PEER_MULTI_RAIL;

	/*
	 * If nid isn't specified, we must create a new peer with
	 * prim_nid as its primary nid.
	 */
	if (LNET_NID_IS_ANY(nid))
		return lnet_peer_add(prim_nid, flags);

	/* Look up the prim_nid, which must exist. */
	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_ni_decref_locked(lpni);

	/* Peer must have been configured. */
	if ((flags & LNET_PEER_CONFIGURED) &&
	    !(lp->lp_state & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET, "peer %s was not configured\n",
		       libcfs_nidstr(prim_nid));
		return -ENOENT;
	}

	/* Primary NID must match */
	if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	/* Multi-Rail flag must match. */
	if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
		CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
		       libcfs_nidstr(prim_nid));
		return -EPERM;
	}

	if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
		CDEBUG(D_NET,
		       "Don't add temporary peer NI for uptodate peer %s\n",
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -EINVAL;
	}

	return lnet_peer_add_nid(lp, nid, flags);
}

int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
			  bool mr, bool lock_prim)
{
	int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);

	return lnet_add_peer_ni(prim_nid, nid, mr, fl);
}
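
/*
 * Worked example for the flag computation above: with lock_prim true the
 * multiplication yields LNET_PEER_LOCK_PRIMARY, so fl is
 * LNET_PEER_CONFIGURED | LNET_PEER_LOCK_PRIMARY; with lock_prim false it
 * is just LNET_PEER_CONFIGURED.
 */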

static int
lnet_reset_peer(struct lnet_peer *lp)
{
	struct lnet_peer_net *lpn, *lpntmp;
	struct lnet_peer_ni *lpni, *lpnitmp;
	unsigned int flags;
	int rc;

	lnet_peer_cancel_discovery(lp);

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
		list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
					 lpni_peer_nis) {
			if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
				continue;

			rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
			if (rc)
				CERROR("Failed to delete %s from peer %s\n",
				       libcfs_nidstr(&lpni->lpni_nid),
				       libcfs_nidstr(&lp->lp_primary_nid));
		}
	}

	/* mark it for discovery the next time we use it */
	lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;

	return 0;
}

/*
 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
 *
 * This API handles the following combinations:
 *   Delete a NI from a peer if both prim_nid and nid are provided.
 *   Delete a peer if only prim_nid is provided.
 *   Delete a peer if its primary nid is provided.
 *
 * The caller must hold ln_api_mutex. This prevents the peer from
 * being modified/deleted by a different thread.
 */
int
lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
		 int force)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	unsigned int flags;

	if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
		return -EINVAL;

	lpni = lnet_peer_ni_find_locked(prim_nid);
	if (!lpni)
		return -ENOENT;
	lp = lpni->lpni_peer_net->lpn_peer;
	lnet_peer_ni_decref_locked(lpni);

	if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
		CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
		       libcfs_nidstr(prim_nid),
		       libcfs_nidstr(&lp->lp_primary_nid));
		return -ENODEV;
	}

	lnet_net_lock(LNET_LOCK_EX);
	if (lp->lp_rtr_refcount > 0) {
		lnet_net_unlock(LNET_LOCK_EX);
		CERROR("%s is a router. Cannot be deleted\n",
		       libcfs_nidstr(prim_nid));
		return -EBUSY;
	}
	lnet_net_unlock(LNET_LOCK_EX);

	if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
		if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
			CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
			       libcfs_nidstr(&lp->lp_primary_nid));
			return lnet_reset_peer(lp);
		}
		return lnet_peer_del(lp);
	}

	flags = LNET_PEER_CONFIGURED;
	if (lp->lp_state & LNET_PEER_MULTI_RAIL)
		flags |= LNET_PEER_MULTI_RAIL;

	return lnet_peer_del_nid(lp, nid, flags);
}

void
lnet_destroy_peer_ni_locked(struct kref *ref)
{
	struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
						 lpni_kref);
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	LASSERT(kref_read(&lpni->lpni_kref) == 0);
	LASSERT(list_empty(&lpni->lpni_txq));
	LASSERT(lpni->lpni_txqnob == 0);
	LASSERT(list_empty(&lpni->lpni_peer_nis));
	LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));

	lpn = lpni->lpni_peer_net;
	lpni->lpni_peer_net = NULL;
	lpni->lpni_net = NULL;

	if (!list_empty(&lpni->lpni_hashlist)) {
		/* remove the peer ni from the zombie list */
		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		spin_lock(&ptable->pt_zombie_lock);
		list_del_init(&lpni->lpni_hashlist);
		ptable->pt_zombies--;
		spin_unlock(&ptable->pt_zombie_lock);
	}

	if (lpni->lpni_pref_nnids > 1) {
		struct lnet_nid_list *ne, *tmp;

		list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
					 nl_list) {
			list_del_init(&ne->nl_list);
			LIBCFS_FREE(ne, sizeof(*ne));
		}
	}
	LIBCFS_FREE(lpni, sizeof(*lpni));

	if (lpn)
		lnet_peer_net_decref_locked(lpn);
}

struct lnet_peer_ni *
lnet_nid2peerni_ex(struct lnet_nid *nid)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	lnet_net_unlock(LNET_LOCK_EX);

	lpni = lnet_peer_ni_traffic_add(nid, NULL);

	lnet_net_lock(LNET_LOCK_EX);

	return lpni;
}

/*
 * Get a peer_ni for the given nid, create it if necessary. Takes a
 * hold on the peer_ni.
 */
struct lnet_peer_ni *
lnet_peerni_by_nid_locked(struct lnet_nid *nid,
			  struct lnet_nid *pref, int cpt)
{
	struct lnet_peer_ni *lpni = NULL;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return ERR_PTR(-ESHUTDOWN);

	/*
	 * find if a peer_ni already exists.
	 * If so then just return that.
	 */
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni)
		return lpni;

	/*
	 * Slow path:
	 * use the lnet_api_mutex to serialize the creation of the peer_ni
	 * and the creation/deletion of the local ni/net. When a local ni is
	 * created, if there exists a set of peer_nis on that network,
	 * they need to be traversed and updated. When a local NI is
	 * deleted, which could result in a network being deleted, then
	 * all peer nis on that network need to be removed as well.
	 *
	 * Creation through traffic should also be serialized with
	 * creation through DLC.
	 */
	lnet_net_unlock(cpt);
	mutex_lock(&the_lnet.ln_api_mutex);
	/*
	 * the_lnet.ln_state is only modified under the ln_api_lock, so a single
	 * check here is sufficient.
	 */
	if (the_lnet.ln_state == LNET_STATE_RUNNING)
		lpni = lnet_peer_ni_traffic_add(nid, pref);

	mutex_unlock(&the_lnet.ln_api_mutex);
	lnet_net_lock(cpt);

	/* Lock has been dropped, check again for shutdown. */
	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		if (!IS_ERR_OR_NULL(lpni))
			lnet_peer_ni_decref_locked(lpni);
		lpni = ERR_PTR(-ESHUTDOWN);
	}

	return lpni;
}

bool
lnet_peer_gw_discovery(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
		rc = true;
	spin_unlock(&lp->lp_lock);

	return rc;
}

bool
lnet_peer_is_uptodate(struct lnet_peer *lp)
{
	bool rc;

	spin_lock(&lp->lp_lock);
	rc = lnet_peer_is_uptodate_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

/*
 * Is a peer uptodate from the point of view of discovery?
 *
 * If it is currently being processed, obviously not.
 * A forced Ping or Push is also handled by the discovery thread.
 *
 * Otherwise look at whether the peer needs rediscovering.
 */
bool
lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	bool rc = false;

	if (lp->lp_state & (LNET_PEER_DISCOVERING |
			    LNET_PEER_FORCE_PING |
			    LNET_PEER_FORCE_PUSH)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_REDISCOVER) {
		rc = false;
	} else if (lnet_peer_needs_push(lp)) {
		rc = false;
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
			rc = true;
		else
			rc = false;
	} else {
		rc = false;
	}

	return rc;
}

/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
void
lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
{
	/* The discovery thread holds net_lock/EX and lp_lock when it splices
	 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
	 * when adding to the list and queuing the peer to ensure that we do not
	 * strand any messages on the lp_dc_pendq. This scheme ensures the
	 * message will be resent even if the peer is already being discovered.
	 * Therefore we needn't check the return value of
	 * lnet_peer_queue_for_discovery(lp).
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
	spin_unlock(&lp->lp_lock);
	lnet_peer_queue_for_discovery(lp);
	lnet_net_unlock(LNET_LOCK_EX);
}

/*
 * Queue a peer for the attention of the discovery thread. Call with
 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
 * -EALREADY if the peer was already queued.
 */
static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
{
	int rc;

	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_DISCOVERING))
		lp->lp_state |= LNET_PEER_DISCOVERING;
	spin_unlock(&lp->lp_lock);
	if (list_empty(&lp->lp_dc_list)) {
		lnet_peer_addref_locked(lp);
		list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
		wake_up(&the_lnet.ln_dc_waitq);
		rc = 0;
	} else {
		rc = -EALREADY;
	}

	CDEBUG(D_NET, "Queue peer %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), rc);

	return rc;
}
2396 * Discovery of a peer is complete. Wake all waiters on the peer.
2397 * Call with lnet_net_lock/EX held.
2399 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2401 struct lnet_msg *msg, *tmp;
2403 LIST_HEAD(pending_msgs);
2405 CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2406 libcfs_nidstr(&lp->lp_primary_nid));
2408 spin_lock(&lp->lp_lock);
2409 /* Our caller dropped lp_lock which may have allowed another thread to
2410 * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2411 * Ensure it is cleared.
2413 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2415 lp->lp_dc_error = dc_error;
2416 lp->lp_state |= LNET_PEER_REDISCOVER;
2418 list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2419 spin_unlock(&lp->lp_lock);
2420 list_del_init(&lp->lp_dc_list);
2421 wake_up(&lp->lp_dc_waitq);
2423 if (lp->lp_rtr_refcount > 0)
2424 lnet_router_discovery_complete(lp);
2426 lnet_net_unlock(LNET_LOCK_EX);
2428 /* iterate through all pending messages and send them again */
2429 list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2430 list_del_init(&msg->msg_list);
2431 if (dc_error) {
2432 lnet_finalize(msg, dc_error);
2433 continue;
2434 }
2436 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2437 lnet_msgtyp2str(msg->msg_type),
2438 libcfs_idstr(&msg->msg_target));
2439 rc = lnet_send(&msg->msg_src_nid_param, msg,
2440 &msg->msg_rtr_nid_param);
2441 if (rc < 0) {
2442 CNETERR("Error sending %s to %s: %d\n",
2443 lnet_msgtyp2str(msg->msg_type),
2444 libcfs_idstr(&msg->msg_target), rc);
2445 lnet_finalize(msg, rc);
2448 lnet_net_lock(LNET_LOCK_EX);
2449 lnet_peer_decref_locked(lp);
2453 * Handle inbound push.
2454 * Like any event handler, called with lnet_res_lock/CPT held.
2456 void lnet_peer_push_event(struct lnet_event *ev)
2458 struct lnet_ping_buffer *pbuf;
2459 struct lnet_peer *lp;
2462 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2464 /* lnet_find_peer() adds a refcount */
2465 lp = lnet_find_peer(&ev->source.nid);
2467 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2468 libcfs_nidstr(&ev->initiator.nid),
2469 libcfs_nidstr(&ev->source.nid));
2470 pbuf->pb_needs_post = true;
2474 /* Ensure peer state remains consistent while we modify it. */
2475 spin_lock(&lp->lp_lock);
2478 * If some kind of error happened, the contents of the message
2479 * cannot be used. Clear the NIDS_UPTODATE flag and set the
2480 * FORCE_PING flag to trigger a ping.
2483 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2484 lp->lp_state |= LNET_PEER_FORCE_PING;
2485 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2487 libcfs_nidstr(&lp->lp_primary_nid),
2488 libcfs_nidstr(&ev->source.nid));
2493 * A push with invalid or corrupted info. Clear the UPTODATE
2494 * flag to trigger a ping.
2496 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2497 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2498 lp->lp_state |= LNET_PEER_FORCE_PING;
2499 CDEBUG(D_NET, "Corrupted Push from %s\n",
2500 libcfs_nidstr(&lp->lp_primary_nid));
2504 /* Make sure we'll allocate the correct size ping buffer when pinging the peer. */
2507 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2508 if (lp->lp_data_bytes < infobytes)
2509 lp->lp_data_bytes = infobytes;
2512 * A non-Multi-Rail peer is not supposed to be capable of
2513 * sending a push.
2515 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2516 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2517 libcfs_nidstr(&lp->lp_primary_nid));
2522 * The peer may have discovery disabled at its end. Set
2523 * NO_DISCOVERY as appropriate.
2525 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2526 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2527 libcfs_nidstr(&lp->lp_primary_nid));
2529 * Mark the peer for deletion if we already know about it
2530 * and it is transitioning from discovery enabled to discovery disabled.
2532 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2533 LNET_PEER_DISCOVERING)) &&
2534 lp->lp_state & LNET_PEER_DISCOVERED) {
2535 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2536 libcfs_nidstr(&lp->lp_primary_nid),
2538 lp->lp_state |= LNET_PEER_MARK_DELETION;
2540 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2541 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2542 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2543 libcfs_nidstr(&lp->lp_primary_nid));
2544 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2548 * Update the MULTI_RAIL flag based on the push. If the peer
2549 * was configured with DLC then the setting should match what DLC says.
2551 * NB: We verified above that the MR feature bit is set in pi_features
2553 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2554 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2555 libcfs_nidstr(&lp->lp_primary_nid), lp);
2556 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2557 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2558 libcfs_nidstr(&lp->lp_primary_nid));
2559 } else if (lnet_peer_discovery_disabled) {
2560 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2561 libcfs_nidstr(&lp->lp_primary_nid), lp);
2562 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2563 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2564 libcfs_nidstr(&lp->lp_primary_nid), lp);
2566 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2567 libcfs_nidstr(&lp->lp_primary_nid), lp);
2568 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2569 lnet_peer_clr_non_mr_pref_nids(lp);
2572 /* Check for truncation of the Put message. Clear the
2573 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2574 * and tell discovery to allocate a bigger buffer.
2576 if (ev->mlength < ev->rlength) {
2577 if (the_lnet.ln_push_target_nbytes < infobytes)
2578 the_lnet.ln_push_target_nbytes = infobytes;
2579 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2580 lp->lp_state |= LNET_PEER_FORCE_PING;
2581 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2582 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2586 /* always assume new data */
2587 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2588 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2590 /* If there is data present that hasn't been processed yet,
2591 * we'll replace it if the Put contained newer data and it
2592 * fits. We're racing with a Ping or an earlier Push in this case.
2595 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2596 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2597 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2598 infobytes <= lp->lp_data->pb_nbytes) {
2599 unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2600 infobytes, FLEXIBLE_OBJECT);
2601 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2602 libcfs_nidstr(&lp->lp_primary_nid),
2603 LNET_PING_BUFFER_SEQNO(pbuf),
2604 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2610 * Allocate a buffer to copy the data. On a failure we drop
2611 * the Push and set FORCE_PING to force the discovery
2612 * thread to fix the problem by pinging the peer.
2614 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2615 if (!lp->lp_data) {
2616 lp->lp_state |= LNET_PEER_FORCE_PING;
2617 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2618 libcfs_nidstr(&lp->lp_primary_nid),
2619 LNET_PING_BUFFER_SEQNO(pbuf));
2624 unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2626 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2627 CDEBUG(D_NET, "Received Push %s %u\n",
2628 libcfs_nidstr(&lp->lp_primary_nid),
2629 LNET_PING_BUFFER_SEQNO(pbuf));
2632 /* We've processed this buffer. It can be reposted */
2633 pbuf->pb_needs_post = true;
2636 * Queue the peer for discovery if it is not queued already. If it
2637 * was already queued, force it onto the request queue and wake the
2638 * discovery thread, because the peer's status changed.
2640 spin_unlock(&lp->lp_lock);
2641 lnet_net_lock(LNET_LOCK_EX);
2642 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2643 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2644 wake_up(&the_lnet.ln_dc_waitq);
2646 /* Drop refcount from lookup */
2647 lnet_peer_decref_locked(lp);
2648 lnet_net_unlock(LNET_LOCK_EX);
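/* For reference, a hypothetical debugging aid (not an upstream API) that
 * decodes the lp_state bits the Push handler above may modify:
 * FORCE_PING, NO_DISCOVERY, MULTI_RAIL and DATA_PRESENT.
 */
#if 0
static void dc_dbg_push_state(struct lnet_peer *lp)
{
	spin_lock(&lp->lp_lock);
	CDEBUG(D_NET, "%s: force_ping=%d no_disc=%d mr=%d data=%d\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       !!(lp->lp_state & LNET_PEER_FORCE_PING),
	       !!(lp->lp_state & LNET_PEER_NO_DISCOVERY),
	       !!(lp->lp_state & LNET_PEER_MULTI_RAIL),
	       !!(lp->lp_state & LNET_PEER_DATA_PRESENT));
	spin_unlock(&lp->lp_lock);
}
#endif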
2652 * Clear the discovery error state, unless we're already discovering
2653 * this peer, in which case the error is current.
2655 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2657 spin_lock(&lp->lp_lock);
2658 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2659 lp->lp_dc_error = 0;
2660 spin_unlock(&lp->lp_lock);
2664 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2665 * dropped/retaken within this function. An lnet_peer_ni is passed in
2666 * because discovery could tear down an lnet_peer.
2669 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2672 struct lnet_peer *lp = NULL;
2678 lnet_peer_decref_locked(lp);
2679 lnet_net_unlock(cpt);
2680 lnet_net_lock(LNET_LOCK_EX);
2681 lp = lpni->lpni_peer_net->lpn_peer;
2682 lnet_peer_clear_discovery_error(lp);
2685 * We're willing to be interrupted. The lpni can become a
2686 * zombie if we race with DLC, so we must check for that.
2689 /* Keep lp alive when the lnet_net_lock is unlocked */
2690 lnet_peer_addref_locked(lp);
2691 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2692 if (signal_pending(current))
2694 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2697 * Don't repeat discovery if discovery is disabled. This is
2698 * done to ensure we can use discovery as a standard ping as
2699 * well for backwards compatibility with routers which do not
2700 * have discovery or have discovery disabled
2702 if (lnet_is_discovery_disabled(lp) && count > 0)
2704 if (lp->lp_dc_error)
2706 if (lnet_peer_is_uptodate(lp))
2708 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2710 lnet_peer_queue_for_discovery(lp);
2712 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2715 * If caller requested a non-blocking operation then
2716 * return immediately. Once discovery is complete any
2717 * pending messages that were stopped due to discovery
2718 * will be transmitted.
2723 lnet_net_unlock(LNET_LOCK_EX);
2725 finish_wait(&lp->lp_dc_waitq, &wait);
2726 lnet_net_lock(LNET_LOCK_EX);
2727 lnet_peer_decref_locked(lp);
2728 /* Peer may have changed */
2729 lp = lpni->lpni_peer_net->lpn_peer;
2731 finish_wait(&lp->lp_dc_waitq, &wait);
2733 lnet_net_unlock(LNET_LOCK_EX);
2736 * The peer may have changed, so re-check and rediscover if that turns
2737 * out to have been the case. The reference count on lp ensured that
2738 * even if it was unlinked from lpni the memory could not be recycled.
2739 * Thus the check below is sufficient to determine whether the peer
2740 * changed. If the peer changed, then lp must not be dereferenced.
2742 if (lp != lpni->lpni_peer_net->lpn_peer)
2745 if (signal_pending(current))
2747 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2749 else if (lp->lp_dc_error)
2750 rc = lp->lp_dc_error;
2752 CDEBUG(D_NET, "non-blocking discovery\n");
2753 else if (!lnet_peer_is_uptodate(lp) &&
2754 !(lnet_is_discovery_disabled(lp) ||
2755 (lp->lp_state & LNET_PEER_MARK_DELETED)))
2758 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2759 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2760 libcfs_nidstr(&lpni->lpni_nid), rc,
2761 (!block) ? "pending discovery" : "discovery complete");
2762 lnet_peer_decref_locked(lp);
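/* Typical call pattern, sketched under the assumption that ln_api_mutex
 * and the net lock for cpt are held on entry, as required above. nid is a
 * caller-supplied struct lnet_nid *; the lookup helper and error handling
 * shown here are illustrative.
 */
#if 0
	struct lnet_peer_ni *lpni;
	int cpt, rc;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
	if (!IS_ERR(lpni)) {
		/* block == false returns as soon as the peer is queued */
		rc = lnet_discover_peer_locked(lpni, cpt, false);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);
#endif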
2767 /* Handle an incoming ack for a push. */
2769 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2771 struct lnet_ping_buffer *pbuf;
2773 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2774 spin_lock(&lp->lp_lock);
2775 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2776 lp->lp_push_error = ev->status;
2777 if (ev->status)
2778 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2779 else
2780 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2781 spin_unlock(&lp->lp_lock);
2783 CDEBUG(D_NET, "peer %s ev->status %d\n",
2784 libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2787 static bool find_primary(struct lnet_nid *nid,
2788 struct lnet_ping_buffer *pbuf)
2790 struct lnet_ping_info *pi = &pbuf->pb_info;
2791 struct lnet_ping_iter piter;
2794 if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2795 /* First large nid is primary */
2796 for (stp = ping_iter_first(&piter, pbuf, nid);
2798 stp = ping_iter_next(&piter, nid)) {
2799 if (nid_is_nid4(nid))
2800 continue;
2801 /* nid has already been copied in */
2804 /* no large nids ... weird ... ignore the flag
2805 * and use first nid.
2808 /* pi_ni[1] is primary */
2809 if (pi->pi_nnis < 2)
2811 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2815 /* Handle a Reply message. This is the reply to a Ping message. */
2817 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2819 struct lnet_ping_buffer *pbuf;
2820 struct lnet_nid primary;
2823 bool ping_feat_disc;
2825 spin_lock(&lp->lp_lock);
2827 lp->lp_disc_src_nid = ev->target.nid;
2828 lp->lp_disc_dst_nid = ev->source.nid;
2831 * If some kind of error happened, the contents of the message
2832 * cannot be used. Set PING_FAILED to trigger a retry.
2835 lp->lp_state |= LNET_PEER_PING_FAILED;
2836 lp->lp_ping_error = ev->status;
2837 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2839 libcfs_nidstr(&lp->lp_primary_nid),
2840 libcfs_nidstr(&ev->source.nid));
2844 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2845 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2846 lnet_swap_pinginfo(pbuf);
2849 * A reply with invalid or corrupted info. Set PING_FAILED to trigger a retry.
2852 rc = lnet_ping_info_validate(&pbuf->pb_info);
2854 lp->lp_state |= LNET_PEER_PING_FAILED;
2855 lp->lp_ping_error = 0;
2856 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2857 libcfs_nidstr(&lp->lp_primary_nid), rc);
2862 * The peer may have discovery disabled at its end. Set
2863 * NO_DISCOVERY as appropriate.
2865 ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2866 if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2867 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2868 libcfs_nidstr(&lp->lp_primary_nid),
2869 ping_feat_disc ? "enabled" : "disabled",
2870 lnet_peer_discovery_disabled ? "disabled" : "enabled");
2872 /* Detect whether this peer has toggled discovery from on to
2873 * off and whether we can delete and re-create the peer. Peers
2874 * that were manually configured cannot be deleted by discovery.
2875 * We need to delete this peer and re-create it if the peer was
2876 * not configured manually, is currently considered DD capable,
2877 * and either:
2878 * 1. We've already discovered the peer (the peer has toggled
2879 * the discovery feature from on to off), or
2880 * 2. The peer is considered MR, but it was not user configured
2881 * (this was a "temporary" peer created via the kernel APIs
2882 * that we're discovering for the first time)
2884 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2885 LNET_PEER_NO_DISCOVERY)) &&
2886 (lp->lp_state & (LNET_PEER_DISCOVERED |
2887 LNET_PEER_MULTI_RAIL))) {
2888 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2889 libcfs_nidstr(&lp->lp_primary_nid),
2891 lp->lp_state |= LNET_PEER_MARK_DELETION;
2893 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2895 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2896 libcfs_nidstr(&lp->lp_primary_nid));
2897 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2901 * Update the MULTI_RAIL flag based on the reply. If the peer
2902 * was configured with DLC then the setting should match what DLC says.
2905 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2906 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2907 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2908 libcfs_nidstr(&lp->lp_primary_nid), lp);
2909 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2910 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2911 libcfs_nidstr(&lp->lp_primary_nid));
2912 } else if (lnet_peer_discovery_disabled) {
2914 "peer %s(%p) not MR: DD disabled locally\n",
2915 libcfs_nidstr(&lp->lp_primary_nid), lp);
2916 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2918 "peer %s(%p) not MR: DD disabled remotely\n",
2919 libcfs_nidstr(&lp->lp_primary_nid), lp);
2921 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2922 libcfs_nidstr(&lp->lp_primary_nid), lp);
2923 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2924 lnet_peer_clr_non_mr_pref_nids(lp);
2926 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2927 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2928 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2929 libcfs_nidstr(&lp->lp_primary_nid));
2931 CERROR("Multi-Rail state vanished from %s\n",
2932 libcfs_nidstr(&lp->lp_primary_nid));
2933 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2937 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2939 * Make sure we'll allocate the correct size ping buffer when pinging the peer.
2942 if (lp->lp_data_bytes < infobytes)
2943 lp->lp_data_bytes = infobytes;
2945 /* Check for truncation of the Reply. Clear PING_SENT and set
2946 * PING_FAILED to trigger a retry.
2948 if (pbuf->pb_nbytes < infobytes) {
2949 if (the_lnet.ln_push_target_nbytes < infobytes)
2950 the_lnet.ln_push_target_nbytes = infobytes;
2951 lp->lp_state |= LNET_PEER_PING_FAILED;
2952 lp->lp_ping_error = 0;
2953 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2954 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2959 * Check the sequence numbers in the reply. These are only
2960 * available if the reply came from a Multi-Rail peer.
2962 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2963 find_primary(&primary, pbuf) &&
2964 nid_same(&lp->lp_primary_nid, &primary)) {
2965 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2966 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2967 libcfs_nidstr(&lp->lp_primary_nid),
2968 LNET_PING_BUFFER_SEQNO(pbuf),
2971 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2974 /* We're happy with the state of the data in the buffer. */
2975 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2976 libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2978 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2979 lnet_ping_buffer_decref(lp->lp_data);
2981 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2982 lnet_ping_buffer_addref(pbuf);
2985 lp->lp_state &= ~LNET_PEER_PING_SENT;
2986 spin_unlock(&lp->lp_lock);
2990 * Send event handling. Only matters for error cases, where we clean
2991 * up state on the peer and peer_ni that would otherwise be updated in
2992 * the REPLY event handler for a successful Ping, and the ACK event
2993 * handler for a successful Push.
2996 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
3003 spin_lock(&lp->lp_lock);
3004 if (ev->msg_type == LNET_MSG_GET) {
3005 lp->lp_state &= ~LNET_PEER_PING_SENT;
3006 lp->lp_state |= LNET_PEER_PING_FAILED;
3007 lp->lp_ping_error = ev->status;
3008 } else { /* ev->msg_type == LNET_MSG_PUT */
3009 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3010 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3011 lp->lp_push_error = ev->status;
3013 spin_unlock(&lp->lp_lock);
3014 rc = LNET_REDISCOVER_PEER;
3016 CDEBUG(D_NET, "%s Send to %s: %d\n",
3017 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3018 libcfs_nidstr(&ev->target.nid), rc);
3023 * Unlink event handling. This event is only seen if a call to
3024 * LNetMDUnlink() caused the event to be unlinked. If this call was
3025 * made after the event was set up in LNetGet() or LNetPut() then we
3026 * assume the Ping or Push timed out.
3029 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3031 spin_lock(&lp->lp_lock);
3032 /* We've passed through LNetGet() */
3033 if (lp->lp_state & LNET_PEER_PING_SENT) {
3034 lp->lp_state &= ~LNET_PEER_PING_SENT;
3035 lp->lp_state |= LNET_PEER_PING_FAILED;
3036 lp->lp_ping_error = -ETIMEDOUT;
3037 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3038 libcfs_nidstr(&lp->lp_primary_nid));
3040 /* We've passed through LNetPut() */
3041 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3042 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3043 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3044 lp->lp_push_error = -ETIMEDOUT;
3045 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3046 libcfs_nidstr(&lp->lp_primary_nid));
3048 spin_unlock(&lp->lp_lock);
3052 * Event handler for the discovery EQ.
3054 * Called with lnet_res_lock(cpt) held. The cpt is the
3055 * lnet_cpt_of_cookie() of the md handle cookie.
3057 static void lnet_discovery_event_handler(struct lnet_event *event)
3059 struct lnet_peer *lp = event->md_user_ptr;
3060 struct lnet_ping_buffer *pbuf;
3063 /* discovery needs to take another look */
3064 rc = LNET_REDISCOVER_PEER;
3066 CDEBUG(D_NET, "Received event: %d\n", event->type);
3068 switch (event->type) {
3069 case LNET_EVENT_ACK:
3070 lnet_discovery_event_ack(lp, event);
3072 case LNET_EVENT_REPLY:
3073 lnet_discovery_event_reply(lp, event);
3075 case LNET_EVENT_SEND:
3076 /* Only send failure triggers a retry. */
3077 rc = lnet_discovery_event_send(lp, event);
3079 case LNET_EVENT_UNLINK:
3080 /* LNetMDUnlink() was called */
3081 lnet_discovery_event_unlink(lp, event);
3084 /* Invalid events. */
3087 lnet_net_lock(LNET_LOCK_EX);
3089 /* put peer back at end of request queue, if discovery not already done */
3091 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3092 lnet_peer_queue_for_discovery(lp)) {
3093 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3094 wake_up(&the_lnet.ln_dc_waitq);
3096 if (event->unlinked) {
3097 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3098 lnet_ping_buffer_decref(pbuf);
3099 lnet_peer_decref_locked(lp);
3101 lnet_net_unlock(LNET_LOCK_EX);
3104 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3105 struct lnet_ping_buffer *pbuf,
3106 struct lnet_nid *nid)
3108 pi->pinfo = &pbuf->pb_info;
3109 pi->pos = &pbuf->pb_info.pi_ni;
3110 pi->end = (void *)pi->pinfo +
3111 min_t(int, pbuf->pb_nbytes,
3112 lnet_ping_info_size(pi->pinfo));
3113 /* lnet_ping_info_validate() ensures there will be one
3114 * lnet_ni_status at the start
3117 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3119 pi->pos += sizeof(struct lnet_ni_status);
3120 return &pbuf->pb_info.pi_ni[0].ns_status;
3123 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3125 int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3127 if (pi->pos < ((void *)pi->pinfo + off)) {
3128 struct lnet_ni_status *ns = pi->pos;
3131 if (pi->pos > pi->end)
3134 lnet_nid4_to_nid(ns->ns_nid, nid);
3135 return &ns->ns_status;
3138 while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3139 struct lnet_ni_large_status *lns = pi->pos;
3141 if (pi->pos + 8 > pi->end)
3142 /* Not safe to examine next */
3144 pi->pos = lnet_ping_sts_next(lns);
3145 if (pi->pos > pi->end)
3147 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3151 return &lns->ns_status;
3156 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3158 struct lnet_ping_iter pi;
3162 for (st = ping_iter_first(&pi, pbuf, NULL); st;
3163 st = ping_iter_next(&pi, NULL))
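/* The iterator pair above is meant to be used together; a minimal sketch
 * of walking every NID/status entry in a ping buffer (the helper name
 * dc_walk_ping_buffer() is hypothetical):
 */
#if 0
static void dc_walk_ping_buffer(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ping_iter pi;
	struct lnet_nid nid;
	u32 *st;

	for (st = ping_iter_first(&pi, pbuf, &nid); st;
	     st = ping_iter_next(&pi, &nid))
		CDEBUG(D_NET, "NID %s status %u\n",
		       libcfs_nidstr(&nid), *st);
}
#endif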
3169 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3171 if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN) {
3173 lnet_handle_remote_failure_locked(lpni);
3175 } else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3176 !lpni->lpni_last_alive)
3177 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3181 * Build a peer from incoming data.
3183 * The NIDs in the incoming data are supposed to be structured as follows:
3184 * - loopback NID
3185 * - primary NID
3186 * - other NIDs in same net
3187 * - NIDs in second net
3188 * - NIDs in third net
3190 * This is due to the way the list of NIDs in the data is created.
3192 * Note that this function will mark the peer uptodate unless an
3193 * ENOMEM is encountered. All other errors are due to a conflict
3194 * between the DLC configuration and what discovery sees. We treat DLC
3195 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3196 * peer from becoming stuck in discovery.
3198 static int lnet_peer_merge_data(struct lnet_peer *lp,
3199 struct lnet_ping_buffer *pbuf)
3201 struct lnet_peer_net *lpn;
3202 struct lnet_peer_ni *lpni;
3203 struct lnet_nid *curnis = NULL;
3204 struct lnet_ni_large_status *addnis = NULL;
3205 struct lnet_nid *delnis = NULL;
3206 struct lnet_ping_iter pi;
3207 struct lnet_nid nid;
3209 struct lnet_nid primary = {};
3210 bool want_large_primary;
3221 flags = LNET_PEER_DISCOVERED;
3222 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3223 flags |= LNET_PEER_MULTI_RAIL;
3226 * Cache the routing feature for the peer; whether it is enabled
3227 * or disabled, as reported by the remote peer.
3229 spin_lock(&lp->lp_lock);
3230 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3231 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3233 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3234 spin_unlock(&lp->lp_lock);
3236 nnis = ping_info_count_entries(pbuf);
3237 nnis = max_t(int, lp->lp_nnis, nnis);
3238 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3239 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3240 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3241 if (!curnis || !addnis || !delnis) {
3249 /* Construct the list of NIDs present in peer. */
3251 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3252 curnis[ncurnis++] = lpni->lpni_nid;
3254 /* Check for NIDs in pbuf not present in curnis[].
3255 * Skip the first, which is loop-back. Take second as
3256 * primary, unless a large primary is found.
3258 ping_iter_first(&pi, pbuf, NULL);
3259 stp = ping_iter_next(&pi, &nid);
3262 want_large_primary = (pbuf->pb_info.pi_features &
3263 LNET_PING_FEAT_PRIMARY_LARGE);
3264 for (; stp; stp = ping_iter_next(&pi, &nid)) {
3265 for (j = 0; j < ncurnis; j++)
3266 if (nid_same(&nid, &curnis[j]))
3269 addnis[naddnis].ns_nid = nid;
3270 addnis[naddnis].ns_status = *stp;
3273 if (want_large_primary && nid.nid_size) {
3275 want_large_primary = false;
3279 * Check for NIDs in curnis[] not present in pbuf.
3280 * The nested loop starts at 1 to skip the loopback NID.
3282 * But never add the loopback NID to delnis[]: if it is
3283 * present in curnis[] then this peer is for this node.
3285 for (i = 0; i < ncurnis; i++) {
3286 if (nid_is_lo0(&curnis[i]))
3288 ping_iter_first(&pi, pbuf, NULL);
3289 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3290 if (nid_same(&curnis[i], &nid)) {
3292 * update the information we cache for the
3293 * peer with the latest information we
3296 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3298 old_st = lpni->lpni_ns_status;
3299 lpni->lpni_ns_status = *stp;
3300 if (old_st != lpni->lpni_ns_status)
3301 handle_disc_lpni_health(lpni);
3302 lnet_peer_ni_decref_locked(lpni);
3308 delnis[ndelnis++] = curnis[i];
3312 * If we get here and the discovery is disabled then we don't want
3313 * to add or delete any NIs. We just updated the ones we have some
3314 * information on, and call it a day
3317 if (lnet_is_discovery_disabled(lp))
3320 for (i = 0; i < naddnis; i++) {
3321 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3323 CERROR("Error adding NID %s to peer %s: %d\n",
3324 libcfs_nidstr(&addnis[i].ns_nid),
3325 libcfs_nidstr(&lp->lp_primary_nid), rc);
3329 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3331 lpni->lpni_ns_status = addnis[i].ns_status;
3332 handle_disc_lpni_health(lpni);
3333 lnet_peer_ni_decref_locked(lpni);
3337 for (i = 0; i < ndelnis; i++) {
3339 * for routers it's okay to delete the primary_nid because
3340 * the upper layers don't really rely on it. So if we're
3341 * being told that the router changed its primary_nid
3342 * then it's okay to delete it.
3344 if (lp->lp_rtr_refcount > 0)
3345 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3346 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3348 CERROR("Error deleting NID %s from peer %s: %d\n",
3349 libcfs_nidstr(&delnis[i]),
3350 libcfs_nidstr(&lp->lp_primary_nid), rc);
3356 /* The peer net for the primary NID should be the first entry in the
3357 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3358 * be the first entry in its peer net's lpn_peer_nis list.
3360 find_primary(&nid, pbuf);
3361 lpni = lnet_peer_ni_find_locked(&nid);
3363 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3364 libcfs_nidstr(&nid));
3368 lpn = lpni->lpni_peer_net;
3369 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3370 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3372 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3373 list_move(&lpni->lpni_peer_nis,
3374 &lpni->lpni_peer_net->lpn_peer_nis);
3376 lnet_peer_ni_decref_locked(lpni);
3378 * Errors other than -ENOMEM are due to peers having been
3379 * configured with DLC. Ignore these because DLC overrides Discovery.
3384 /* If this peer is a gateway, invoke the routing callback to update
3385 * the associated route status
3387 if (lp->lp_rtr_refcount > 0)
3388 lnet_router_discovery_ping_reply(lp, pbuf);
3390 CFS_FREE_PTR_ARRAY(curnis, nnis);
3391 CFS_FREE_PTR_ARRAY(addnis, nnis);
3392 CFS_FREE_PTR_ARRAY(delnis, nnis);
3393 lnet_ping_buffer_decref(pbuf);
3394 CDEBUG(D_NET, "peer %s (%p): %d\n",
3395 libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3398 spin_lock(&lp->lp_lock);
3399 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3400 lp->lp_state |= LNET_PEER_FORCE_PING;
3401 spin_unlock(&lp->lp_lock);
3407 * The data in pbuf says lp is its primary peer, but the data was
3408 * received by a different peer. Try to update lp with the data.
3411 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3413 struct lnet_handle_md mdh;
3415 /* Queue lp for discovery, and force it on the request queue. */
3416 lnet_net_lock(LNET_LOCK_EX);
3417 if (lnet_peer_queue_for_discovery(lp))
3418 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3419 lnet_net_unlock(LNET_LOCK_EX);
3421 LNetInvalidateMDHandle(&mdh);
3424 * Decide whether we can move the peer to the DATA_PRESENT state.
3426 * We replace stale data for a multi-rail peer, repair PING_FAILED
3427 * status, and preempt FORCE_PING.
3429 * If after that we have DATA_PRESENT, we merge it into this peer.
3431 spin_lock(&lp->lp_lock);
3432 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3433 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3434 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3435 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3436 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3437 lnet_ping_buffer_decref(pbuf);
3442 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3443 lnet_ping_buffer_decref(lp->lp_data);
3445 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3447 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3448 mdh = lp->lp_ping_mdh;
3449 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3450 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3451 lp->lp_ping_error = 0;
3453 if (lp->lp_state & LNET_PEER_FORCE_PING)
3454 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3455 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3456 spin_unlock(&lp->lp_lock);
3458 if (!LNetMDHandleIsInvalid(mdh))
3462 return lnet_peer_merge_data(lp, pbuf);
3464 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3468 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3469 struct lnet_ping_buffer *pbuf)
3471 struct lnet_ping_iter pi;
3472 struct lnet_nid pnid;
3475 for (st = ping_iter_first(&pi, pbuf, &pnid);
3477 st = ping_iter_next(&pi, &pnid))
3478 if (nid_same(nid, &pnid))
3483 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3484 * to the discovery queue a reference was taken that will prevent the peer from
3485 * actually being freed by this function. After this function exits the
3486 * discovery thread should call lnet_peer_discovery_complete() which will
3487 * drop that reference as well as wake any waiters that may also be
3488 * holding a reference on the peer.
3490 static int lnet_peer_deletion(struct lnet_peer *lp)
3491 __must_hold(&lp->lp_lock)
3493 struct list_head rlist;
3494 struct lnet_route *route, *tmp;
3495 int sensitivity = lp->lp_health_sensitivity;
3498 INIT_LIST_HEAD(&rlist);
3500 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3501 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3503 /* no-op if lnet_peer_del() has already been called on this peer */
3504 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3505 goto clear_discovering;
3507 spin_unlock(&lp->lp_lock);
3509 mutex_lock(&the_lnet.ln_api_mutex);
3510 if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3511 the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3512 mutex_unlock(&the_lnet.ln_api_mutex);
3513 spin_lock(&lp->lp_lock);
3515 goto clear_discovering;
3518 lnet_peer_cancel_discovery(lp);
3519 lnet_net_lock(LNET_LOCK_EX);
3520 list_for_each_entry_safe(route, tmp,
3523 lnet_move_route(route, NULL, &rlist);
3525 /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3526 rc = lnet_peer_del_locked(lp);
3528 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3529 libcfs_nidstr(&lp->lp_primary_nid), rc);
3531 lnet_net_unlock(LNET_LOCK_EX);
3533 list_for_each_entry_safe(route, tmp,
3535 /* re-add these routes */
3536 lnet_add_route(route->lr_net,
3541 LIBCFS_FREE(route, sizeof(*route));
3544 mutex_unlock(&the_lnet.ln_api_mutex);
3546 spin_lock(&lp->lp_lock);
3551 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3552 LNET_PEER_FORCE_PUSH);
3558 * Update a peer using the data received.
3560 static int lnet_peer_data_present(struct lnet_peer *lp)
3561 __must_hold(&lp->lp_lock)
3563 struct lnet_ping_buffer *pbuf;
3564 struct lnet_peer_ni *lpni;
3565 struct lnet_nid nid;
3571 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3572 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3573 spin_unlock(&lp->lp_lock);
3576 * Modifications of peer structures are done while holding the
3577 * ln_api_mutex. A global lock is required because we may be
3578 * modifying multiple peer structures, and a mutex greatly
3579 * simplifies memory management.
3581 * The actual changes to the data structures must also protect
3582 * against concurrent lookups, for which the lnet_net_lock in
3583 * LNET_LOCK_EX mode is used.
3585 mutex_lock(&the_lnet.ln_api_mutex);
3586 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3587 lnet_ping_buffer_decref(pbuf);
3593 * If this peer is not on the peer list then it is being torn
3594 * down, and our reference count may be all that is keeping it
3595 * alive. Don't do any work on it.
3597 if (list_empty(&lp->lp_peer_list)) {
3598 lnet_ping_buffer_decref(pbuf);
3602 flags = LNET_PEER_DISCOVERED;
3603 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3604 flags |= LNET_PEER_MULTI_RAIL;
3607 * Check whether the primary NID in the message matches the
3608 * primary NID of the peer. If it does, update the peer, if
3609 * it does not, check whether there is already a peer with
3610 * that primary NID. If no such peer exists, try to update
3611 * the primary NID of the current peer (allowed if it was
3612 * created due to message traffic) and complete the update.
3613 * If the peer did exist, hand off the data to it.
3615 * The peer for the loopback interface is a special case: this
3616 * is the peer for the local node, and we want to set its
3617 * primary NID to the correct value here. Moreover, this peer
3618 * can show up with only the loopback NID in the ping buffer.
3620 if (!find_primary(&nid, pbuf)) {
3621 lnet_ping_buffer_decref(pbuf);
3624 if (nid_is_lo0(&lp->lp_primary_nid)) {
3625 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3627 lnet_ping_buffer_decref(pbuf);
3629 rc = lnet_peer_merge_data(lp, pbuf);
3631 * if the primary nid of the peer is present in the ping info returned
3632 * from the peer, but it's not the local primary peer we have
3633 * cached and discovery is disabled, then we don't want to update
3634 * our local peer info, by adding or removing NIDs, we just want
3635 * to update the status of the nids that we currently have
3636 * recorded in that peer.
3638 } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3639 (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3640 lnet_is_discovery_disabled(lp))) {
3641 rc = lnet_peer_merge_data(lp, pbuf);
3643 lpni = lnet_peer_ni_find_locked(&nid);
3644 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3645 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3647 CERROR("Primary NID error %s versus %s: %d\n",
3648 libcfs_nidstr(&lp->lp_primary_nid),
3649 libcfs_nidstr(&nid), rc);
3650 lnet_ping_buffer_decref(pbuf);
3652 rc = lnet_peer_merge_data(lp, pbuf);
3655 lnet_peer_ni_decref_locked(lpni);
3657 struct lnet_peer *new_lp;
3658 new_lp = lpni->lpni_peer_net->lpn_peer;
3660 * if lp has discovery/MR enabled that means new_lp
3661 * should have discovery/MR enabled as well, since
3662 * it's the same peer, which we're about to merge
3664 spin_lock(&lp->lp_lock);
3665 spin_lock(&new_lp->lp_lock);
3666 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3667 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3668 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3669 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3670 /* If we're processing a ping reply then we may be
3671 * about to send a push to the peer that we ping'd.
3672 * Since the ping reply that we're processing was
3673 * received by lp, we need to set the discovery source
3674 * NID for new_lp to the NID stored in lp.
3676 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3677 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3678 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3680 spin_unlock(&new_lp->lp_lock);
3681 spin_unlock(&lp->lp_lock);
3683 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3684 lnet_consolidate_routes_locked(lp, new_lp);
3685 lnet_peer_ni_decref_locked(lpni);
3689 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3690 libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3692 mutex_unlock(&the_lnet.ln_api_mutex);
3694 spin_lock(&lp->lp_lock);
3695 /* Tell discovery to re-check the peer immediately. */
3697 rc = LNET_REDISCOVER_PEER;
3702 * A ping failed. Clear the PING_FAILED state and set the
3703 * FORCE_PING state, to ensure a retry even if discovery is
3704 * disabled. This avoids being left with incorrect state.
3706 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3707 __must_hold(&lp->lp_lock)
3709 struct lnet_handle_md mdh;
3712 mdh = lp->lp_ping_mdh;
3713 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3714 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3715 lp->lp_state |= LNET_PEER_FORCE_PING;
3716 rc = lp->lp_ping_error;
3717 lp->lp_ping_error = 0;
3718 spin_unlock(&lp->lp_lock);
3720 if (!LNetMDHandleIsInvalid(mdh))
3723 CDEBUG(D_NET, "peer %s:%d\n",
3724 libcfs_nidstr(&lp->lp_primary_nid), rc);
3726 spin_lock(&lp->lp_lock);
3727 return rc ? rc : LNET_REDISCOVER_PEER;
3730 /* Active side of ping. */
3731 static int lnet_peer_send_ping(struct lnet_peer *lp)
3732 __must_hold(&lp->lp_lock)
3738 lp->lp_state |= LNET_PEER_PING_SENT;
3739 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3740 spin_unlock(&lp->lp_lock);
3742 cpt = lnet_net_lock_current();
3743 /* Refcount for MD. */
3744 lnet_peer_addref_locked(lp);
3745 lnet_net_unlock(cpt);
3747 bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3749 rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3750 the_lnet.ln_dc_handler, false);
3751 /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3752 * refcount on the peer, otherwise LNetMDUnlink will be called
3753 * which will eventually do that.
3755 if (rc > 0) {
3756 lnet_net_lock(cpt);
3757 lnet_peer_decref_locked(lp);
3758 lnet_net_unlock(cpt);
3759 rc = -rc; /* change the rc to negative value */
3761 } else if (rc < 0) {
3765 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3767 spin_lock(&lp->lp_lock);
3771 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3773 * The errors that get us here are considered hard errors and
3774 * cause Discovery to terminate. So we clear PING_SENT, but do
3775 * not set either PING_FAILED or FORCE_PING. In fact we need
3776 * to clear PING_FAILED, because the unlink event handler will
3777 * have set it if we called LNetMDUnlink() above.
3779 spin_lock(&lp->lp_lock);
3780 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3785 * This function exists because you cannot call LNetMDUnlink() from an
3788 static int lnet_peer_push_failed(struct lnet_peer *lp)
3789 __must_hold(&lp->lp_lock)
3791 struct lnet_handle_md mdh;
3794 mdh = lp->lp_push_mdh;
3795 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3796 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3797 rc = lp->lp_push_error;
3798 lp->lp_push_error = 0;
3799 spin_unlock(&lp->lp_lock);
3801 if (!LNetMDHandleIsInvalid(mdh))
3804 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3805 spin_lock(&lp->lp_lock);
3806 return rc ? rc : LNET_REDISCOVER_PEER;
3810 * Mark the peer as discovered.
3812 static int lnet_peer_discovered(struct lnet_peer *lp)
3813 __must_hold(&lp->lp_lock)
3815 lp->lp_state |= LNET_PEER_DISCOVERED;
3816 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3817 LNET_PEER_REDISCOVER);
3819 lp->lp_dc_error = 0;
3821 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3826 /* Active side of push. */
3827 static int lnet_peer_send_push(struct lnet_peer *lp)
3828 __must_hold(&lp->lp_lock)
3830 struct lnet_ping_buffer *pbuf;
3831 struct lnet_processid id;
3836 /* Don't push to a non-multi-rail peer. */
3837 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3838 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3839 /* if peer's NIDs are uptodate then peer is discovered */
3840 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3841 rc = lnet_peer_discovered(lp);
3848 lp->lp_state |= LNET_PEER_PUSH_SENT;
3849 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3850 spin_unlock(&lp->lp_lock);
3852 cpt = lnet_net_lock_current();
3853 pbuf = the_lnet.ln_ping_target;
3854 lnet_ping_buffer_addref(pbuf);
3855 lnet_net_unlock(cpt);
3857 /* Push source MD */
3858 md.start = &pbuf->pb_info;
3859 md.length = pbuf->pb_nbytes;
3860 md.threshold = 2; /* Put/Ack */
3862 md.options = LNET_MD_TRACK_RESPONSE;
3863 md.handler = the_lnet.ln_dc_handler;
3866 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3868 lnet_ping_buffer_decref(pbuf);
3869 CERROR("Can't bind push source MD: %d\n", rc);
3873 cpt = lnet_net_lock_current();
3874 /* Refcount for MD. */
3875 lnet_peer_addref_locked(lp);
3876 id.pid = LNET_PID_LUSTRE;
3877 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3878 id.nid = lp->lp_disc_dst_nid;
3880 id.nid = lp->lp_primary_nid;
3881 lnet_net_unlock(cpt);
3883 rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3884 LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3885 LNET_PROTO_PING_MATCHBITS, 0, 0);
3888 * reset the discovery nid. There is no need to restrict sending
3889 * from that source, if we call lnet_push_update_to_peers(). It'll
3890 * get set to a specific NID if we initiate discovery from scratch.
3893 lp->lp_disc_src_nid = LNET_ANY_NID;
3894 lp->lp_disc_dst_nid = LNET_ANY_NID;
3899 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3901 spin_lock(&lp->lp_lock);
3905 LNetMDUnlink(lp->lp_push_mdh);
3906 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3908 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3911 * The errors that get us here are considered hard errors and
3912 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3913 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3914 * because the unlink event handler will have set it if we
3915 * called LNetMDUnlink() above.
3917 spin_lock(&lp->lp_lock);
3918 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3923 * Wait for work to be queued or some other change that must be
3924 * attended to. Returns non-zero if the discovery thread should shut
3927 static int lnet_peer_discovery_wait_for_work(void)
3934 cpt = lnet_net_lock_current();
3936 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3937 TASK_INTERRUPTIBLE);
3938 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3940 if (lnet_push_target_resize_needed() ||
3941 the_lnet.ln_push_target->pb_needs_post)
3943 if (!list_empty(&the_lnet.ln_dc_request))
3945 if (!list_empty(&the_lnet.ln_msg_resend))
3947 lnet_net_unlock(cpt);
3950 * wake up at most once per second to check whether any peers have
3951 * been stuck on the working queue for longer than the timeout.
3954 schedule_timeout(cfs_time_seconds(1));
3955 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3956 cpt = lnet_net_lock_current();
3958 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3960 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3963 lnet_net_unlock(cpt);
3965 CDEBUG(D_NET, "woken: %d\n", rc);
3971 * Messages that were pending on a destroyed peer will be put on a global
3972 * resend list. The message resend list will be checked by
3973 * the discovery thread when it wakes up, and will resend messages. These
3974 * messages can still be sendable if the lpni which originally caused
3975 * the message to be re-queued was transferred to another peer.
3977 * It is possible that LNet could be shut down while we're iterating
3978 * through the list. lnet_shutdown_lndnets() will attempt to access the
3979 * resend list, but will have to wait until the spinlock is released, by
3980 * which time there shouldn't be any more messages on the resend list.
3981 * During shutdown lnet_send() will fail and lnet_finalize() will be called
3982 * for the messages so they can be released. The other case is that
3983 * lnet_shutdown_lndnets() can finalize all the messages before this
3984 * function can visit the resend list, in which case this function will
3985 * be a no-op.
3987 static void lnet_resend_msgs(void)
3989 struct lnet_msg *msg, *tmp;
3993 spin_lock(&the_lnet.ln_msg_resend_lock);
3994 list_splice(&the_lnet.ln_msg_resend, &resend);
3995 spin_unlock(&the_lnet.ln_msg_resend_lock);
3997 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3998 list_del_init(&msg->msg_list);
3999 rc = lnet_send(&msg->msg_src_nid_param, msg,
4000 &msg->msg_rtr_nid_param);
4002 CNETERR("Error sending %s to %s: %d\n",
4003 lnet_msgtyp2str(msg->msg_type),
4004 libcfs_idstr(&msg->msg_target), rc);
4005 lnet_finalize(msg, rc);
4010 /* The discovery thread. */
4011 static int lnet_peer_discovery(void *arg)
4013 struct lnet_peer *lp;
4017 wait_for_completion(&the_lnet.ln_started);
4019 CDEBUG(D_NET, "started\n");
4022 if (lnet_peer_discovery_wait_for_work())
4025 if (lnet_push_target_resize_needed())
4026 lnet_push_target_resize();
4027 else if (the_lnet.ln_push_target->pb_needs_post)
4028 lnet_push_target_post(the_lnet.ln_push_target,
4029 &the_lnet.ln_push_target_md);
4033 lnet_net_lock(LNET_LOCK_EX);
4034 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4035 lnet_net_unlock(LNET_LOCK_EX);
4040 * Process all incoming discovery work requests. When
4041 * discovery must wait on a peer to change state, it
4042 * is added to the tail of the ln_dc_working queue. A
4043 * timestamp keeps track of when the peer was added,
4044 * so we can time out discovery requests that take too
4047 while (!list_empty(&the_lnet.ln_dc_request)) {
4048 lp = list_first_entry(&the_lnet.ln_dc_request,
4049 struct lnet_peer, lp_dc_list);
4050 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4052 * set the time the peer was put on the dc_working
4053 * queue. It shouldn't remain on the queue
4054 * forever, in case the GET message (for ping)
4055 * doesn't get a REPLY or the PUT message (for
4056 * push) doesn't get an ACK.
4058 lp->lp_last_queued = ktime_get_real_seconds();
4059 lnet_net_unlock(LNET_LOCK_EX);
4061 if (lnet_push_target_resize_needed())
4062 lnet_push_target_resize();
4063 else if (the_lnet.ln_push_target->pb_needs_post)
4064 lnet_push_target_post(the_lnet.ln_push_target,
4065 &the_lnet.ln_push_target_md);
4068 * Select an action depending on the state of
4069 * the peer and whether discovery is disabled.
4070 * The check whether discovery is disabled is
4071 * done after the code that handles processing
4072 * for arrived data, cleanup for failures, and
4073 * forcing a Ping or Push.
4075 spin_lock(&lp->lp_lock);
4076 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4077 libcfs_nidstr(&lp->lp_primary_nid), lp,
4079 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4080 LNET_PEER_MARK_DELETED))
4081 rc = lnet_peer_deletion(lp);
4082 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4083 rc = lnet_peer_data_present(lp);
4084 else if (lp->lp_state & LNET_PEER_PING_FAILED)
4085 rc = lnet_peer_ping_failed(lp);
4086 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4087 rc = lnet_peer_push_failed(lp);
4088 else if (lp->lp_state & LNET_PEER_FORCE_PING)
4089 rc = lnet_peer_send_ping(lp);
4090 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4091 rc = lnet_peer_send_push(lp);
4092 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4093 rc = lnet_peer_send_ping(lp);
4094 else if (lnet_peer_needs_push(lp))
4095 rc = lnet_peer_send_push(lp);
4097 rc = lnet_peer_discovered(lp);
4098 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4099 libcfs_nidstr(&lp->lp_primary_nid), lp,
4102 if (rc == LNET_REDISCOVER_PEER) {
4103 spin_unlock(&lp->lp_lock);
4104 lnet_net_lock(LNET_LOCK_EX);
4105 list_move(&lp->lp_dc_list,
4106 &the_lnet.ln_dc_request);
4107 } else if (rc ||
4108 !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4109 spin_unlock(&lp->lp_lock);
4110 lnet_net_lock(LNET_LOCK_EX);
4111 lnet_peer_discovery_complete(lp, rc);
4113 spin_unlock(&lp->lp_lock);
4114 lnet_net_lock(LNET_LOCK_EX);
4117 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4122 lnet_net_unlock(LNET_LOCK_EX);
4126 CDEBUG(D_NET, "stopping\n");
4128 * Clean up before telling lnet_peer_discovery_stop() that
4129 * we're done. Use wake_up() below to somewhat reduce the
4130 * size of the thundering herd if there are multiple threads
4131 * waiting on discovery of a single peer.
4134 /* Queue cleanup 1: stop all pending pings and pushes. */
4135 lnet_net_lock(LNET_LOCK_EX);
4136 while (!list_empty(&the_lnet.ln_dc_working)) {
4137 lp = list_first_entry(&the_lnet.ln_dc_working,
4138 struct lnet_peer, lp_dc_list);
4139 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4140 lnet_net_unlock(LNET_LOCK_EX);
4141 lnet_peer_cancel_discovery(lp);
4142 lnet_net_lock(LNET_LOCK_EX);
4144 lnet_net_unlock(LNET_LOCK_EX);
4146 /* Queue cleanup 2: wait for the expired queue to clear. */
4147 while (!list_empty(&the_lnet.ln_dc_expired))
4148 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4150 /* Queue cleanup 3: clear the request queue. */
4151 lnet_net_lock(LNET_LOCK_EX);
4152 while (!list_empty(&the_lnet.ln_dc_request)) {
4153 lp = list_first_entry(&the_lnet.ln_dc_request,
4154 struct lnet_peer, lp_dc_list);
4155 lnet_net_unlock(LNET_LOCK_EX);
4156 spin_lock(&lp->lp_lock);
4157 if (lp->lp_state & LNET_PEER_PING_FAILED)
4158 (void)lnet_peer_ping_failed(lp);
4159 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4160 (void)lnet_peer_push_failed(lp);
4161 spin_unlock(&lp->lp_lock);
4162 lnet_net_lock(LNET_LOCK_EX);
4163 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4165 lnet_net_unlock(LNET_LOCK_EX);
4167 if (lnet_assert_handler_unused(the_lnet.ln_dc_handler, --retry <= 0))
4170 the_lnet.ln_dc_handler = NULL;
4172 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4173 wake_up(&the_lnet.ln_dc_waitq);
4175 CDEBUG(D_NET, "stopped\n");
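/* The per-peer action selection in the main loop above is a fixed
 * priority ladder; restated here for clarity (this mirrors the if/else
 * chain in lnet_peer_discovery() and adds nothing new):
 *
 *  1. MARK_DELETION / MARK_DELETED -> lnet_peer_deletion()
 *  2. DATA_PRESENT                 -> lnet_peer_data_present()
 *  3. PING_FAILED                  -> lnet_peer_ping_failed()
 *  4. PUSH_FAILED                  -> lnet_peer_push_failed()
 *  5. FORCE_PING                   -> lnet_peer_send_ping()
 *  6. FORCE_PUSH                   -> lnet_peer_send_push()
 *  7. !NIDS_UPTODATE               -> lnet_peer_send_ping()
 *  8. lnet_peer_needs_push()       -> lnet_peer_send_push()
 *  9. otherwise                    -> lnet_peer_discovered()
 */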
4180 /* ln_api_mutex is held on entry. */
4181 int lnet_peer_discovery_start(void)
4183 struct task_struct *task;
4186 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4189 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4190 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4191 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4194 CERROR("Can't start peer discovery thread: %d\n", rc);
4196 the_lnet.ln_dc_handler = NULL;
4198 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4201 CDEBUG(D_NET, "discovery start: %d\n", rc);
4206 /* ln_api_mutex is held on entry. */
4207 void lnet_peer_discovery_stop(void)
4209 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4212 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4213 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4215 /* In the LNetNIInit() path we may be stopping discovery before it
4216 * entered its work loop
4218 if (!completion_done(&the_lnet.ln_started))
4219 complete(&the_lnet.ln_started);
4221 wake_up(&the_lnet.ln_dc_waitq);
4223 mutex_unlock(&the_lnet.ln_api_mutex);
4224 wait_event(the_lnet.ln_dc_waitq,
4225 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4226 mutex_lock(&the_lnet.ln_api_mutex);
4228 LASSERT(list_empty(&the_lnet.ln_dc_request));
4229 LASSERT(list_empty(&the_lnet.ln_dc_working));
4230 LASSERT(list_empty(&the_lnet.ln_dc_expired));
4232 CDEBUG(D_NET, "discovery stopped\n");
4238 lnet_debug_peer(struct lnet_nid *nid)
4240 char *aliveness = "NA";
4241 struct lnet_peer_ni *lp;
4244 cpt = lnet_nid2cpt(nid, NULL);
4247 lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4249 lnet_net_unlock(cpt);
4250 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4254 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4255 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4257 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4258 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4259 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4260 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4261 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4263 lnet_peer_ni_decref_locked(lp);
4265 lnet_net_unlock(cpt);
4268 /* Gathering information for userspace. */
4270 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4271 char aliveness[LNET_MAX_STR_LEN],
4272 __u32 *cpt_iter, __u32 *refcount,
4273 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4274 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4275 __u32 *peer_tx_qnob)
4277 struct lnet_peer_table *peer_table;
4278 struct lnet_peer_ni *lp;
4283 /* get the number of CPTs */
4284 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4286 /* if the cpt number to be examined is >= the number of cpts in
4287 * the system then indicate that there are no more cpts to examine.
4289 if (*cpt_iter >= lncpt)
4292 /* get the current table */
4293 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4294 /* if the ptable is NULL then there are no more cpts to examine */
4295 if (peer_table == NULL)
4298 lnet_net_lock(*cpt_iter);
4300 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4301 struct list_head *peers = &peer_table->pt_hash[j];
4303 list_for_each_entry(lp, peers, lpni_hashlist) {
4304 if (!nid_is_nid4(&lp->lpni_nid))
4306 if (peer_index-- > 0)
4309 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4310 if (lnet_isrouter(lp) ||
4311 lnet_peer_aliveness_enabled(lp))
4312 snprintf(aliveness, LNET_MAX_STR_LEN,
4313 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4315 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4316 *refcount = kref_read(&lp->lpni_kref);
4317 *ni_peer_tx_credits =
4318 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4319 *peer_tx_credits = lp->lpni_txcredits;
4320 *peer_rtr_credits = lp->lpni_rtrcredits;
4321 *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4322 *peer_tx_qnob = lp->lpni_txqnob;
4328 lnet_net_unlock(*cpt_iter);
4332 return found ? 0 : -ENOENT;
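/* A hedged usage sketch: enumerate the nid4 peer NIs visible through this
 * interface by bumping peer_index until -ENOENT. How *cpt_iter advances
 * across peer tables is up to the ioctl caller and is not shown; all local
 * names below are illustrative.
 */
#if 0
	char aliveness[LNET_MAX_STR_LEN];
	__u32 idx, cpt = 0, ref, ni_tx, tx, rtr, min_rtr, qnob;
	__u64 nid4;

	for (idx = 0;
	     lnet_get_peer_ni_info(idx, &nid4, aliveness, &cpt, &ref,
				   &ni_tx, &tx, &rtr, &min_rtr, &qnob) == 0;
	     idx++)
		CDEBUG(D_NET, "peer %s refs %u\n",
		       libcfs_nid2str(nid4), ref);
#endif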
4335 /* ln_api_mutex is held, which keeps the peer list stable */
4336 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4338 struct lnet_ioctl_element_stats *lpni_stats;
4339 struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4340 struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4341 struct lnet_peer_ni_credit_info *lpni_info;
4342 struct lnet_peer_ni *lpni;
4343 struct lnet_peer *lp;
4345 struct lnet_nid nid;
4349 lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4350 lp = lnet_find_peer(&nid);
4356 size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4357 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4358 size *= lp->lp_nnis;
4359 if (size > cfg->prcfg_size) {
4360 cfg->prcfg_size = size;
4365 cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4366 cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4367 cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4368 cfg->prcfg_count = lp->lp_nnis;
4369 cfg->prcfg_size = size;
4370 cfg->prcfg_state = lp->lp_state;
4372 /* Allocate helper buffers. */
4374 LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4377 LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4380 LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4381 if (!lpni_msg_stats)
4382 goto out_free_stats;
4383 LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4385 goto out_free_msg_stats;
4390 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4391 if (!nid_is_nid4(&lpni->lpni_nid))
4393 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4394 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4395 goto out_free_hstats;
4396 bulk += sizeof(nid4);
4398 memset(lpni_info, 0, sizeof(*lpni_info));
4399 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4400 if (lnet_isrouter(lpni) ||
4401 lnet_peer_aliveness_enabled(lpni))
4402 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4403 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4405 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4406 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4407 lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4408 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4409 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4410 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4411 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4412 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4413 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4414 goto out_free_hstats;
4415 bulk += sizeof(*lpni_info);
4417 memset(lpni_stats, 0, sizeof(*lpni_stats));
4418 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4419 LNET_STATS_TYPE_SEND);
4420 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4421 LNET_STATS_TYPE_RECV);
4422 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4423 LNET_STATS_TYPE_DROP);
4424 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4425 goto out_free_hstats;
4426 bulk += sizeof(*lpni_stats);
4427 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4428 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4429 goto out_free_hstats;
4430 bulk += sizeof(*lpni_msg_stats);
4431 lpni_hstats->hlpni_network_timeout =
4432 atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4433 lpni_hstats->hlpni_remote_dropped =
4434 atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4435 lpni_hstats->hlpni_remote_timeout =
4436 atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4437 lpni_hstats->hlpni_remote_error =
4438 atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4439 lpni_hstats->hlpni_health_value =
4440 atomic_read(&lpni->lpni_healthv);
4441 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4442 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4443 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4444 goto out_free_hstats;
4445 bulk += sizeof(*lpni_hstats);
4450 LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4452 LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4454 LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4456 LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4458 lnet_peer_decref_locked(lp);
4463 /* must hold net_lock/0 */
4465 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4466 struct list_head *recovery_queue,
4469 /* the monitor thread could've shut down and cleaned up the queues */
4470 if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4473 if (!list_empty(&lpni->lpni_recovery))
4476 if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4479 if (!lpni->lpni_last_alive) {
4481 "lpni %s(%p) not eligible for recovery last alive %lld\n",
4482 libcfs_nidstr(&lpni->lpni_nid), lpni,
4483 lpni->lpni_last_alive);
4487 if (lnet_recovery_limit &&
4488 now > lpni->lpni_last_alive + lnet_recovery_limit) {
4489 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4490 libcfs_nidstr(&lpni->lpni_nid),
4491 lpni->lpni_last_alive);
4492 /* Reset the ping count so that if this peer NI is added back to
4493 * the recovery queue we will send the first ping right away.
4495 lpni->lpni_ping_count = 0;
4499 /* This peer NI is going on the recovery queue, so take a ref on it */
4500 lnet_peer_ni_addref_locked(lpni);
4502 lnet_peer_ni_set_next_ping(lpni, now);
4504 CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4505 libcfs_nidstr(&lpni->lpni_nid),
4506 lpni->lpni_ping_count,
4507 lpni->lpni_next_ping,
4508 lpni->lpni_last_alive,
4509 atomic_read(&lpni->lpni_healthv));
4511 list_add_tail(&lpni->lpni_recovery, recovery_queue);
4514 /* Call with the ln_api_mutex held */
4516 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4518 struct lnet_peer_table *ptable;
4519 struct lnet_peer *lp;
4520 struct lnet_peer_net *lpn;
4521 struct lnet_peer_ni *lpni;
4526 if (the_lnet.ln_state != LNET_STATE_RUNNING)
4529 now = ktime_get_seconds();
4532 lnet_net_lock(LNET_LOCK_EX);
4533 lpni = lnet_peer_ni_find_locked(nid);
4535 lnet_net_unlock(LNET_LOCK_EX);
4538 lnet_set_lpni_healthv_locked(lpni, value);
4539 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4540 &the_lnet.ln_mt_peerNIRecovq, now);
4541 lnet_peer_ni_decref_locked(lpni);
4542 lnet_net_unlock(LNET_LOCK_EX);
4546 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4549 * Walk all the peers and reset the health value for each one to the specified value.
4552 lnet_net_lock(LNET_LOCK_EX);
4553 for (cpt = 0; cpt < lncpt; cpt++) {
4554 ptable = the_lnet.ln_peer_tables[cpt];
4555 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4556 list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4557 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4559 lnet_set_lpni_healthv_locked(lpni,
4561 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4562 &the_lnet.ln_mt_peerNIRecovq, now);
4567 lnet_net_unlock(LNET_LOCK_EX);
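/* Usage note (illustrative): with all == false the function above resets a
 * single peer NI, e.g. restoring it to full health after an admin
 * intervention; with all == true the walk above resets every peer NI and
 * the nid argument is not consulted. A minimal sketch, assuming
 * ln_api_mutex is held as required:
 */
#if 0
	lnet_peer_ni_set_healthv(&nid, LNET_MAX_HEALTH_VALUE, false);
#endif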