1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Use is subject to license terms.
6 * Copyright (c) 2012, 2017, Intel Corporation.
7 */
9 /* This file is part of Lustre, http://www.lustre.org/ */
11 #define DEBUG_SUBSYSTEM S_LNET
13 #include <linux/sched.h>
14 #ifdef HAVE_SCHED_HEADERS
15 #include <linux/sched/signal.h>
16 #endif
17 #include <linux/uaccess.h>
19 #include <lnet/udsp.h>
20 #include <lnet/lib-lnet.h>
21 #include <uapi/linux/lnet/lnet-dlc.h>
23 /* Value indicating that recovery needs to re-check a peer immediately. */
24 #define LNET_REDISCOVER_PEER (1)
26 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
27 static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
28 unsigned int flags);
31 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
33 if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
34 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
35 lnet_peer_ni_decref_locked(lpni);
40 lnet_peer_net_added(struct lnet_net *net)
42 struct lnet_peer_ni *lpni, *tmp;
44 list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
45 lpni_on_remote_peer_ni_list) {
47 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
50 spin_lock(&lpni->lpni_lock);
51 lpni->lpni_txcredits =
52 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
53 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
54 lpni->lpni_rtrcredits =
55 lnet_peer_buffer_credits(lpni->lpni_net);
56 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
57 spin_unlock(&lpni->lpni_lock);
59 lnet_peer_remove_from_remote_list(lpni);
65 lnet_peer_tables_destroy(void)
67 struct lnet_peer_table *ptable;
68 struct list_head *hash;
72 if (!the_lnet.ln_peer_tables)
75 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
76 hash = ptable->pt_hash;
77 if (!hash) /* not initialized */
80 LASSERT(list_empty(&ptable->pt_zombie_list));
82 ptable->pt_hash = NULL;
83 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
84 LASSERT(list_empty(&hash[j]));
86 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
89 cfs_percpt_free(the_lnet.ln_peer_tables);
90 the_lnet.ln_peer_tables = NULL;
94 lnet_peer_tables_create(void)
96 struct lnet_peer_table *ptable;
97 struct list_head *hash;
101 the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
103 if (the_lnet.ln_peer_tables == NULL) {
104 CERROR("Failed to allocate cpu-partition peer tables\n");
108 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
109 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
110 LNET_PEER_HASH_SIZE * sizeof(*hash));
112 CERROR("Failed to create peer hash table\n");
113 lnet_peer_tables_destroy();
117 spin_lock_init(&ptable->pt_zombie_lock);
118 INIT_LIST_HEAD(&ptable->pt_zombie_list);
120 INIT_LIST_HEAD(&ptable->pt_peer_list);
122 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
123 INIT_LIST_HEAD(&hash[j]);
124 ptable->pt_hash = hash; /* sign of initialization */
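/* A minimal sketch of the intended create/destroy pairing; the real
 * call sites are assumed to live in LNet startup/shutdown code outside
 * this file:
 */
#if 0
	rc = lnet_peer_tables_create();		/* allocate per-CPT tables */
	if (rc != 0)
		goto failed;			/* -ENOMEM on allocation failure */
	/* ... peer tables in use ... */
	lnet_peer_tables_destroy();		/* requires the tables to be empty */
#endif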
130 static struct lnet_peer_ni *
131 lnet_peer_ni_alloc(struct lnet_nid *nid)
133 struct lnet_peer_ni *lpni;
134 struct lnet_net *net;
137 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
139 LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
143 INIT_LIST_HEAD(&lpni->lpni_txq);
144 INIT_LIST_HEAD(&lpni->lpni_hashlist);
145 INIT_LIST_HEAD(&lpni->lpni_peer_nis);
146 INIT_LIST_HEAD(&lpni->lpni_recovery);
147 INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
148 INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
149 LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
150 kref_init(&lpni->lpni_kref);
151 lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
153 spin_lock_init(&lpni->lpni_lock);
155 if (lnet_peers_start_down())
156 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
158 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
159 lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
160 lpni->lpni_nid = *nid;
161 lpni->lpni_cpt = cpt;
162 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
164 net = lnet_get_net_locked(LNET_NID_NET(nid));
165 lpni->lpni_net = net;
167 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
168 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
169 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
170 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
173 * This peer_ni is not on a local network, so we
174 * cannot add the credits here. In case the net is
175 * added later, add the peer_ni to the remote peer ni
176 * list so it can be easily found and revisited.
178 /* FIXME: per-net implementation instead? */
179 lnet_peer_ni_addref_locked(lpni);
180 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
181 &the_lnet.ln_remote_peer_ni_list);
184 CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
189 static struct lnet_peer_net *
190 lnet_peer_net_alloc(__u32 net_id)
192 struct lnet_peer_net *lpn;
194 LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
198 INIT_LIST_HEAD(&lpn->lpn_peer_nets);
199 INIT_LIST_HEAD(&lpn->lpn_peer_nis);
200 lpn->lpn_net_id = net_id;
201 lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
203 CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
209 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
211 struct lnet_peer *lp;
213 CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
215 LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
216 LASSERT(list_empty(&lpn->lpn_peer_nis));
217 LASSERT(list_empty(&lpn->lpn_peer_nets));
219 lpn->lpn_peer = NULL;
220 LIBCFS_FREE(lpn, sizeof(*lpn));
222 lnet_peer_decref_locked(lp);
225 static struct lnet_peer *
226 lnet_peer_alloc(struct lnet_nid *nid)
228 struct lnet_peer *lp;
230 LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
234 INIT_LIST_HEAD(&lp->lp_rtrq);
235 INIT_LIST_HEAD(&lp->lp_routes);
236 INIT_LIST_HEAD(&lp->lp_peer_list);
237 INIT_LIST_HEAD(&lp->lp_peer_nets);
238 INIT_LIST_HEAD(&lp->lp_dc_list);
239 INIT_LIST_HEAD(&lp->lp_dc_pendq);
240 INIT_LIST_HEAD(&lp->lp_rtr_list);
241 init_waitqueue_head(&lp->lp_dc_waitq);
242 spin_lock_init(&lp->lp_lock);
243 lp->lp_primary_nid = *nid;
244 lp->lp_disc_src_nid = LNET_ANY_NID;
245 lp->lp_disc_dst_nid = LNET_ANY_NID;
246 lp->lp_merge_primary_nid = LNET_ANY_NID;
247 if (lnet_peers_start_down())
248 lp->lp_alive = false;
253 * all peers created on a router should have health on
254 * if it's not already on.
256 if (the_lnet.ln_routing && !lnet_health_sensitivity)
257 lp->lp_health_sensitivity = 1;
260 * Turn off discovery for loopback peer. If you're creating a peer
261 * for the loopback interface then that was initiated when we
262 * attempted to send a message over the loopback. There is no need
263 * to ever use a different interface when sending messages to
267 lp->lp_state = LNET_PEER_NO_DISCOVERY;
268 lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
270 CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
276 lnet_destroy_peer_locked(struct lnet_peer *lp)
278 CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
280 LASSERT(atomic_read(&lp->lp_refcount) == 0);
281 LASSERT(lp->lp_rtr_refcount == 0);
282 LASSERT(list_empty(&lp->lp_peer_nets));
283 LASSERT(list_empty(&lp->lp_peer_list));
284 LASSERT(list_empty(&lp->lp_dc_list));
287 lnet_ping_buffer_decref(lp->lp_data);
290 * if there are messages still on the pending queue, then make
291 * sure to queue them on the ln_msg_resend list so they can be
292 * resent at a later point if the discovery thread is still
294 * If the discovery thread has stopped, then the wakeup will be a
295 * no-op, and it is expected the lnet_shutdown_lndnets() will
296 * eventually be called, which will traverse this list and
297 * finalize the messages on the list.
298 * We cannot resend them now because we're holding the cpt lock.
299 * Releasing the lock can cause an inconsistent state.
301 spin_lock(&the_lnet.ln_msg_resend_lock);
302 spin_lock(&lp->lp_lock);
303 list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
304 spin_unlock(&lp->lp_lock);
305 spin_unlock(&the_lnet.ln_msg_resend_lock);
306 wake_up(&the_lnet.ln_dc_waitq);
308 LIBCFS_FREE(lp, sizeof(*lp));
312 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
313 * that peer_net, detach the peer_net from the peer.
315 * Call with lnet_net_lock/EX held
318 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
320 struct lnet_peer_table *ptable;
321 struct lnet_peer_net *lpn;
322 struct lnet_peer *lp;
325 * Belts and suspenders: gracefully handle teardown of a
326 * partially connected peer_ni.
328 lpn = lpni->lpni_peer_net;
330 list_del_init(&lpni->lpni_peer_nis);
332 * If there are no lpni's left, we detach lpn from
333 * lp_peer_nets, so it cannot be found anymore.
335 if (list_empty(&lpn->lpn_peer_nis))
336 list_del_init(&lpn->lpn_peer_nets);
338 /* Update peer NID count. */
343 * If there are no more peer nets, make the peer unfindable
344 * via the peer_tables.
346 * Otherwise, if the peer is DISCOVERED, tell discovery to
347 * take another look at it. This is a no-op if discovery for
348 * this peer did the detaching.
350 if (list_empty(&lp->lp_peer_nets)) {
351 list_del_init(&lp->lp_peer_list);
352 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
354 } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
355 /* Discovery isn't running, nothing to do here. */
356 } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
357 lnet_peer_queue_for_discovery(lp);
358 wake_up(&the_lnet.ln_dc_waitq);
360 CDEBUG(D_NET, "peer %s NID %s\n",
361 libcfs_nidstr(&lp->lp_primary_nid),
362 libcfs_nidstr(&lpni->lpni_nid));
365 /* called with lnet_net_lock LNET_LOCK_EX held */
367 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
369 struct lnet_peer_table *ptable = NULL;
371 /* don't remove a peer_ni if it's also a gateway */
372 if (lnet_isrouter(lpni) && !force) {
373 CERROR("Peer NI %s is a gateway. Can not delete it\n",
374 libcfs_nidstr(&lpni->lpni_nid));
378 lnet_peer_remove_from_remote_list(lpni);
380 /* remove peer ni from the hash list. */
381 list_del_init(&lpni->lpni_hashlist);
384 * indicate the peer is being deleted so the monitor thread can
385 * remove it from the recovery queue.
387 spin_lock(&lpni->lpni_lock);
388 lpni->lpni_state |= LNET_PEER_NI_DELETING;
389 spin_unlock(&lpni->lpni_lock);
391 /* decrement the ref count on the peer table */
392 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
395 * The peer_ni can no longer be found with a lookup. But there
396 * can be current users, so keep track of it on the zombie
397 * list until the reference count has gone to zero.
399 * The last reference may be lost in a place where the
400 * lnet_net_lock locks only a single cpt, and that cpt may not
401 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
404 spin_lock(&ptable->pt_zombie_lock);
405 list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
406 ptable->pt_zombies++;
407 spin_unlock(&ptable->pt_zombie_lock);
409 /* no need to keep this peer_ni on the hierarchy anymore */
410 lnet_peer_detach_peer_ni_locked(lpni);
412 /* remove hashlist reference on peer_ni */
413 lnet_peer_ni_decref_locked(lpni);
418 void lnet_peer_uninit(void)
420 struct lnet_peer_ni *lpni, *tmp;
422 lnet_net_lock(LNET_LOCK_EX);
424 /* remove all peer_nis from the remote peer and the hash list */
425 list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
426 lpni_on_remote_peer_ni_list)
427 lnet_peer_ni_del_locked(lpni, false);
429 lnet_peer_tables_destroy();
431 lnet_net_unlock(LNET_LOCK_EX);
435 lnet_peer_del_locked(struct lnet_peer *peer)
437 struct lnet_peer_ni *lpni = NULL, *lpni2;
440 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
442 spin_lock(&peer->lp_lock);
443 peer->lp_state |= LNET_PEER_MARK_DELETED;
444 spin_unlock(&peer->lp_lock);
446 lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
447 while (lpni != NULL) {
448 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
449 rc = lnet_peer_ni_del_locked(lpni, false);
459 * Discovering this peer is taking too long. Cancel any Ping or Push
460 * that discovery is waiting on by unlinking the relevant MDs. The
461 * lnet_discovery_event_handler() will proceed from here and complete
464 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
466 struct lnet_handle_md ping_mdh;
467 struct lnet_handle_md push_mdh;
469 LNetInvalidateMDHandle(&ping_mdh);
470 LNetInvalidateMDHandle(&push_mdh);
472 spin_lock(&lp->lp_lock);
473 if (lp->lp_state & LNET_PEER_PING_SENT) {
474 ping_mdh = lp->lp_ping_mdh;
475 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
477 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
478 push_mdh = lp->lp_push_mdh;
479 LNetInvalidateMDHandle(&lp->lp_push_mdh);
481 spin_unlock(&lp->lp_lock);
483 if (!LNetMDHandleIsInvalid(ping_mdh))
484 LNetMDUnlink(ping_mdh);
485 if (!LNetMDHandleIsInvalid(push_mdh))
486 LNetMDUnlink(push_mdh);
490 lnet_peer_del(struct lnet_peer *peer)
494 lnet_peer_cancel_discovery(peer);
495 lnet_net_lock(LNET_LOCK_EX);
496 rc = lnet_peer_del_locked(peer);
497 lnet_net_unlock(LNET_LOCK_EX);
503 * Delete a NID from a peer. Call with ln_api_mutex held.
506 * -EPERM: Non-DLC deletion from DLC-configured peer.
507 * -ENOENT: No lnet_peer_ni corresponding to the nid.
508 * -ECHILD: The lnet_peer_ni isn't connected to the peer.
509 * -EBUSY: The lnet_peer_ni is the primary, and not the only peer_ni.
512 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
515 struct lnet_peer_ni *lpni;
516 struct lnet_nid primary_nid = lp->lp_primary_nid;
518 bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
520 if (!(flags & LNET_PEER_CONFIGURED)) {
521 if (lp->lp_state & LNET_PEER_CONFIGURED) {
527 /* If we're asked to lock down the primary NID we shouldn't be
530 if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
531 nid_same(&primary_nid, nid)) {
536 lpni = lnet_peer_ni_find_locked(nid);
541 if (lp != lpni->lpni_peer_net->lpn_peer) {
543 lnet_peer_ni_decref_locked(lpni);
548 * This function only allows deletion of the primary NID if it
551 if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
553 lnet_peer_ni_decref_locked(lpni);
557 lnet_net_lock(LNET_LOCK_EX);
559 if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
560 struct lnet_peer_ni *lpni2;
561 /* assign the next peer_ni to be the primary */
562 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
564 lp->lp_primary_nid = lpni2->lpni_nid;
566 rc = lnet_peer_ni_del_locked(lpni, force);
567 lnet_peer_ni_decref_locked(lpni);
569 lnet_net_unlock(LNET_LOCK_EX);
572 CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
573 libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
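/* Illustrative only: a hypothetical caller reacting to the error codes
 * documented above (the example_* name is not part of LNet):
 */
#if 0
static int example_remove_nid(struct lnet_peer *lp, struct lnet_nid *nid)
{
	int rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);

	if (rc == -EBUSY)		/* primary NID while other NIDs remain */
		CDEBUG(D_NET, "delete the other NIDs first\n");
	else if (rc == -ECHILD)		/* NID belongs to a different peer */
		CDEBUG(D_NET, "NID is not connected to this peer\n");
	return rc;
}
#endif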
580 lnet_peer_table_cleanup_locked(struct lnet_net *net,
581 struct lnet_peer_table *ptable)
584 struct lnet_peer_ni *next;
585 struct lnet_peer_ni *lpni;
586 struct lnet_peer *peer;
588 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
589 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
591 if (net != NULL && net != lpni->lpni_net)
594 peer = lpni->lpni_peer_net->lpn_peer;
595 if (!nid_same(&peer->lp_primary_nid,
597 lnet_peer_ni_del_locked(lpni, false);
601 * Removing the primary NID implies removing
602 * the entire peer. Advance next beyond any
603 * peer_ni that belongs to the same peer.
605 list_for_each_entry_from(next, &ptable->pt_hash[i],
607 if (next->lpni_peer_net->lpn_peer != peer)
610 lnet_peer_del_locked(peer);
616 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
618 wait_var_event_warning(&ptable->pt_zombies,
619 ptable->pt_zombies == 0,
620 "Waiting for %d zombies on peer table\n",
625 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
626 struct lnet_peer_table *ptable)
628 struct lnet_peer_ni *lp;
629 struct lnet_peer_ni *tmp;
630 struct lnet_nid gw_nid;
633 for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
634 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
636 if (net != lp->lpni_net)
639 if (!lnet_isrouter(lp))
642 gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
644 lnet_net_unlock(LNET_LOCK_EX);
645 lnet_del_route(LNET_NET_ANY, &gw_nid);
646 lnet_net_lock(LNET_LOCK_EX);
652 lnet_peer_tables_cleanup(struct lnet_net *net)
655 struct lnet_peer_table *ptable;
657 LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
658 /* If just deleting the peers for a NI, get rid of any routes these
659 * peers are gateways for. */
660 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
661 lnet_net_lock(LNET_LOCK_EX);
662 lnet_peer_table_del_rtrs_locked(net, ptable);
663 lnet_net_unlock(LNET_LOCK_EX);
666 /* Start the cleanup process */
667 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
668 lnet_net_lock(LNET_LOCK_EX);
669 lnet_peer_table_cleanup_locked(net, ptable);
670 lnet_net_unlock(LNET_LOCK_EX);
673 cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
674 lnet_peer_ni_finalize_wait(ptable);
677 static struct lnet_peer_ni *
678 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
680 struct list_head *peers;
681 struct lnet_peer_ni *lp;
683 if (the_lnet.ln_state != LNET_STATE_RUNNING)
686 peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
687 list_for_each_entry(lp, peers, lpni_hashlist) {
688 if (nid_same(&lp->lpni_nid, nid)) {
689 lnet_peer_ni_addref_locked(lp);
697 struct lnet_peer_ni *
698 lnet_peer_ni_find_locked(struct lnet_nid *nid)
700 struct lnet_peer_ni *lpni;
701 struct lnet_peer_table *ptable;
704 cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
706 ptable = the_lnet.ln_peer_tables[cpt];
707 lpni = lnet_get_peer_ni_locked(ptable, nid);
712 struct lnet_peer_ni *
713 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
715 struct lnet_peer_net *lpn;
716 struct lnet_peer_ni *lpni;
718 lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
722 list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
723 if (nid_same(&lpni->lpni_nid, nid))
731 lnet_find_peer(struct lnet_nid *nid)
733 struct lnet_peer_ni *lpni;
734 struct lnet_peer *lp = NULL;
737 cpt = lnet_net_lock_current();
738 lpni = lnet_peer_ni_find_locked(nid);
740 lp = lpni->lpni_peer_net->lpn_peer;
741 lnet_peer_addref_locked(lp);
742 lnet_peer_ni_decref_locked(lpni);
744 lnet_net_unlock(cpt);
749 struct lnet_peer_net *
750 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
752 struct lnet_peer_net *net;
755 /* no net id provided, return the first net */
756 net = list_first_entry_or_null(&lp->lp_peer_nets,
757 struct lnet_peer_net,
763 /* find the net after the one provided */
764 list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
765 if (net->lpn_net_id == prev_lpn_id) {
767 * if we reached the end of the list, loop to the
770 if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
771 return list_first_entry_or_null(&lp->lp_peer_nets,
772 struct lnet_peer_net,
775 return list_next_entry(net, lpn_peer_nets);
782 struct lnet_peer_ni *
783 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
784 struct lnet_peer_net *peer_net,
785 struct lnet_peer_ni *prev)
787 struct lnet_peer_ni *lpni;
788 struct lnet_peer_net *net = peer_net;
792 if (list_empty(&peer->lp_peer_nets))
795 net = list_first_entry(&peer->lp_peer_nets,
796 struct lnet_peer_net,
799 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
805 if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
807 * if we reached the end of the peer ni list and the peer
808 * net is specified then there are no more peer nis in that
815 * we reached the end of this net ni list. move to the
818 if (prev->lpni_peer_net->lpn_peer_nets.next ==
820 /* no more nets and no more NIs. */
823 /* get the next net */
824 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
825 struct lnet_peer_net,
827 /* get the ni on it */
828 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
834 /* there are more nis left */
835 lpni = list_first_entry(&prev->lpni_peer_nis,
836 struct lnet_peer_ni, lpni_peer_nis);
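/* The canonical iteration pattern over every peer_ni of a peer, as used
 * by lnet_peer_clr_non_mr_pref_nids() below (call with lnet_net_lock
 * held, per the _locked convention):
 */
#if 0
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		/* visit lpni */;
#endif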
841 /* Call with the ln_api_mutex held */
842 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
844 struct lnet_process_id id;
845 struct lnet_peer_table *ptable;
846 struct lnet_peer *lp;
855 if (the_lnet.ln_state != LNET_STATE_RUNNING)
858 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
861 * Count the number of peers, and return E2BIG if the buffer
862 * is too small. We'll also return the desired size.
865 for (cpt = 0; cpt < lncpt; cpt++) {
866 ptable = the_lnet.ln_peer_tables[cpt];
867 count += ptable->pt_peers;
869 size = count * sizeof(*ids);
874 * Walk the peer lists and copy out the primary nids.
875 * This is safe because the peer lists are only modified
876 * while the ln_api_mutex is held. So we don't need to
877 * hold the lnet_net_lock as well, and can therefore
878 * directly call copy_to_user().
881 memset(&id, 0, sizeof(id));
882 id.pid = LNET_PID_LUSTRE;
884 for (cpt = 0; cpt < lncpt; cpt++) {
885 ptable = the_lnet.ln_peer_tables[cpt];
886 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
887 if (!nid_is_nid4(&lp->lp_primary_nid))
891 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
892 if (copy_to_user(&ids[i], &id, sizeof(id)))
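/* Sketch of the size-probe protocol implemented above: a hypothetical
 * ioctl caller may pass a zero-sized buffer first, learn the required
 * size from *sizep on the -E2BIG failure, then retry (the allocation
 * helper is hypothetical):
 */
#if 0
	struct lnet_process_id __user *ids;
	u32 count = 0, size = 0;
	int rc;

	rc = lnet_get_peer_list(&count, &size, NULL);	/* -E2BIG if peers exist */
	ids = allocate_id_buffer(size);			/* hypothetical helper */
	rc = lnet_get_peer_list(&count, &size, ids);
#endif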
905 * Start pushes to peers that need to be updated for a configuration
906 * change on this node.
909 lnet_push_update_to_peers(int force)
911 struct lnet_peer_table *ptable;
912 struct lnet_peer *lp;
916 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
919 lnet_net_lock(LNET_LOCK_EX);
920 if (lnet_peer_discovery_disabled)
922 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
923 for (cpt = 0; cpt < lncpt; cpt++) {
924 ptable = the_lnet.ln_peer_tables[cpt];
925 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
927 spin_lock(&lp->lp_lock);
928 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
929 lp->lp_state |= LNET_PEER_FORCE_PUSH;
930 spin_unlock(&lp->lp_lock);
932 if (lnet_peer_needs_push(lp))
933 lnet_peer_queue_for_discovery(lp);
936 lnet_net_unlock(LNET_LOCK_EX);
937 wake_up(&the_lnet.ln_dc_waitq);
940 /* find the NID in the preferred gateways for the remote peer
942 * false: list is not empty and NID is not preferred
943 * false: list is empty
944 * true: nid is found in the list
947 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
948 struct lnet_nid *gw_nid)
950 struct lnet_nid_list *ne;
952 CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
953 libcfs_nidstr(&lpni->lpni_nid),
954 list_empty(&lpni->lpni_rtr_pref_nids));
956 if (list_empty(&lpni->lpni_rtr_pref_nids))
959 /* iterate through all the preferred NIDs and see if any of them
960 * matches the provided gw_nid
962 list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
963 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
964 libcfs_nidstr(&ne->nl_nid),
965 libcfs_nidstr(gw_nid));
966 if (nid_same(&ne->nl_nid, gw_nid))
974 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
976 struct list_head zombies;
977 struct lnet_nid_list *ne;
978 struct lnet_nid_list *tmp;
979 int cpt = lpni->lpni_cpt;
981 INIT_LIST_HEAD(&zombies);
984 list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
985 lnet_net_unlock(cpt);
987 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
988 list_del(&ne->nl_list);
989 LIBCFS_FREE(ne, sizeof(*ne));
994 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
995 struct lnet_nid *gw_nid)
997 int cpt = lpni->lpni_cpt;
998 struct lnet_nid_list *ne = NULL;
1000 /* This function is called with api_mutex held. When the api_mutex
1001 * is held the list cannot be modified, as it is only modified as
1002 * a result of applying a UDSP, and that happens under api_mutex
1005 __must_hold(&the_lnet.ln_api_mutex);
1007 list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1008 if (nid_same(&ne->nl_nid, gw_nid))
1012 LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1016 ne->nl_nid = *gw_nid;
1018 /* Lock the cpt to protect against addition and checks in the
1019 * selection algorithm
1022 list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1023 lnet_net_unlock(cpt);
1029 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
1030 * this is a preferred point-to-point path. Call with lnet_net_lock in
1034 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1036 struct lnet_nid_list *ne;
1038 if (lpni->lpni_pref_nnids == 0)
1040 if (lpni->lpni_pref_nnids == 1)
1041 return nid_same(&lpni->lpni_pref.nid, nid);
1042 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1043 if (nid_same(&ne->nl_nid, nid))
1050 * Set a single ni as preferred, provided no preferred ni is already
1051 * defined. Only to be used for non-multi-rail peer_ni.
1054 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1055 struct lnet_nid *nid)
1061 spin_lock(&lpni->lpni_lock);
1062 if (LNET_NID_IS_ANY(nid)) {
1064 } else if (lpni->lpni_pref_nnids > 0) {
1066 } else if (lpni->lpni_pref_nnids == 0) {
1067 lpni->lpni_pref.nid = *nid;
1068 lpni->lpni_pref_nnids = 1;
1069 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1071 spin_unlock(&lpni->lpni_lock);
1073 CDEBUG(D_NET, "peer %s nid %s: %d\n",
1074 libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1079 * Clear the preferred NID from a non-multi-rail peer_ni, provided
1080 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1083 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1087 spin_lock(&lpni->lpni_lock);
1088 if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1089 lpni->lpni_pref_nnids = 0;
1090 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1091 } else if (lpni->lpni_pref_nnids == 0) {
1096 spin_unlock(&lpni->lpni_lock);
1098 CDEBUG(D_NET, "peer %s: %d\n",
1099 libcfs_nidstr(&lpni->lpni_nid), rc);
1104 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1106 lpni->lpni_sel_priority = priority;
1110 * Clear the preferred NIDs from a non-multi-rail peer.
1113 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1115 struct lnet_peer_ni *lpni = NULL;
1117 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1118 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1122 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1124 struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1125 struct lnet_nid_list *ne1 = NULL;
1126 struct lnet_nid_list *ne2 = NULL;
1127 struct lnet_nid *tmp_nid = NULL;
1130 if (LNET_NID_IS_ANY(nid)) {
1135 if (lpni->lpni_pref_nnids == 1 &&
1136 nid_same(&lpni->lpni_pref.nid, nid)) {
1141 /* A non-MR node may have only one preferred NI per peer_ni */
1142 if (lpni->lpni_pref_nnids > 0 &&
1143 !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1148 /* add the new preferred nid to the list of preferred nids */
1149 if (lpni->lpni_pref_nnids != 0) {
1150 size_t alloc_size = sizeof(*ne1);
1152 if (lpni->lpni_pref_nnids == 1) {
1153 tmp_nid = &lpni->lpni_pref.nid;
1154 INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1157 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1158 if (nid_same(&ne1->nl_nid, nid)) {
1164 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1171 /* move the originally stored nid to the list */
1172 if (lpni->lpni_pref_nnids == 1) {
1173 LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1174 lpni->lpni_cpt, alloc_size);
1179 INIT_LIST_HEAD(&ne2->nl_list);
1180 ne2->nl_nid = *tmp_nid;
1185 lnet_net_lock(LNET_LOCK_EX);
1186 spin_lock(&lpni->lpni_lock);
1187 if (lpni->lpni_pref_nnids == 0) {
1188 lpni->lpni_pref.nid = *nid;
1191 list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1192 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1194 lpni->lpni_pref_nnids++;
1195 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1196 spin_unlock(&lpni->lpni_lock);
1197 lnet_net_unlock(LNET_LOCK_EX);
1200 if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1201 spin_lock(&lpni->lpni_lock);
1202 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1203 spin_unlock(&lpni->lpni_lock);
1205 CDEBUG(D_NET, "peer %s nid %s: %d\n",
1206 libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1211 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1213 struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1214 struct lnet_nid_list *ne = NULL;
1217 if (lpni->lpni_pref_nnids == 0) {
1222 if (lpni->lpni_pref_nnids == 1) {
1223 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1228 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1229 if (nid_same(&ne->nl_nid, nid))
1230 goto remove_nid_entry;
1238 lnet_net_lock(LNET_LOCK_EX);
1239 spin_lock(&lpni->lpni_lock);
1240 if (lpni->lpni_pref_nnids == 1)
1241 lpni->lpni_pref.nid = LNET_ANY_NID;
1243 list_del_init(&ne->nl_list);
1244 if (lpni->lpni_pref_nnids == 2) {
1245 struct lnet_nid_list *ne, *tmp;
1247 list_for_each_entry_safe(ne, tmp,
1248 &lpni->lpni_pref.nids,
1250 lpni->lpni_pref.nid = ne->nl_nid;
1251 list_del_init(&ne->nl_list);
1252 LIBCFS_FREE(ne, sizeof(*ne));
1256 lpni->lpni_pref_nnids--;
1257 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1258 spin_unlock(&lpni->lpni_lock);
1259 lnet_net_unlock(LNET_LOCK_EX);
1262 LIBCFS_FREE(ne, sizeof(*ne));
1264 CDEBUG(D_NET, "peer %s nid %s: %d\n",
1265 libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1270 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1272 struct list_head zombies;
1273 struct lnet_nid_list *ne;
1274 struct lnet_nid_list *tmp;
1276 INIT_LIST_HEAD(&zombies);
1278 lnet_net_lock(LNET_LOCK_EX);
1279 if (lpni->lpni_pref_nnids == 1)
1280 lpni->lpni_pref.nid = LNET_ANY_NID;
1281 else if (lpni->lpni_pref_nnids > 1)
1282 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1283 lpni->lpni_pref_nnids = 0;
1284 lnet_net_unlock(LNET_LOCK_EX);
1286 list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1287 list_del_init(&ne->nl_list);
1288 LIBCFS_FREE(ne, sizeof(*ne));
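/* Note the pattern above (shared with lnet_peer_clr_pref_rtrs()): the
 * entries are spliced onto a local zombies list while the net lock is
 * held, and LIBCFS_FREE() runs only after the lock is dropped, keeping
 * the critical section short. */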
1293 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1295 struct lnet_peer_ni *lpni;
1298 lpni = lnet_peer_ni_find_locked(nid);
1300 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1301 lnet_peer_ni_decref_locked(lpni);
1306 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1307 __must_hold(&lp->lp_lock)
1309 if (lnet_peer_discovery_disabled)
1312 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1313 (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1324 lnet_is_discovery_disabled(struct lnet_peer *lp)
1328 spin_lock(&lp->lp_lock);
1329 rc = lnet_is_discovery_disabled_locked(lp);
1330 spin_unlock(&lp->lp_lock);
1336 lnet_discover_peer_nid(struct lnet_nid *nid)
1338 int cpt = lnet_net_lock_current();
1339 struct lnet_peer_ni *lpni = lnet_peer_ni_find_locked(nid);
1342 lnet_discover_peer_locked(lpni, cpt, false);
1343 lnet_peer_ni_decref_locked(lpni);
1345 lnet_net_unlock(cpt);
1349 LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
1351 struct lnet_nid pnid = LNET_ANY_NID;
1354 int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;
1356 if (!nids || num_nids < 1)
1359 rc = LNetNIInit(LNET_PID_ANY);
1363 mutex_lock(&the_lnet.ln_api_mutex);
1365 mr = lnet_peer_discovery_disabled == 0;
1368 CDEBUG(D_NET, "num_nids %d\n", num_nids);
1370 for (i = 0; i < num_nids; i++) {
1371 if (nid_is_lo0(&nids[i]))
1374 if (LNET_NID_IS_ANY(&pnid)) {
1376 rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
1377 if (rc == -EALREADY) {
1378 struct lnet_peer *lp;
1380 CDEBUG(D_NET, "A peer exists for NID %s\n",
1381 libcfs_nidstr(&pnid));
1383 /* Adds a refcount */
1384 lp = lnet_find_peer(&pnid);
1386 pnid = lp->lp_primary_nid;
1387 /* Drop refcount from lookup */
1388 lnet_peer_decref_locked(lp);
1389 } else if (mr && !rc) {
1390 lnet_discover_peer_nid(&pnid);
1392 } else if (lnet_peer_discovery_disabled) {
1393 rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
1395 } else if (!nid_same(&pnid, &nids[i])) {
1396 rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID,
1399 if (lock_prim_nid) {
1400 struct lnet_peer *lp;
1401 lp = lnet_find_peer(&nids[i]);
1403 lp->lp_merge_primary_nid = pnid;
1404 lnet_peer_decref_locked(lp);
1407 lnet_discover_peer_nid(&nids[i]);
1411 if (rc && rc != -EEXIST)
1416 mutex_unlock(&the_lnet.ln_api_mutex);
1420 return rc == -EEXIST ? 0 : rc;
1422 EXPORT_SYMBOL(LNetAddPeer);
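/* A minimal usage sketch (hypothetical caller): register a peer whose
 * primary NID is nids[0], with nids[1] as an additional NID. The NID
 * strings are illustrative:
 */
#if 0
	struct lnet_nid nids[2];
	int rc;

	libcfs_strnid(&nids[0], "10.0.0.1@tcp");
	libcfs_strnid(&nids[1], "10.0.0.2@tcp");
	rc = LNetAddPeer(nids, 2);	/* 0 means the peer is present */
#endif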
1424 void LNetPrimaryNID(struct lnet_nid *nid)
1426 struct lnet_peer *lp;
1427 struct lnet_peer_ni *lpni;
1428 struct lnet_nid orig;
1432 if (!nid || nid_is_lo0(nid))
1436 cpt = lnet_net_lock_current();
1437 lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1442 lp = lpni->lpni_peer_net->lpn_peer;
1444 /* If discovery is disabled locally then we needn't bother running
1445 * discovery here because discovery will not modify whatever
1446 * primary NID is currently set for this peer. If the specified peer is
1447 * down then this discovery can introduce long delays into the mount
1448 * process, so skip it if it isn't necessary.
1451 spin_lock(&lp->lp_lock);
1452 /* DD disabled, nothing to do */
1453 if (lnet_peer_discovery_disabled) {
1454 *nid = lp->lp_primary_nid;
1455 spin_unlock(&lp->lp_lock);
1459 /* Peer already up to date, nothing to do */
1460 if (lnet_peer_is_uptodate_locked(lp)) {
1461 *nid = lp->lp_primary_nid;
1462 spin_unlock(&lp->lp_lock);
1465 spin_unlock(&lp->lp_lock);
1467 /* If primary nid locking is enabled, discovery is performed
1468 * in the background.
1469 * If primary nid locking is disabled, discovery blocks here.
1470 * Messages to the peer will not go through until the discovery is
1473 if (lock_prim_nid && lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1474 rc = lnet_discover_peer_locked(lpni, cpt, false);
1476 rc = lnet_discover_peer_locked(lpni, cpt, true);
1480 /* The lpni (or lp) for this NID may have changed and our ref is
1481 * the only thing keeping the old one around. Release the ref
1482 * and lookup the lpni again
1484 lnet_peer_ni_decref_locked(lpni);
1485 lpni = lnet_peer_ni_find_locked(nid);
1490 lp = lpni->lpni_peer_net->lpn_peer;
1492 if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
1494 *nid = lp->lp_primary_nid;
1496 lnet_peer_ni_decref_locked(lpni);
1498 lnet_net_unlock(cpt);
1500 CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1501 libcfs_nidstr(nid), rc);
1503 EXPORT_SYMBOL(LNetPrimaryNID);
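/* Usage sketch (hypothetical caller): resolve any NID of a peer to that
 * peer's primary NID, in place. Per the comments above, this may block
 * on discovery unless primary NID locking defers it to the background:
 */
#if 0
	struct lnet_nid nid;

	libcfs_strnid(&nid, "10.0.0.2@tcp");
	LNetPrimaryNID(&nid);		/* nid now holds the primary NID */
#endif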
1506 LNetPeerDiscovered(struct lnet_nid *nid)
1508 int cpt, disc = false;
1509 struct lnet_peer *lp;
1511 lp = lnet_find_peer(nid);
1515 cpt = lnet_net_lock_current();
1516 spin_lock(&lp->lp_lock);
1517 if (((lp->lp_state & LNET_PEER_DISCOVERED) &&
1518 (lp->lp_state & LNET_PEER_NIDS_UPTODATE)) ||
1519 (lp->lp_state & LNET_PEER_NO_DISCOVERY))
1521 spin_unlock(&lp->lp_lock);
1523 /* Drop refcount from lookup */
1524 lnet_peer_decref_locked(lp);
1525 lnet_net_unlock(cpt);
1527 CDEBUG(D_NET, "Peer NID %s discovered: %d\n", libcfs_nidstr(nid),
1531 EXPORT_SYMBOL(LNetPeerDiscovered);
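/* Usage sketch (hypothetical caller; a real one would bound the wait
 * rather than poll forever):
 */
#if 0
	while (!LNetPeerDiscovered(&nid))
		schedule_timeout_uninterruptible(HZ);
#endif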
1533 struct lnet_peer_net *
1534 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1536 struct lnet_peer_net *peer_net;
1537 list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1538 if (peer_net->lpn_net_id == net_id)
1545 * Attach a peer_ni to a peer_net and peer. This function assumes
1546 * peer_ni is not already attached to the peer_net/peer. The peer_ni
1547 * may be attached to a different peer, in which case it will be
1548 * properly detached first. The whole operation is done atomically.
1550 * This function consumes the reference on lpni and always returns 0.
1551 * This is the last function called from functions that do return an
1552 * int, so returning 0 here allows the compiler to do a tail call.
1555 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1556 struct lnet_peer_net *lpn,
1557 struct lnet_peer_ni *lpni,
1560 struct lnet_peer_table *ptable;
1561 bool new_lpn = false;
1564 /* Install the new peer_ni */
1565 lnet_net_lock(LNET_LOCK_EX);
1566 /* Add peer_ni to global peer table hash, if necessary. */
1567 if (list_empty(&lpni->lpni_hashlist)) {
1568 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1570 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1571 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1572 ptable->pt_version++;
1573 lnet_peer_ni_addref_locked(lpni);
1576 /* Detach the peer_ni from an existing peer, if necessary. */
1577 if (lpni->lpni_peer_net) {
1578 LASSERT(lpni->lpni_peer_net != lpn);
1579 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1580 lnet_peer_detach_peer_ni_locked(lpni);
1581 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1582 lpni->lpni_peer_net = NULL;
1585 /* Add peer_ni to peer_net */
1586 lpni->lpni_peer_net = lpn;
1587 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1588 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1590 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1591 lnet_update_peer_net_healthv(lpni);
1592 lnet_peer_net_addref_locked(lpn);
1594 /* Add peer_net to peer */
1595 if (!lpn->lpn_peer) {
1598 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1599 list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1601 list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1602 lnet_peer_addref_locked(lp);
1605 /* Add peer to global peer list, if necessary */
1606 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1607 if (list_empty(&lp->lp_peer_list)) {
1608 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1612 /* Update peer state */
1613 spin_lock(&lp->lp_lock);
1614 if (flags & LNET_PEER_CONFIGURED) {
1615 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1616 lp->lp_state |= LNET_PEER_CONFIGURED;
1618 if (flags & LNET_PEER_MULTI_RAIL) {
1619 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1620 lp->lp_state |= LNET_PEER_MULTI_RAIL;
1621 lnet_peer_clr_non_mr_pref_nids(lp);
1624 if (flags & LNET_PEER_LOCK_PRIMARY) {
1625 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1626 lp->lp_prim_lock_ts = ktime_get_ns();
1628 spin_unlock(&lp->lp_lock);
1634 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1636 CERROR("Failed to apply UDSPs on lpn %s\n",
1637 libcfs_net2str(lpn->lpn_net_id));
1639 rc = lnet_udsp_apply_policies_on_lpni(lpni);
1641 CERROR("Failed to apply UDSPs on lpni %s\n",
1642 libcfs_nidstr(&lpni->lpni_nid));
1644 CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1645 libcfs_nidstr(&lp->lp_primary_nid),
1646 libcfs_nidstr(&lpni->lpni_nid), flags);
1647 lnet_peer_ni_decref_locked(lpni);
1648 lnet_net_unlock(LNET_LOCK_EX);
1654 * Create a new peer, with nid as its primary nid.
1656 * Call with the lnet_api_mutex held.
1659 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1661 struct lnet_peer *lp;
1662 struct lnet_peer_net *lpn;
1663 struct lnet_peer_ni *lpni;
1669 * No need for the lnet_net_lock here, because the
1670 * lnet_api_mutex is held.
1672 lpni = lnet_peer_ni_find_locked(nid);
1674 /* A peer with this NID already exists. */
1675 lp = lpni->lpni_peer_net->lpn_peer;
1676 lnet_peer_ni_decref_locked(lpni);
1678 * This is an error if the peer was configured and the
1679 * primary NID differs or an attempt is made to change
1680 * the Multi-Rail flag. Otherwise the assumption is
1681 * that an existing peer is being modified.
1683 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1684 if (!nid_same(&lp->lp_primary_nid, nid))
1686 else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1689 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1690 if (nid_same(&lp->lp_primary_nid, nid))
1692 /* we're trying to recreate an existing peer which
1693 * has already been created and its primary NID
1694 * locked. This is likely due to two servers
1695 * existing on the same node. So we'll just refer
1696 * to that node with the primary NID which was
1697 * first added by Lustre
1702 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1703 /* if not recreating peer as configured and
1704 * not locking primary nid, no need to
1705 * do anything if primary nid is not being changed
1707 if (nid_same(&lp->lp_primary_nid, nid)) {
1712 /* Delete and recreate the peer.
1714 * 1. If the peer is being recreated as a configured NID
1715 * 2. If there already exists a peer which
1716 * was discovered manually, but is recreated via Lustre
1719 rc = lnet_peer_del(lp);
1724 /* Create peer, peer_net, and peer_ni. */
1726 lp = lnet_peer_alloc(nid);
1729 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1732 lpni = lnet_peer_ni_alloc(nid);
1736 return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1739 LIBCFS_FREE(lpn, sizeof(*lpn));
1741 LIBCFS_FREE(lp, sizeof(*lp));
1743 CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1744 libcfs_nidstr(nid), flags, rc);
1749 * Add a NID to a peer. Call with ln_api_mutex held.
1752 * -EPERM: Non-DLC addition to a DLC-configured peer.
1753 * -EEXIST: The NID was configured by DLC for a different peer.
1754 * -ENOMEM: Out of memory.
1755 * -ENOTUNIQ: Adding a second peer NID on a single network on a
1756 * non-multi-rail peer.
1759 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1762 struct lnet_peer_net *lpn;
1763 struct lnet_peer_ni *lpni;
1769 /* A configured peer can only be updated through configuration. */
1770 if (!(flags & LNET_PEER_CONFIGURED)) {
1771 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1778 * The MULTI_RAIL flag can be set but not cleared, because
1779 * that would leave the peer struct in an invalid state.
1781 if (flags & LNET_PEER_MULTI_RAIL) {
1782 spin_lock(&lp->lp_lock);
1783 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1784 lp->lp_state |= LNET_PEER_MULTI_RAIL;
1785 lnet_peer_clr_non_mr_pref_nids(lp);
1787 spin_unlock(&lp->lp_lock);
1788 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1793 lpni = lnet_peer_ni_find_locked(nid);
1796 * A peer_ni already exists. This is only a problem if
1797 * it is not connected to this peer and was configured
1800 if (lpni->lpni_peer_net->lpn_peer == lp)
1802 if (lnet_peer_ni_is_configured(lpni)) {
1806 /* If this is the primary NID, destroy the peer. */
1807 if (lnet_peer_ni_is_primary(lpni)) {
1808 struct lnet_peer *lp2 =
1809 lpni->lpni_peer_net->lpn_peer;
1810 int rtr_refcount = lp2->lp_rtr_refcount;
1811 unsigned int peer2_state;
1812 __u64 peer2_prim_lock_ts;
1814 /* If there's another peer that this NID belongs to
1815 * and the primary NID for that peer is locked,
1816 * then, unless it is the only NID, we don't want
1818 * But the configuration is wrong at this point,
1819 * so we should flag both of these peers as in a bad
1822 spin_lock(&lp2->lp_lock);
1823 if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1825 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1826 spin_unlock(&lp2->lp_lock);
1827 spin_lock(&lp->lp_lock);
1828 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1829 spin_unlock(&lp->lp_lock);
1830 CERROR("Peer %s NID %s is already locked with peer %s\n",
1831 libcfs_nidstr(&lp->lp_primary_nid),
1833 libcfs_nidstr(&lp2->lp_primary_nid));
1836 peer2_state = lp2->lp_state;
1837 peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1838 spin_unlock(&lp2->lp_lock);
1840 /* NID which got locked the earliest should be
1841 * kept as primary. If the peers were
1842 * created by Lustre, this allows the
1843 * first listed NID to stay primary as intended
1844 * for the purpose of communicating with Lustre
1845 * even if peer discovery succeeded using
1846 * a different NID of an MR peer.
1848 spin_lock(&lp->lp_lock);
1849 if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1850 ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1851 peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1852 !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1853 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1854 lp->lp_primary_nid = *nid;
1855 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1857 spin_unlock(&lp->lp_lock);
1859 * if we're trying to delete a router it means
1860 * we're moving this peer NI to a new peer so must
1861 * transfer router properties to the new peer
1863 if (rtr_refcount > 0) {
1864 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1865 lnet_rtr_transfer_to_peer(lp2, lp);
1868 lnet_peer_ni_decref_locked(lpni);
1869 lpni = lnet_peer_ni_alloc(nid);
1876 lpni = lnet_peer_ni_alloc(nid);
1884 * Get the peer_net. Check that we're not adding a second
1885 * peer_ni on a peer_net of a non-multi-rail peer.
1887 lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1889 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1894 } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1899 return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1902 lnet_peer_ni_decref_locked(lpni);
1904 CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1905 libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1911 * Update the primary NID of a peer, if possible.
1913 * Call with the lnet_api_mutex held.
1916 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1919 struct lnet_nid old = lp->lp_primary_nid;
1922 if (nid_same(&lp->lp_primary_nid, nid))
1925 if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1926 lp->lp_primary_nid = *nid;
1928 rc = lnet_peer_add_nid(lp, nid, flags);
1930 lp->lp_primary_nid = old;
1934 /* if this is a configured peer or the primary for that peer has
1935 * been locked, then we don't want to flag this scenario as
1938 if (lp->lp_state & LNET_PEER_CONFIGURED ||
1939 lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1942 CDEBUG(D_NET, "peer %s NID %s: %d\n",
1943 libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1949 * lpni creation initiated due to traffic, either sending or receiving.
1950 * Callers must hold ln_api_mutex.
1951 * A ref is taken on the lnet_peer_ni returned by this function.
1953 static struct lnet_peer_ni *
1954 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1955 __must_hold(&the_lnet.ln_api_mutex)
1957 struct lnet_peer *lp = NULL;
1958 struct lnet_peer_net *lpn = NULL;
1959 struct lnet_peer_ni *lpni;
1963 if (LNET_NID_IS_ANY(nid)) {
1968 /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1969 lpni = lnet_peer_ni_find_locked(nid);
1972 * We must have raced with another thread. Since we
1973 * know next to nothing about a peer_ni created by
1974 * traffic, we just assume everything is ok and
1980 /* Create peer, peer_net, and peer_ni. */
1982 lp = lnet_peer_alloc(nid);
1985 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1988 lpni = lnet_peer_ni_alloc(nid);
1991 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1993 /* lnet_peer_attach_peer_ni() always returns 0 */
1994 rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1996 lnet_peer_ni_addref_locked(lpni);
2001 LIBCFS_FREE(lpn, sizeof(*lpn));
2003 LIBCFS_FREE(lp, sizeof(*lp));
2007 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
2012 * Implementation of IOC_LIBCFS_ADD_PEER_NI.
2014 * This API handles the following combinations:
2015 * Create a peer with its primary NI if only the prim_nid is provided
2016 * Add a NID to a peer identified by the prim_nid. The peer identified
2017 * by the prim_nid must already exist.
2018 * The peer being created may be non-MR.
2020 * The caller must hold ln_api_mutex. This prevents the peer from
2021 * being created/modified/deleted by a different thread.
2024 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
2026 __must_hold(&the_lnet.ln_api_mutex)
2028 struct lnet_peer *lp = NULL;
2029 struct lnet_peer_ni *lpni;
2031 /* The prim_nid must always be specified */
2032 if (LNET_NID_IS_ANY(prim_nid))
2036 flags |= LNET_PEER_MULTI_RAIL;
2039 * If nid isn't specified, we must create a new peer with
2040 * prim_nid as its primary nid.
2042 if (LNET_NID_IS_ANY(nid))
2043 return lnet_peer_add(prim_nid, flags);
2045 /* Look up the prim_nid, which must exist. */
2046 lpni = lnet_peer_ni_find_locked(prim_nid);
2049 lp = lpni->lpni_peer_net->lpn_peer;
2050 lnet_peer_ni_decref_locked(lpni);
2052 /* Peer must have been configured. */
2053 if ((flags & LNET_PEER_CONFIGURED) &&
2054 !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2055 CDEBUG(D_NET, "peer %s was not configured\n",
2056 libcfs_nidstr(prim_nid));
2060 /* Primary NID must match */
2061 if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2062 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2063 libcfs_nidstr(prim_nid),
2064 libcfs_nidstr(&lp->lp_primary_nid));
2068 /* Multi-Rail flag must match. */
2069 if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2070 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2071 libcfs_nidstr(prim_nid));
2075 if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2077 "Don't add temporary peer NI for uptodate peer %s\n",
2078 libcfs_nidstr(&lp->lp_primary_nid));
2082 return lnet_peer_add_nid(lp, nid, flags);
2085 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2086 bool mr, bool lock_prim)
2088 int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim); /* bool-to-flag */
2090 return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2094 lnet_reset_peer(struct lnet_peer *lp)
2096 struct lnet_peer_net *lpn, *lpntmp;
2097 struct lnet_peer_ni *lpni, *lpnitmp;
2101 lnet_peer_cancel_discovery(lp);
2103 flags = LNET_PEER_CONFIGURED;
2104 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2105 flags |= LNET_PEER_MULTI_RAIL;
2107 list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2108 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2110 if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2113 rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2115 CERROR("Failed to delete %s from peer %s\n",
2116 libcfs_nidstr(&lpni->lpni_nid),
2117 libcfs_nidstr(&lp->lp_primary_nid));
2122 /* mark it for discovery the next time we use it */
2123 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2128 * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2130 * This API handles the following combinations:
2131 * Delete a NI from a peer if both prim_nid and nid are provided.
2132 * Delete a peer if only prim_nid is provided.
2133 * Delete a peer if its primary nid is provided.
2135 * The caller must hold ln_api_mutex. This prevents the peer from
2136 * being modified/deleted by a different thread.
2139 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2142 struct lnet_peer *lp;
2143 struct lnet_peer_ni *lpni;
2146 if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2149 lpni = lnet_peer_ni_find_locked(prim_nid);
2152 lp = lpni->lpni_peer_net->lpn_peer;
2153 lnet_peer_ni_decref_locked(lpni);
2155 if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2156 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2157 libcfs_nidstr(prim_nid),
2158 libcfs_nidstr(&lp->lp_primary_nid));
2162 lnet_net_lock(LNET_LOCK_EX);
2163 if (lp->lp_rtr_refcount > 0) {
2164 lnet_net_unlock(LNET_LOCK_EX);
2165 CERROR("%s is a router. Can not be deleted\n",
2166 libcfs_nidstr(prim_nid));
2169 lnet_net_unlock(LNET_LOCK_EX);
2171 if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2172 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2173 CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2174 libcfs_nidstr(&lp->lp_primary_nid));
2175 return lnet_reset_peer(lp);
2177 return lnet_peer_del(lp);
2181 flags = LNET_PEER_CONFIGURED;
2182 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2183 flags |= LNET_PEER_MULTI_RAIL;
2185 return lnet_peer_del_nid(lp, nid, flags);
2189 lnet_destroy_peer_ni_locked(struct kref *ref)
2191 struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2193 struct lnet_peer_table *ptable;
2194 struct lnet_peer_net *lpn;
2196 CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2198 LASSERT(kref_read(&lpni->lpni_kref) == 0);
2199 LASSERT(list_empty(&lpni->lpni_txq));
2200 LASSERT(lpni->lpni_txqnob == 0);
2201 LASSERT(list_empty(&lpni->lpni_peer_nis));
2202 LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2204 lpn = lpni->lpni_peer_net;
2205 lpni->lpni_peer_net = NULL;
2206 lpni->lpni_net = NULL;
2208 if (!list_empty(&lpni->lpni_hashlist)) {
2209 /* remove the peer ni from the zombie list */
2210 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2211 spin_lock(&ptable->pt_zombie_lock);
2212 list_del_init(&lpni->lpni_hashlist);
2213 ptable->pt_zombies--;
2214 spin_unlock(&ptable->pt_zombie_lock);
2217 if (lpni->lpni_pref_nnids > 1) {
2218 struct lnet_nid_list *ne, *tmp;
2220 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2222 list_del_init(&ne->nl_list);
2223 LIBCFS_FREE(ne, sizeof(*ne));
2226 LIBCFS_FREE(lpni, sizeof(*lpni));
2229 lnet_peer_net_decref_locked(lpn);
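/* Assumed refcount wiring, for orientation: lpni_kref is initialized in
 * lnet_peer_ni_alloc(), and the decref helpers are expected to end in
 * something equivalent to the following, which lands here when the last
 * reference drops:
 */
#if 0
	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
#endif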
2232 struct lnet_peer_ni *
2233 lnet_nid2peerni_ex(struct lnet_nid *nid)
2234 __must_hold(&the_lnet.ln_api_mutex)
2236 struct lnet_peer_ni *lpni = NULL;
2238 if (the_lnet.ln_state != LNET_STATE_RUNNING)
2239 return ERR_PTR(-ESHUTDOWN);
2242 * find if a peer_ni already exists.
2243 * If so then just return that.
2245 lpni = lnet_peer_ni_find_locked(nid);
2249 lnet_net_unlock(LNET_LOCK_EX);
2251 lpni = lnet_peer_ni_traffic_add(nid, NULL);
2253 lnet_net_lock(LNET_LOCK_EX);
2259 * Get a peer_ni for the given nid, create it if necessary. Takes a
2260 * hold on the peer_ni.
2262 struct lnet_peer_ni *
2263 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2264 struct lnet_nid *pref, int cpt)
2266 struct lnet_peer_ni *lpni = NULL;
2268 if (the_lnet.ln_state != LNET_STATE_RUNNING)
2269 return ERR_PTR(-ESHUTDOWN);
2272 * find if a peer_ni already exists.
2273 * If so then just return that.
2275 lpni = lnet_peer_ni_find_locked(nid);
2281 * use the lnet_api_mutex to serialize the creation of the peer_ni
2282 * and the creation/deletion of the local ni/net. When a local ni is
2283 * created, if there exists a set of peer_nis on that network,
2284 * they need to be traversed and updated. When a local NI is
2285 * deleted, which could result in a network being deleted, then
2286 * all peer nis on that network need to be removed as well.
2288 * Creation through traffic should also be serialized with
2289 * creation through DLC.
2291 lnet_net_unlock(cpt);
2292 mutex_lock(&the_lnet.ln_api_mutex);
2294 * the_lnet.ln_state is only modified under the ln_api_mutex, so a single
2295 * check here is sufficient.
2297 if (the_lnet.ln_state == LNET_STATE_RUNNING)
2298 lpni = lnet_peer_ni_traffic_add(nid, pref);
2300 mutex_unlock(&the_lnet.ln_api_mutex);
2303 /* Lock has been dropped, check again for shutdown. */
2304 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2305 if (!IS_ERR_OR_NULL(lpni))
2306 lnet_peer_ni_decref_locked(lpni);
2307 lpni = ERR_PTR(-ESHUTDOWN);
2314 lnet_peer_gw_discovery(struct lnet_peer *lp)
2318 spin_lock(&lp->lp_lock);
2319 if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2321 spin_unlock(&lp->lp_lock);
2327 lnet_peer_is_uptodate(struct lnet_peer *lp)
2331 spin_lock(&lp->lp_lock);
2332 rc = lnet_peer_is_uptodate_locked(lp);
2333 spin_unlock(&lp->lp_lock);
2338 * Is a peer uptodate from the point of view of discovery?
2340 * If it is currently being processed, obviously not.
2341 * A forced Ping or Push is also handled by the discovery thread.
2343 * Otherwise look at whether the peer needs rediscovering.
2346 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2347 __must_hold(&lp->lp_lock)
2351 if (lp->lp_state & (LNET_PEER_DISCOVERING |
2352 LNET_PEER_FORCE_PING |
2353 LNET_PEER_FORCE_PUSH)) {
2355 } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2357 } else if (lnet_peer_needs_push(lp)) {
2359 } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2360 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2371 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2373 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2375 /* The discovery thread holds net_lock/EX and lp_lock when it splices
2376 * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2377 * when adding to the list and queuing the peer to ensure that we do not
2378 * strand any messages on the lp_dc_pendq. This scheme ensures the
2379 * message will be resent even if the peer is already being discovered.
2380 * Therefore we needn't check the return value of
2381 * lnet_peer_queue_for_discovery(lp).
2383 lnet_net_lock(LNET_LOCK_EX);
2384 spin_lock(&lp->lp_lock);
2385 list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2386 spin_unlock(&lp->lp_lock);
2387 lnet_peer_queue_for_discovery(lp);
2388 lnet_net_unlock(LNET_LOCK_EX);
2392 * Queue a peer for the attention of the discovery thread. Call with
2393 * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2394 * -EALREADY if the peer was already queued.
2396 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2400 spin_lock(&lp->lp_lock);
2401 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2402 lp->lp_state |= LNET_PEER_DISCOVERING;
2403 spin_unlock(&lp->lp_lock);
2404 if (list_empty(&lp->lp_dc_list)) {
2405 lnet_peer_addref_locked(lp);
2406 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2407 wake_up(&the_lnet.ln_dc_waitq);
2413 CDEBUG(D_NET, "Queue peer %s: %d\n",
2414 libcfs_nidstr(&lp->lp_primary_nid), rc);
2420 * Discovery of a peer is complete. Wake all waiters on the peer.
2421 * Call with lnet_net_lock/EX held.
2423 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2425 struct lnet_msg *msg, *tmp;
2427 LIST_HEAD(pending_msgs);
2429 CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2430 libcfs_nidstr(&lp->lp_primary_nid));
2432 spin_lock(&lp->lp_lock);
2433 /* Our caller dropped lp_lock which may have allowed another thread to
2434 * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2435 * Ensure it is cleared.
2437 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2439 lp->lp_dc_error = dc_error;
2440 lp->lp_state |= LNET_PEER_REDISCOVER;
2442 list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2443 spin_unlock(&lp->lp_lock);
2444 list_del_init(&lp->lp_dc_list);
2445 wake_up(&lp->lp_dc_waitq);
2447 if (lp->lp_rtr_refcount > 0)
2448 lnet_router_discovery_complete(lp);
2450 lnet_net_unlock(LNET_LOCK_EX);
2452 /* iterate through all pending messages and send them again */
2453 list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2454 list_del_init(&msg->msg_list);
2456 lnet_finalize(msg, dc_error);
2460 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2461 lnet_msgtyp2str(msg->msg_type),
2462 libcfs_idstr(&msg->msg_target));
2463 rc = lnet_send(&msg->msg_src_nid_param, msg,
2464 &msg->msg_rtr_nid_param);
2466 CNETERR("Error sending %s to %s: %d\n",
2467 lnet_msgtyp2str(msg->msg_type),
2468 libcfs_idstr(&msg->msg_target), rc);
2469 lnet_finalize(msg, rc);
2472 lnet_net_lock(LNET_LOCK_EX);
2473 lnet_peer_decref_locked(lp);
2477 * Handle inbound push.
2478 * Like any event handler, called with lnet_res_lock/CPT held.
2480 void lnet_peer_push_event(struct lnet_event *ev)
2482 struct lnet_ping_buffer *pbuf;
2483 struct lnet_peer *lp;
2486 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2488 /* lnet_find_peer() adds a refcount */
2489 lp = lnet_find_peer(&ev->source.nid);
2491 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2492 libcfs_nidstr(&ev->initiator.nid),
2493 libcfs_nidstr(&ev->source.nid));
2494 pbuf->pb_needs_post = true;
2498 /* Ensure peer state remains consistent while we modify it. */
2499 spin_lock(&lp->lp_lock);
2502 * If some kind of error happened the contents of the message
2503 * cannot be used. Clear the NIDS_UPTODATE and set the
2504 * FORCE_PING flag to trigger a ping.
2507 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2508 lp->lp_state |= LNET_PEER_FORCE_PING;
2509 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2511 libcfs_nidstr(&lp->lp_primary_nid),
2512 libcfs_nidstr(&ev->source.nid));
2517 * A push with invalid or corrupted info. Clear the UPTODATE
2518 * flag to trigger a ping.
2520 if (lnet_ping_info_validate(&pbuf->pb_info)) {
2521 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2522 lp->lp_state |= LNET_PEER_FORCE_PING;
2523 CDEBUG(D_NET, "Corrupted Push from %s\n",
2524 libcfs_nidstr(&lp->lp_primary_nid));
2528 /* Make sure we'll allocate the correct size ping buffer when pinging the peer. */
2531 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2532 if (lp->lp_data_bytes < infobytes)
2533 lp->lp_data_bytes = infobytes;
2536 * A non-Multi-Rail peer is not supposed to be capable of sending a push.
2539 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2540 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2541 libcfs_nidstr(&lp->lp_primary_nid));
2546 * The peer may have discovery disabled at its end. Set
2547 * NO_DISCOVERY as appropriate.
2549 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2550 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2551 libcfs_nidstr(&lp->lp_primary_nid));
2553 * Mark the peer for deletion if we already know about it
2554 * and it is transitioning from discovery enabled to discovery disabled.
2556 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2557 LNET_PEER_DISCOVERING)) &&
2558 lp->lp_state & LNET_PEER_DISCOVERED) {
2559 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2560 libcfs_nidstr(&lp->lp_primary_nid),
2562 lp->lp_state |= LNET_PEER_MARK_DELETION;
2564 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2565 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2566 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2567 libcfs_nidstr(&lp->lp_primary_nid));
2568 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2572 * Update the MULTI_RAIL flag based on the push. If the peer
2573 * was configured with DLC then the setting should match what DLC put in.
2575 * NB: We verified above that the MR feature bit is set in pi_features
2577 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2578 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2579 libcfs_nidstr(&lp->lp_primary_nid), lp);
2580 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2581 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2582 libcfs_nidstr(&lp->lp_primary_nid));
2583 } else if (lnet_peer_discovery_disabled) {
2584 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2585 libcfs_nidstr(&lp->lp_primary_nid), lp);
2586 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2587 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2588 libcfs_nidstr(&lp->lp_primary_nid), lp);
2590 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2591 libcfs_nidstr(&lp->lp_primary_nid), lp);
2592 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2593 lnet_peer_clr_non_mr_pref_nids(lp);
2596 /* Check for truncation of the Put message. Clear the
2597 * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2598 * and tell discovery to allocate a bigger buffer.
2600 if (ev->mlength < ev->rlength) {
2601 if (the_lnet.ln_push_target_nbytes < infobytes)
2602 the_lnet.ln_push_target_nbytes = infobytes;
2603 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2604 lp->lp_state |= LNET_PEER_FORCE_PING;
2605 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2606 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2610 /* always assume new data */
2611 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2612 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2614 /* If there is data present that hasn't been processed yet,
2615 * we'll replace it if the Put contained newer data and it
2616 * fits. We're racing with a Ping or earlier Push in this case.
2619 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2620 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2621 LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2622 infobytes <= lp->lp_data->pb_nbytes) {
2623 unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2624 infobytes, FLEXIBLE_OBJECT);
2625 CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2626 libcfs_nidstr(&lp->lp_primary_nid),
2627 LNET_PING_BUFFER_SEQNO(pbuf),
2628 LNET_PING_BUFFER_SEQNO(lp->lp_data));
2634 * Allocate a buffer to copy the data. On a failure we drop
2635 * the Push and set FORCE_PING to force the discovery
2636 * thread to fix the problem by pinging the peer.
2638 lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2640 lp->lp_state |= LNET_PEER_FORCE_PING;
2641 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2642 libcfs_nidstr(&lp->lp_primary_nid),
2643 LNET_PING_BUFFER_SEQNO(pbuf));
2648 unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2650 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2651 CDEBUG(D_NET, "Received Push %s %u\n",
2652 libcfs_nidstr(&lp->lp_primary_nid),
2653 LNET_PING_BUFFER_SEQNO(pbuf));
2656 /* We've processed this buffer. It can be reposted */
2657 pbuf->pb_needs_post = true;
2660 * Queue the peer for discovery if that hasn't been done yet; force it
2661 * onto the request queue and wake the discovery thread if the peer was
2662 * already queued, because its status changed.
2664 spin_unlock(&lp->lp_lock);
2665 lnet_net_lock(LNET_LOCK_EX);
2666 if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2667 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2668 wake_up(&the_lnet.ln_dc_waitq);
2670 /* Drop refcount from lookup */
2671 lnet_peer_decref_locked(lp);
2672 lnet_net_unlock(LNET_LOCK_EX);
2676 * Clear the discovery error state, unless we're already discovering
2677 * this peer, in which case the error is current.
2679 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2681 spin_lock(&lp->lp_lock);
2682 if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2683 lp->lp_dc_error = 0;
2684 spin_unlock(&lp->lp_lock);
2688 * Peer discovery slow path. The ln_api_mutex is held on entry, and
2689 * dropped/retaken within this function. An lnet_peer_ni is passed in
2690 * because discovery could tear down an lnet_peer.
2693 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2696 struct lnet_peer *lp = NULL;
2702 lnet_peer_decref_locked(lp);
2703 lnet_net_unlock(cpt);
2704 lnet_net_lock(LNET_LOCK_EX);
2705 lp = lpni->lpni_peer_net->lpn_peer;
2706 lnet_peer_clear_discovery_error(lp);
2709 * We're willing to be interrupted. The lpni can become a
2710 * zombie if we race with DLC, so we must check for that.
2713 /* Keep lp alive when the lnet_net_lock is unlocked */
2714 lnet_peer_addref_locked(lp);
2715 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2716 if (signal_pending(current))
2718 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2721 * Don't repeat discovery if discovery is disabled. This is
2722 * done to ensure we can use discovery as a standard ping as
2723 * well, for backwards compatibility with routers that do not
2724 * support discovery or have it disabled.
2726 if (lnet_is_discovery_disabled(lp) && count > 0)
2728 if (lp->lp_dc_error)
2730 if (lnet_peer_is_uptodate(lp))
2732 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2734 lnet_peer_queue_for_discovery(lp);
2736 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2739 * If caller requested a non-blocking operation then
2740 * return immediately. Once discovery is complete any
2741 * pending messages that were stopped due to discovery
2742 * will be transmitted.
2747 lnet_net_unlock(LNET_LOCK_EX);
2749 finish_wait(&lp->lp_dc_waitq, &wait);
2750 lnet_net_lock(LNET_LOCK_EX);
2751 lnet_peer_decref_locked(lp);
2752 /* Peer may have changed */
2753 lp = lpni->lpni_peer_net->lpn_peer;
2755 finish_wait(&lp->lp_dc_waitq, &wait);
2757 lnet_net_unlock(LNET_LOCK_EX);
2760 * The peer may have changed, so re-check and rediscover if that turns
2761 * out to have been the case. The reference count on lp ensured that
2762 * even if it was unlinked from lpni the memory could not be recycled.
2763 * Thus the check below is sufficient to determine whether the peer
2764 * changed. If the peer changed, then lp must not be dereferenced.
2766 if (lp != lpni->lpni_peer_net->lpn_peer)
2769 if (signal_pending(current))
2771 else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2773 else if (lp->lp_dc_error)
2774 rc = lp->lp_dc_error;
2776 CDEBUG(D_NET, "non-blocking discovery\n");
2777 else if (!lnet_peer_is_uptodate(lp) &&
2778 !(lnet_is_discovery_disabled(lp) ||
2779 (lp->lp_state & LNET_PEER_MARK_DELETED)))
2782 CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2783 (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2784 libcfs_nidstr(&lpni->lpni_nid), rc,
2785 (!block) ? "pending discovery" : "discovery complete");
2786 lnet_peer_decref_locked(lp);
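/* Illustrative usage sketch (hypothetical, not part of the original file):
 * a blocking discovery of the peer behind a given NID, assuming the calling
 * context described above: ln_api_mutex held, and lnet_discover_peer_locked()
 * entered and exited with the per-CPT net lock held.
 */
static int example_discover_nid(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	int cpt;
	int rc = -ENOENT;

	mutex_lock(&the_lnet.ln_api_mutex);
	cpt = lnet_net_lock_current();
	lpni = lnet_peer_ni_find_locked(nid);	/* takes a ref on success */
	if (lpni) {
		rc = lnet_discover_peer_locked(lpni, cpt, true);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}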
2791 /* Handle an incoming ack for a push. */
2793 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2795 struct lnet_ping_buffer *pbuf;
2797 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2798 spin_lock(&lp->lp_lock);
2799 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2800 lp->lp_push_error = ev->status;
2802 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2804 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2805 spin_unlock(&lp->lp_lock);
2807 CDEBUG(D_NET, "peer %s ev->status %d\n",
2808 libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2811 static bool find_primary(struct lnet_nid *nid,
2812 struct lnet_ping_buffer *pbuf)
2814 struct lnet_ping_info *pi = &pbuf->pb_info;
2815 struct lnet_ping_iter piter;
2818 if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2819 /* First large nid is primary */
2820 for (stp = ping_iter_first(&piter, pbuf, nid);
2822 stp = ping_iter_next(&piter, nid)) {
2823 if (nid_is_nid4(nid))
2825 /* nid has already been copied in */
2828 /* no large nids ... weird ... ignore the flag
2829 * and use first nid.
2832 /* pi_nids[1] is primary */
2833 if (pi->pi_nnis < 2)
2835 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
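/* Illustrative sketch (hypothetical): find_primary() is typically paired
 * with nid_same() to decide whether a ping buffer names the primary NID we
 * already track, as lnet_discovery_event_reply() does below.
 */
static bool example_buf_matches_primary(struct lnet_peer *lp,
					struct lnet_ping_buffer *pbuf)
{
	struct lnet_nid primary;

	return find_primary(&primary, pbuf) &&
	       nid_same(&lp->lp_primary_nid, &primary);
}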
2839 /* Handle a Reply message. This is the reply to a Ping message. */
2841 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2843 struct lnet_ping_buffer *pbuf;
2844 struct lnet_nid primary;
2847 bool ping_feat_disc;
2849 spin_lock(&lp->lp_lock);
2851 lp->lp_disc_src_nid = ev->target.nid;
2852 lp->lp_disc_dst_nid = ev->source.nid;
2855 * If some kind of error happened the contents of the message
2856 * cannot be used. Set PING_FAILED to trigger a retry.
2859 lp->lp_state |= LNET_PEER_PING_FAILED;
2860 lp->lp_ping_error = ev->status;
2861 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2863 libcfs_nidstr(&lp->lp_primary_nid),
2864 libcfs_nidstr(&ev->source.nid));
2868 pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2869 if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2870 lnet_swap_pinginfo(pbuf);
2873 * A reply with invalid or corrupted info. Set PING_FAILED to trigger a retry.
2876 rc = lnet_ping_info_validate(&pbuf->pb_info);
2878 lp->lp_state |= LNET_PEER_PING_FAILED;
2879 lp->lp_ping_error = 0;
2880 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2881 libcfs_nidstr(&lp->lp_primary_nid), rc);
2886 * The peer may have discovery disabled at its end. Set
2887 * NO_DISCOVERY as appropriate.
2889 ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2890 if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2891 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2892 libcfs_nidstr(&lp->lp_primary_nid),
2893 ping_feat_disc ? "enabled" : "disabled",
2894 lnet_peer_discovery_disabled ? "disabled" : "enabled");
2896 /* Detect whether this peer has toggled discovery from on to
2897 * off and whether we can delete and re-create the peer. Peers
2898 * that were manually configured cannot be deleted by discovery.
2899 * We need to delete this peer and re-create it if the peer was
2900 * not configured manually, is currently considered DD capable, and either:
2902 * 1. We've already discovered the peer (the peer has toggled
2903 * the discovery feature from on to off), or
2904 * 2. The peer is considered MR, but it was not user configured
2905 * (this was a "temporary" peer created via the kernel APIs
2906 * that we're discovering for the first time)
2908 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2909 LNET_PEER_NO_DISCOVERY)) &&
2910 (lp->lp_state & (LNET_PEER_DISCOVERED |
2911 LNET_PEER_MULTI_RAIL))) {
2912 CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2913 libcfs_nidstr(&lp->lp_primary_nid),
2915 lp->lp_state |= LNET_PEER_MARK_DELETION;
2917 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2919 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2920 libcfs_nidstr(&lp->lp_primary_nid));
2921 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2925 * Update the MULTI_RAIL flag based on the reply. If the peer
2926 * was configured with DLC then the setting should match what DLC put in.
2929 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2930 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2931 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2932 libcfs_nidstr(&lp->lp_primary_nid), lp);
2933 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2934 CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2935 libcfs_nidstr(&lp->lp_primary_nid));
2936 } else if (lnet_peer_discovery_disabled) {
2938 "peer %s(%p) not MR: DD disabled locally\n",
2939 libcfs_nidstr(&lp->lp_primary_nid), lp);
2940 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2942 "peer %s(%p) not MR: DD disabled remotely\n",
2943 libcfs_nidstr(&lp->lp_primary_nid), lp);
2945 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2946 libcfs_nidstr(&lp->lp_primary_nid), lp);
2947 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2948 lnet_peer_clr_non_mr_pref_nids(lp);
2950 } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2951 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2952 CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2953 libcfs_nidstr(&lp->lp_primary_nid));
2955 CERROR("Multi-Rail state vanished from %s\n",
2956 libcfs_nidstr(&lp->lp_primary_nid));
2957 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2961 infobytes = lnet_ping_info_size(&pbuf->pb_info);
2963 * Make sure we'll allocate the correct size ping buffer when pinging the peer.
2966 if (lp->lp_data_bytes < infobytes)
2967 lp->lp_data_bytes = infobytes;
2969 /* Check for truncation of the Reply. Clear PING_SENT and set
2970 * PING_FAILED to trigger a retry.
2972 if (pbuf->pb_nbytes < infobytes) {
2973 if (the_lnet.ln_push_target_nbytes < infobytes)
2974 the_lnet.ln_push_target_nbytes = infobytes;
2975 lp->lp_state |= LNET_PEER_PING_FAILED;
2976 lp->lp_ping_error = 0;
2977 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2978 libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2983 * Check the sequence numbers in the reply. These are only
2984 * available if the reply came from a Multi-Rail peer.
2986 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2987 find_primary(&primary, pbuf) &&
2988 nid_same(&lp->lp_primary_nid, &primary)) {
2989 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2990 CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2991 libcfs_nidstr(&lp->lp_primary_nid),
2992 LNET_PING_BUFFER_SEQNO(pbuf),
2995 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2998 /* We're happy with the state of the data in the buffer. */
2999 CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
3000 libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
3002 if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3003 lnet_ping_buffer_decref(lp->lp_data);
3005 lp->lp_state |= LNET_PEER_DATA_PRESENT;
3006 lnet_ping_buffer_addref(pbuf);
3009 lp->lp_state &= ~LNET_PEER_PING_SENT;
3010 spin_unlock(&lp->lp_lock);
3014 * Send event handling. Only matters for error cases, where we clean
3015 * up state on the peer and peer_ni that would otherwise be updated in
3016 * the REPLY event handler for a successful Ping, and the ACK event
3017 * handler for a successful Push.
3020 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
3027 spin_lock(&lp->lp_lock);
3028 if (ev->msg_type == LNET_MSG_GET) {
3029 lp->lp_state &= ~LNET_PEER_PING_SENT;
3030 lp->lp_state |= LNET_PEER_PING_FAILED;
3031 lp->lp_ping_error = ev->status;
3032 } else { /* ev->msg_type == LNET_MSG_PUT */
3033 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3034 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3035 lp->lp_push_error = ev->status;
3037 spin_unlock(&lp->lp_lock);
3038 rc = LNET_REDISCOVER_PEER;
3040 CDEBUG(D_NET, "%s Send to %s: %d\n",
3041 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3042 libcfs_nidstr(&ev->target.nid), rc);
3047 * Unlink event handling. This event is only seen if a call to
3048 * LNetMDUnlink() caused the event to be unlinked. If this call was
3049 * made after the event was set up in LNetGet() or LNetPut() then we
3050 * assume the Ping or Push timed out.
3053 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3055 spin_lock(&lp->lp_lock);
3056 /* We've passed through LNetGet() */
3057 if (lp->lp_state & LNET_PEER_PING_SENT) {
3058 lp->lp_state &= ~LNET_PEER_PING_SENT;
3059 lp->lp_state |= LNET_PEER_PING_FAILED;
3060 lp->lp_ping_error = -ETIMEDOUT;
3061 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3062 libcfs_nidstr(&lp->lp_primary_nid));
3064 /* We've passed through LNetPut() */
3065 if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3066 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3067 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3068 lp->lp_push_error = -ETIMEDOUT;
3069 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3070 libcfs_nidstr(&lp->lp_primary_nid));
3072 spin_unlock(&lp->lp_lock);
3076 * Event handler for the discovery EQ.
3078 * Called with lnet_res_lock(cpt) held. The cpt is the
3079 * lnet_cpt_of_cookie() of the md handle cookie.
3081 static void lnet_discovery_event_handler(struct lnet_event *event)
3083 struct lnet_peer *lp = event->md_user_ptr;
3084 struct lnet_ping_buffer *pbuf;
3087 /* discovery needs to take another look */
3088 rc = LNET_REDISCOVER_PEER;
3090 CDEBUG(D_NET, "Received event: %d\n", event->type);
3092 switch (event->type) {
3093 case LNET_EVENT_ACK:
3094 lnet_discovery_event_ack(lp, event);
3096 case LNET_EVENT_REPLY:
3097 lnet_discovery_event_reply(lp, event);
3099 case LNET_EVENT_SEND:
3100 /* Only send failure triggers a retry. */
3101 rc = lnet_discovery_event_send(lp, event);
3103 case LNET_EVENT_UNLINK:
3104 /* LNetMDUnlink() was called */
3105 lnet_discovery_event_unlink(lp, event);
3108 /* Invalid events. */
3111 lnet_net_lock(LNET_LOCK_EX);
3113 /* put peer back at end of request queue, if discovery not already
3115 if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3116 lnet_peer_queue_for_discovery(lp)) {
3117 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3118 wake_up(&the_lnet.ln_dc_waitq);
3120 if (event->unlinked) {
3121 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3122 lnet_ping_buffer_decref(pbuf);
3123 lnet_peer_decref_locked(lp);
3125 lnet_net_unlock(LNET_LOCK_EX);
3128 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3129 struct lnet_ping_buffer *pbuf,
3130 struct lnet_nid *nid)
3132 pi->pinfo = &pbuf->pb_info;
3133 pi->pos = &pbuf->pb_info.pi_ni;
3134 pi->end = (void *)pi->pinfo +
3135 min_t(int, pbuf->pb_nbytes,
3136 lnet_ping_info_size(pi->pinfo));
3137 /* lnet_ping_info_validate ensures there will be one
3138 * lnet_ni_status at the start
3141 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3143 pi->pos += sizeof(struct lnet_ni_status);
3144 return &pbuf->pb_info.pi_ni[0].ns_status;
3147 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3149 int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3151 if (pi->pos < ((void *)pi->pinfo + off)) {
3152 struct lnet_ni_status *ns = pi->pos;
3155 if (pi->pos > pi->end)
3158 lnet_nid4_to_nid(ns->ns_nid, nid);
3159 return &ns->ns_status;
3162 while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3163 struct lnet_ni_large_status *lns = pi->pos;
3165 if (pi->pos + 8 > pi->end)
3166 /* Not safe to examine next */
3168 pi->pos = lnet_ping_sts_next(lns);
3169 if (pi->pos > pi->end)
3171 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3175 return &lns->ns_status;
3180 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3182 struct lnet_ping_iter pi;
3186 for (st = ping_iter_first(&pi, pbuf, NULL); st;
3187 st = ping_iter_next(&pi, NULL))
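/* Illustrative usage sketch (hypothetical, not part of the original file):
 * the ping_iter_first()/ping_iter_next() pair above walks every NID
 * advertised in a ping buffer, e.g. to log each entry's status.
 */
static void example_log_ping_entries(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ping_iter pi;
	struct lnet_nid nid;
	u32 *st;

	for (st = ping_iter_first(&pi, pbuf, &nid); st;
	     st = ping_iter_next(&pi, &nid))
		CDEBUG(D_NET, "NID %s status %u\n",
		       libcfs_nidstr(&nid), *st);
}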
3193 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3195 if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN) {
3197 lnet_handle_remote_failure_locked(lpni);
3199 } else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3200 !lpni->lpni_last_alive)
3201 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3205 * Build a peer from incoming data.
3207 * The NIDs in the incoming data are supposed to be structured as follows:
 * - loopback NID
 * - primary NID
3210 * - other NIDs in same net
3211 * - NIDs in second net
3212 * - NIDs in third net
3214 * This is due to the way the list of NIDs in the data is created.
3216 * Note that this function will mark the peer uptodate unless an
3217 * ENOMEM is encountered. All other errors are due to a conflict
3218 * between the DLC configuration and what discovery sees. We treat DLC
3219 * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3220 * peer from becoming stuck in discovery.
3222 static int lnet_peer_merge_data(struct lnet_peer *lp,
3223 struct lnet_ping_buffer *pbuf)
3225 struct lnet_peer_net *lpn;
3226 struct lnet_peer_ni *lpni;
3227 struct lnet_nid *curnis = NULL;
3228 struct lnet_ni_large_status *addnis = NULL;
3229 struct lnet_nid *delnis = NULL;
3230 struct lnet_ping_iter pi;
3231 struct lnet_nid nid;
3233 struct lnet_nid primary = {};
3234 bool want_large_primary;
3245 flags = LNET_PEER_DISCOVERED;
3246 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3247 flags |= LNET_PEER_MULTI_RAIL;
3250 * Cache the routing feature for the peer; whether it is enabled
3251 * or disabled, as reported by the remote peer.
3253 spin_lock(&lp->lp_lock);
3254 if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3255 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3257 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3258 spin_unlock(&lp->lp_lock);
3260 nnis = ping_info_count_entries(pbuf);
3261 nnis = max_t(int, lp->lp_nnis, nnis);
3262 CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3263 CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3264 CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3265 if (!curnis || !addnis || !delnis) {
3273 /* Construct the list of NIDs present in peer. */
3275 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3276 curnis[ncurnis++] = lpni->lpni_nid;
3278 /* Check for NIDs in pbuf not present in curnis[].
3279 * Skip the first, which is loop-back. Take second as
3280 * primary, unless a large primary is found.
3282 ping_iter_first(&pi, pbuf, NULL);
3283 stp = ping_iter_next(&pi, &nid);
3286 want_large_primary = (pbuf->pb_info.pi_features &
3287 LNET_PING_FEAT_PRIMARY_LARGE);
3288 for (; stp; stp = ping_iter_next(&pi, &nid)) {
3289 for (j = 0; j < ncurnis; j++)
3290 if (nid_same(&nid, &curnis[j]))
3293 addnis[naddnis].ns_nid = nid;
3294 addnis[naddnis].ns_status = *stp;
3297 if (want_large_primary && nid.nid_size) {
3299 want_large_primary = false;
3303 * Check for NIDs in curnis[] not present in pbuf.
3304 * The nested loop starts at 1 to skip the loopback NID.
3306 * But never add the loopback NID to delnis[]: if it is
3307 * present in curnis[] then this peer is for this node.
3309 for (i = 0; i < ncurnis; i++) {
3310 if (nid_is_lo0(&curnis[i]))
3312 ping_iter_first(&pi, pbuf, NULL);
3313 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3314 if (nid_same(&curnis[i], &nid)) {
3316 * update the information we cache for the
3317 * peer with the latest information we
3320 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3322 old_st = lpni->lpni_ns_status;
3323 lpni->lpni_ns_status = *stp;
3324 if (old_st != lpni->lpni_ns_status)
3325 handle_disc_lpni_health(lpni);
3326 lnet_peer_ni_decref_locked(lpni);
3332 delnis[ndelnis++] = curnis[i];
3336 * If we get here and the discovery is disabled then we don't want
3337 * to add or delete any NIs. We just update the ones we have some
3338 * information on, and call it a day.
3341 if (lnet_is_discovery_disabled(lp))
3344 for (i = 0; i < naddnis; i++) {
3345 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3347 CERROR("Error adding NID %s to peer %s: %d\n",
3348 libcfs_nidstr(&addnis[i].ns_nid),
3349 libcfs_nidstr(&lp->lp_primary_nid), rc);
3353 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3355 lpni->lpni_ns_status = addnis[i].ns_status;
3356 handle_disc_lpni_health(lpni);
3357 lnet_peer_ni_decref_locked(lpni);
3361 for (i = 0; i < ndelnis; i++) {
3363 * for routers it's okay to delete the primary_nid because
3364 * the upper layers don't really rely on it. So if we're
3365 * being told that the router changed its primary_nid
3366 * then it's okay to delete it.
3368 if (lp->lp_rtr_refcount > 0)
3369 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3370 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3372 CERROR("Error deleting NID %s from peer %s: %d\n",
3373 libcfs_nidstr(&delnis[i]),
3374 libcfs_nidstr(&lp->lp_primary_nid), rc);
3380 /* The peer net for the primary NID should be the first entry in the
3381 * peer's lp_peer_nets list, and the peer NI for the primary NID should
3382 * be the first entry in its peer net's lpn_peer_nis list.
3384 find_primary(&nid, pbuf);
3385 lpni = lnet_peer_ni_find_locked(&nid);
3387 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3388 libcfs_nidstr(&nid));
3392 lpn = lpni->lpni_peer_net;
3393 if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3394 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3396 if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3397 list_move(&lpni->lpni_peer_nis,
3398 &lpni->lpni_peer_net->lpn_peer_nis);
3400 lnet_peer_ni_decref_locked(lpni);
3402 * Errors other than -ENOMEM are due to peers having been
3403 * configured with DLC. Ignore these because DLC overrides Discovery.
3408 /* If this peer is a gateway, invoke the routing callback to update
3409 * the associated route status
3411 if (lp->lp_rtr_refcount > 0)
3412 lnet_router_discovery_ping_reply(lp, pbuf);
3414 CFS_FREE_PTR_ARRAY(curnis, nnis);
3415 CFS_FREE_PTR_ARRAY(addnis, nnis);
3416 CFS_FREE_PTR_ARRAY(delnis, nnis);
3417 lnet_ping_buffer_decref(pbuf);
3418 CDEBUG(D_NET, "peer %s (%p): %d\n",
3419 libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3422 spin_lock(&lp->lp_lock);
3423 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3424 lp->lp_state |= LNET_PEER_FORCE_PING;
3425 spin_unlock(&lp->lp_lock);
3431 * The data in pbuf says lp is its primary peer, but the data was
3432 * received by a different peer. Try to update lp with the data.
3435 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3437 struct lnet_handle_md mdh;
3439 /* Queue lp for discovery, and force it on the request queue. */
3440 lnet_net_lock(LNET_LOCK_EX);
3441 if (lnet_peer_queue_for_discovery(lp))
3442 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3443 lnet_net_unlock(LNET_LOCK_EX);
3445 LNetInvalidateMDHandle(&mdh);
3448 * Decide whether we can move the peer to the DATA_PRESENT state.
3450 * We replace stale data for a multi-rail peer, repair PING_FAILED
3451 * status, and preempt FORCE_PING.
3453 * If after that we have DATA_PRESENT, we merge it into this peer.
3455 spin_lock(&lp->lp_lock);
3456 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3457 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3458 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3459 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3460 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3461 lnet_ping_buffer_decref(pbuf);
3466 if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3467 lnet_ping_buffer_decref(lp->lp_data);
3469 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3471 if (lp->lp_state & LNET_PEER_PING_FAILED) {
3472 mdh = lp->lp_ping_mdh;
3473 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3474 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3475 lp->lp_ping_error = 0;
3477 if (lp->lp_state & LNET_PEER_FORCE_PING)
3478 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3479 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3480 spin_unlock(&lp->lp_lock);
3482 if (!LNetMDHandleIsInvalid(mdh))
3486 return lnet_peer_merge_data(lp, pbuf);
3488 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3492 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3493 struct lnet_ping_buffer *pbuf)
3495 struct lnet_ping_iter pi;
3496 struct lnet_nid pnid;
3499 for (st = ping_iter_first(&pi, pbuf, &pnid);
3501 st = ping_iter_next(&pi, &pnid))
3502 if (nid_same(nid, &pnid))
3507 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3508 * to the discovery queue a reference was taken that will prevent the peer from
3509 * actually being freed by this function. After this function exits the
3510 * discovery thread should call lnet_peer_discovery_complete() which will
3511 * drop that reference as well as wake any waiters that may also be holding a reference to the peer.
3514 static int lnet_peer_deletion(struct lnet_peer *lp)
3515 __must_hold(&lp->lp_lock)
3517 struct list_head rlist;
3518 struct lnet_route *route, *tmp;
3519 int sensitivity = lp->lp_health_sensitivity;
3522 INIT_LIST_HEAD(&rlist);
3524 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3525 libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3527 /* no-op if lnet_peer_del() has already been called on this peer */
3528 if (lp->lp_state & LNET_PEER_MARK_DELETED)
3529 goto clear_discovering;
3531 spin_unlock(&lp->lp_lock);
3533 mutex_lock(&the_lnet.ln_api_mutex);
3534 if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3535 the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3536 mutex_unlock(&the_lnet.ln_api_mutex);
3537 spin_lock(&lp->lp_lock);
3539 goto clear_discovering;
3542 lnet_peer_cancel_discovery(lp);
3543 lnet_net_lock(LNET_LOCK_EX);
3544 list_for_each_entry_safe(route, tmp,
3547 lnet_move_route(route, NULL, &rlist);
3549 /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3550 rc = lnet_peer_del_locked(lp);
3552 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3553 libcfs_nidstr(&lp->lp_primary_nid), rc);
3555 lnet_net_unlock(LNET_LOCK_EX);
3557 list_for_each_entry_safe(route, tmp,
3559 /* re-add these routes */
3560 lnet_add_route(route->lr_net,
3565 LIBCFS_FREE(route, sizeof(*route));
3568 mutex_unlock(&the_lnet.ln_api_mutex);
3570 spin_lock(&lp->lp_lock);
3575 lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3576 LNET_PEER_FORCE_PUSH);
3582 * Update a peer using the data received.
3584 static int lnet_peer_data_present(struct lnet_peer *lp)
3585 __must_hold(&lp->lp_lock)
3587 struct lnet_ping_buffer *pbuf;
3588 struct lnet_peer_ni *lpni;
3589 struct lnet_nid nid;
3595 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3596 lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3597 spin_unlock(&lp->lp_lock);
3600 * Modifications of peer structures are done while holding the
3601 * ln_api_mutex. A global lock is required because we may be
3602 * modifying multiple peer structures, and a mutex greatly
3603 * simplifies memory management.
3605 * The actual changes to the data structures must also protect
3606 * against concurrent lookups, for which the lnet_net_lock in
3607 * LNET_LOCK_EX mode is used.
3609 mutex_lock(&the_lnet.ln_api_mutex);
3610 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3611 lnet_ping_buffer_decref(pbuf);
3617 * If this peer is not on the peer list then it is being torn
3618 * down, and our reference count may be all that is keeping it
3619 * alive. Don't do any work on it.
3621 if (list_empty(&lp->lp_peer_list)) {
3622 lnet_ping_buffer_decref(pbuf);
3626 flags = LNET_PEER_DISCOVERED;
3627 if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3628 flags |= LNET_PEER_MULTI_RAIL;
3630 * Check whether the primary NID in the message matches the
3631 * primary NID of the peer. If it does, update the peer, if
3632 * it does not, check whether there is already a peer with
3633 * that primary NID. If no such peer exists, try to update
3634 * the primary NID of the current peer (allowed if it was
3635 * created due to message traffic) and complete the update.
3636 * If the peer did exist, hand off the data to it.
3638 * The peer for the loopback interface is a special case: this
3639 * is the peer for the local node, and we want to set its
3640 * primary NID to the correct value here. Moreover, this peer
3641 * can show up with only the loopback NID in the ping buffer.
3643 if (!find_primary(&nid, pbuf)) {
3644 lnet_ping_buffer_decref(pbuf);
3647 /* If lp_merge_primary_nid is set, assign it as primary,
3648 * which causes the peers to merge.
3650 if (!LNET_NID_IS_ANY(&lp->lp_merge_primary_nid)) {
3652 rc = lnet_peer_set_primary_nid(lp, &lp->lp_merge_primary_nid,
3654 lp->lp_merge_primary_nid = LNET_ANY_NID;
3657 if (nid_is_lo0(&lp->lp_primary_nid)) {
3658 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3660 lnet_ping_buffer_decref(pbuf);
3662 rc = lnet_peer_merge_data(lp, pbuf);
3664 * if the primary nid of the peer is present in the ping info returned
3665 * from the peer, but it's not the primary NID we have cached
3666 * locally, and discovery is disabled, then we don't want to update
3667 * our local peer info, by adding or removing NIDs, we just want
3668 * to update the status of the nids that we currently have
3669 * recorded in that peer.
3671 } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3672 (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3673 lnet_is_discovery_disabled(lp))) {
3674 rc = lnet_peer_merge_data(lp, pbuf);
3676 lpni = lnet_peer_ni_find_locked(&nid);
3677 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3678 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3680 CERROR("Primary NID error %s versus %s: %d\n",
3681 libcfs_nidstr(&lp->lp_primary_nid),
3682 libcfs_nidstr(&nid), rc);
3683 lnet_ping_buffer_decref(pbuf);
3685 rc = lnet_peer_merge_data(lp, pbuf);
3688 lnet_peer_ni_decref_locked(lpni);
3690 struct lnet_peer *new_lp;
3691 new_lp = lpni->lpni_peer_net->lpn_peer;
3693 * if lp has discovery/MR enabled that means new_lp
3694 * should have discovery/MR enabled as well, since
3695 * it's the same peer, which we're about to merge
3697 spin_lock(&lp->lp_lock);
3698 spin_lock(&new_lp->lp_lock);
3699 if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3700 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3701 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3702 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3703 /* If we're processing a ping reply then we may be
3704 * about to send a push to the peer that we ping'd.
3705 * Since the ping reply that we're processing was
3706 * received by lp, we need to set the discovery source
3707 * NID for new_lp to the NID stored in lp.
3709 if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3710 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3711 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3713 spin_unlock(&new_lp->lp_lock);
3714 spin_unlock(&lp->lp_lock);
3716 rc = lnet_peer_set_primary_data(new_lp, pbuf);
3717 lnet_consolidate_routes_locked(lp, new_lp);
3718 lnet_peer_ni_decref_locked(lpni);
3722 CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3723 libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3725 mutex_unlock(&the_lnet.ln_api_mutex);
3727 spin_lock(&lp->lp_lock);
3728 /* Tell discovery to re-check the peer immediately. */
3730 rc = LNET_REDISCOVER_PEER;
3735 * A ping failed. Clear the PING_FAILED state and set the
3736 * FORCE_PING state, to ensure a retry even if discovery is
3737 * disabled. This avoids being left with incorrect state.
3739 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3740 __must_hold(&lp->lp_lock)
3742 struct lnet_handle_md mdh;
3745 mdh = lp->lp_ping_mdh;
3746 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3747 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3748 lp->lp_state |= LNET_PEER_FORCE_PING;
3749 rc = lp->lp_ping_error;
3750 lp->lp_ping_error = 0;
3751 spin_unlock(&lp->lp_lock);
3753 if (!LNetMDHandleIsInvalid(mdh))
3756 CDEBUG(D_NET, "peer %s:%d\n",
3757 libcfs_nidstr(&lp->lp_primary_nid), rc);
3759 spin_lock(&lp->lp_lock);
3760 return rc ? rc : LNET_REDISCOVER_PEER;
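/* Illustrative sketch (hypothetical): the teardown pattern used above and in
 * lnet_peer_push_failed() below: stash the MD handle under lp_lock,
 * invalidate the stored copy, then unlink with no locks held.
 */
static void example_teardown_ping_mdh(struct lnet_peer *lp)
{
	struct lnet_handle_md mdh;

	spin_lock(&lp->lp_lock);
	mdh = lp->lp_ping_mdh;
	LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(mdh))
		LNetMDUnlink(mdh);
}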
3763 /* Active side of ping. */
3764 static int lnet_peer_send_ping(struct lnet_peer *lp)
3765 __must_hold(&lp->lp_lock)
3771 lp->lp_state |= LNET_PEER_PING_SENT;
3772 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3773 spin_unlock(&lp->lp_lock);
3775 cpt = lnet_net_lock_current();
3776 /* Refcount for MD. */
3777 lnet_peer_addref_locked(lp);
3778 lnet_net_unlock(cpt);
3780 bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3782 rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3783 the_lnet.ln_dc_handler, false);
3784 /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3785 * refcount on the peer, otherwise LNetMDUnlink will be called
3786 * which will eventually do that.
3790 lnet_peer_decref_locked(lp);
3791 lnet_net_unlock(cpt);
3792 rc = -rc; /* change the rc to a negative value */
3794 } else if (rc < 0) {
3798 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3800 spin_lock(&lp->lp_lock);
3804 CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3806 * The errors that get us here are considered hard errors and
3807 * cause Discovery to terminate. So we clear PING_SENT, but do
3808 * not set either PING_FAILED or FORCE_PING. In fact we need
3809 * to clear PING_FAILED, because the unlink event handler will
3810 * have set it if we called LNetMDUnlink() above.
3812 spin_lock(&lp->lp_lock);
3813 lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3818 * This function exists because you cannot call LNetMDUnlink() from an event handler.
3821 static int lnet_peer_push_failed(struct lnet_peer *lp)
3822 __must_hold(&lp->lp_lock)
3824 struct lnet_handle_md mdh;
3827 mdh = lp->lp_push_mdh;
3828 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3829 lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3830 rc = lp->lp_push_error;
3831 lp->lp_push_error = 0;
3832 spin_unlock(&lp->lp_lock);
3834 if (!LNetMDHandleIsInvalid(mdh))
3837 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3838 spin_lock(&lp->lp_lock);
3839 return rc ? rc : LNET_REDISCOVER_PEER;
3843 * Mark the peer as discovered.
3845 static int lnet_peer_discovered(struct lnet_peer *lp)
3846 __must_hold(&lp->lp_lock)
3848 lp->lp_state |= LNET_PEER_DISCOVERED;
3849 lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3850 LNET_PEER_REDISCOVER);
3852 lp->lp_dc_error = 0;
3854 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3859 /* Active side of push. */
3860 static int lnet_peer_send_push(struct lnet_peer *lp)
3861 __must_hold(&lp->lp_lock)
3863 struct lnet_ping_buffer *pbuf;
3864 struct lnet_processid id;
3869 /* Don't push to a non-multi-rail peer. */
3870 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3871 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3872 /* if peer's NIDs are uptodate then peer is discovered */
3873 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3874 rc = lnet_peer_discovered(lp);
3881 lp->lp_state |= LNET_PEER_PUSH_SENT;
3882 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3883 spin_unlock(&lp->lp_lock);
3885 cpt = lnet_net_lock_current();
3886 pbuf = the_lnet.ln_ping_target;
3887 lnet_ping_buffer_addref(pbuf);
3888 lnet_net_unlock(cpt);
3890 /* Push source MD */
3891 md.start = &pbuf->pb_info;
3892 md.length = pbuf->pb_nbytes;
3893 md.threshold = 2; /* Put/Ack */
3895 md.options = LNET_MD_TRACK_RESPONSE;
3896 md.handler = the_lnet.ln_dc_handler;
3899 rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3901 lnet_ping_buffer_decref(pbuf);
3902 CERROR("Can't bind push source MD: %d\n", rc);
3906 cpt = lnet_net_lock_current();
3907 /* Refcount for MD. */
3908 lnet_peer_addref_locked(lp);
3909 id.pid = LNET_PID_LUSTRE;
3910 if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3911 id.nid = lp->lp_disc_dst_nid;
3913 id.nid = lp->lp_primary_nid;
3914 lnet_net_unlock(cpt);
3916 rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3917 LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3918 LNET_PROTO_PING_MATCHBITS, 0, 0);
3921 * Reset the discovery NIDs. There is no need to restrict sending
3922 * from that source if we call lnet_push_update_to_peers(). It'll
3923 * get set to a specific NID if we initiate discovery from scratch.
3926 lp->lp_disc_src_nid = LNET_ANY_NID;
3927 lp->lp_disc_dst_nid = LNET_ANY_NID;
3932 CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3934 spin_lock(&lp->lp_lock);
3938 LNetMDUnlink(lp->lp_push_mdh);
3939 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3941 CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3944 * The errors that get us here are considered hard errors and
3945 * cause Discovery to terminate. So we clear PUSH_SENT, but do
3946 * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3947 * because the unlink event handler will have set it if we
3948 * called LNetMDUnlink() above.
3950 spin_lock(&lp->lp_lock);
3951 lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3956 * Wait for work to be queued or some other change that must be
3957 * attended to. Returns non-zero if the discovery thread should shut
3960 static int lnet_peer_discovery_wait_for_work(void)
3967 cpt = lnet_net_lock_current();
3969 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3970 TASK_INTERRUPTIBLE);
3971 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3973 if (lnet_push_target_resize_needed() ||
3974 the_lnet.ln_push_target->pb_needs_post)
3976 if (!list_empty(&the_lnet.ln_dc_request))
3978 if (!list_empty(&the_lnet.ln_msg_resend))
3980 lnet_net_unlock(cpt);
3983 * wake up at most every second to check if there are peers that
3984 * have been stuck on the working queue for longer than the timeout.
3987 schedule_timeout(cfs_time_seconds(1));
3988 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3989 cpt = lnet_net_lock_current();
3991 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3993 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3996 lnet_net_unlock(cpt);
3998 CDEBUG(D_NET, "woken: %d\n", rc);
4004 * Messages that were pending on a destroyed peer will be put on a global
4005 * resend list. The message resend list will be checked by
4006 * the discovery thread when it wakes up, and will resend messages. These
4007 * messages can still be sendable in the case the lpni which was the initial
4008 * cause of the message re-queue was transferred to another peer.
4010 * It is possible that LNet could be shutdown while we're iterating
4011 * through the list. lnet_shutdown_lndnets() will attempt to access the
4012 * resend list, but will have to wait until the spinlock is released, by
4013 * which time there shouldn't be any more messages on the resend list.
4014 * During shutdown lnet_send() will fail and lnet_finalize() will be called
4015 * for the messages so they can be released. The other case is that
4016 * lnet_shutdown_lndnets() can finalize all the messages before this
4017 * function can visit the resend list, in which case this function will be a no-op.
4020 static void lnet_resend_msgs(void)
4022 struct lnet_msg *msg, *tmp;
4026 spin_lock(&the_lnet.ln_msg_resend_lock);
4027 list_splice(&the_lnet.ln_msg_resend, &resend);
4028 spin_unlock(&the_lnet.ln_msg_resend_lock);
4030 list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
4031 list_del_init(&msg->msg_list);
4032 rc = lnet_send(&msg->msg_src_nid_param, msg,
4033 &msg->msg_rtr_nid_param);
4035 CNETERR("Error sending %s to %s: %d\n",
4036 lnet_msgtyp2str(msg->msg_type),
4037 libcfs_idstr(&msg->msg_target), rc);
4038 lnet_finalize(msg, rc);
4043 /* The discovery thread. */
4044 static int lnet_peer_discovery(void *arg)
4046 struct lnet_peer *lp;
4050 wait_for_completion(&the_lnet.ln_started);
4052 CDEBUG(D_NET, "started\n");
4055 if (lnet_peer_discovery_wait_for_work())
4058 if (lnet_push_target_resize_needed())
4059 lnet_push_target_resize();
4060 else if (the_lnet.ln_push_target->pb_needs_post)
4061 lnet_push_target_post(the_lnet.ln_push_target,
4062 &the_lnet.ln_push_target_md);
4066 lnet_net_lock(LNET_LOCK_EX);
4067 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4068 lnet_net_unlock(LNET_LOCK_EX);
4073 * Process all incoming discovery work requests. When
4074 * discovery must wait on a peer to change state, it
4075 * is added to the tail of the ln_dc_working queue. A
4076 * timestamp keeps track of when the peer was added,
4077 * so we can time out discovery requests that take too
4080 while (!list_empty(&the_lnet.ln_dc_request)) {
4081 lp = list_first_entry(&the_lnet.ln_dc_request,
4082 struct lnet_peer, lp_dc_list);
4083 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4085 * set the time the peer was put on the dc_working
4086 * queue. It shouldn't remain on the queue
4087 * forever, in case the GET message (for ping)
4088 * doesn't get a REPLY or the PUT message (for
4089 * push) doesn't get an ACK.
4091 lp->lp_last_queued = ktime_get_real_seconds();
4092 lnet_net_unlock(LNET_LOCK_EX);
4094 if (lnet_push_target_resize_needed())
4095 lnet_push_target_resize();
4096 else if (the_lnet.ln_push_target->pb_needs_post)
4097 lnet_push_target_post(the_lnet.ln_push_target,
4098 &the_lnet.ln_push_target_md);
4101 * Select an action depending on the state of
4102 * the peer and whether discovery is disabled.
4103 * The check whether discovery is disabled is
4104 * done after the code that handles processing
4105 * for arrived data, cleanup for failures, and
4106 * forcing a Ping or Push.
4108 spin_lock(&lp->lp_lock);
4109 CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4110 libcfs_nidstr(&lp->lp_primary_nid), lp,
4112 if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4113 LNET_PEER_MARK_DELETED))
4114 rc = lnet_peer_deletion(lp);
4115 else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4116 rc = lnet_peer_data_present(lp);
4117 else if (lp->lp_state & LNET_PEER_PING_FAILED)
4118 rc = lnet_peer_ping_failed(lp);
4119 else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4120 rc = lnet_peer_push_failed(lp);
4121 else if (lp->lp_state & LNET_PEER_FORCE_PING)
4122 rc = lnet_peer_send_ping(lp);
4123 else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4124 rc = lnet_peer_send_push(lp);
4125 else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4126 rc = lnet_peer_send_ping(lp);
4127 else if (lnet_peer_needs_push(lp))
4128 rc = lnet_peer_send_push(lp);
4130 rc = lnet_peer_discovered(lp);
4131 CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4132 libcfs_nidstr(&lp->lp_primary_nid), lp,
4135 if (rc == LNET_REDISCOVER_PEER) {
4136 spin_unlock(&lp->lp_lock);
4137 lnet_net_lock(LNET_LOCK_EX);
4138 list_move(&lp->lp_dc_list,
4139 &the_lnet.ln_dc_request);
4141 !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4142 spin_unlock(&lp->lp_lock);
4143 lnet_net_lock(LNET_LOCK_EX);
4144 lnet_peer_discovery_complete(lp, rc);
4146 spin_unlock(&lp->lp_lock);
4147 lnet_net_lock(LNET_LOCK_EX);
4150 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4155 lnet_net_unlock(LNET_LOCK_EX);
4159 CDEBUG(D_NET, "stopping\n");
4161 * Clean up before telling lnet_peer_discovery_stop() that
4162 * we're done. Use wake_up() below to somewhat reduce the
4163 * size of the thundering herd if there are multiple threads
4164 * waiting on discovery of a single peer.
4167 /* Queue cleanup 1: stop all pending pings and pushes. */
4168 lnet_net_lock(LNET_LOCK_EX);
4169 while (!list_empty(&the_lnet.ln_dc_working)) {
4170 lp = list_first_entry(&the_lnet.ln_dc_working,
4171 struct lnet_peer, lp_dc_list);
4172 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4173 lnet_net_unlock(LNET_LOCK_EX);
4174 lnet_peer_cancel_discovery(lp);
4175 lnet_net_lock(LNET_LOCK_EX);
4177 lnet_net_unlock(LNET_LOCK_EX);
4179 /* Queue cleanup 2: wait for the expired queue to clear. */
4180 while (!list_empty(&the_lnet.ln_dc_expired))
4181 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4183 /* Queue cleanup 3: clear the request queue. */
4184 lnet_net_lock(LNET_LOCK_EX);
4185 while (!list_empty(&the_lnet.ln_dc_request)) {
4186 lp = list_first_entry(&the_lnet.ln_dc_request,
4187 struct lnet_peer, lp_dc_list);
4188 lnet_net_unlock(LNET_LOCK_EX);
4189 spin_lock(&lp->lp_lock);
4190 if (lp->lp_state & LNET_PEER_PING_FAILED)
4191 (void)lnet_peer_ping_failed(lp);
4192 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4193 (void)lnet_peer_push_failed(lp);
4194 spin_unlock(&lp->lp_lock);
4195 lnet_net_lock(LNET_LOCK_EX);
4196 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4198 lnet_net_unlock(LNET_LOCK_EX);
4200 if (lnet_assert_handler_unused(the_lnet.ln_dc_handler, --retry <= 0))
4203 the_lnet.ln_dc_handler = NULL;
4205 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4206 wake_up(&the_lnet.ln_dc_waitq);
4208 CDEBUG(D_NET, "stopped\n");
4213 /* ln_api_mutex is held on entry. */
4214 int lnet_peer_discovery_start(void)
4216 struct task_struct *task;
4219 if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4222 the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4223 the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4224 task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4227 CERROR("Can't start peer discovery thread: %d\n", rc);
4229 the_lnet.ln_dc_handler = NULL;
4231 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4234 CDEBUG(D_NET, "discovery start: %d\n", rc);
4239 /* ln_api_mutex is held on entry. */
4240 void lnet_peer_discovery_stop(void)
4242 if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4245 LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4246 the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4248 /* In the LNetNIInit() path we may be stopping discovery before it
4249 * entered its work loop.
4251 if (!completion_done(&the_lnet.ln_started))
4252 complete(&the_lnet.ln_started);
4254 wake_up(&the_lnet.ln_dc_waitq);
4256 mutex_unlock(&the_lnet.ln_api_mutex);
4257 wait_event(the_lnet.ln_dc_waitq,
4258 the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4259 mutex_lock(&the_lnet.ln_api_mutex);
4261 LASSERT(list_empty(&the_lnet.ln_dc_request));
4262 LASSERT(list_empty(&the_lnet.ln_dc_working));
4263 LASSERT(list_empty(&the_lnet.ln_dc_expired));
4265 CDEBUG(D_NET, "discovery stopped\n");
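/* Illustrative sketch (hypothetical): both entry points above expect the
 * caller to hold ln_api_mutex, so a discovery restart looks like this.
 */
static int example_restart_discovery(void)
{
	int rc;

	mutex_lock(&the_lnet.ln_api_mutex);
	lnet_peer_discovery_stop();
	rc = lnet_peer_discovery_start();
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}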
4271 lnet_debug_peer(struct lnet_nid *nid)
4273 char *aliveness = "NA";
4274 struct lnet_peer_ni *lp;
4277 cpt = lnet_nid2cpt(nid, NULL);
4280 lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4282 lnet_net_unlock(cpt);
4283 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4287 if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4288 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4290 CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4291 libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4292 aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4293 lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4294 lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4296 lnet_peer_ni_decref_locked(lp);
4298 lnet_net_unlock(cpt);
4301 /* Gathering information for userspace. */
4303 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4304 char aliveness[LNET_MAX_STR_LEN],
4305 __u32 *cpt_iter, __u32 *refcount,
4306 __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4307 __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4308 __u32 *peer_tx_qnob)
4310 struct lnet_peer_table *peer_table;
4311 struct lnet_peer_ni *lp;
4316 /* get the number of CPTs */
4317 lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4319 /* if the cpt number to be examined is >= the number of cpts in
4320 * the system then indicate that there are no more cpts to examine.
4322 if (*cpt_iter >= lncpt)
4325 /* get the current table */
4326 peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4327 /* if the ptable is NULL then there are no more cpts to examine */
4328 if (peer_table == NULL)
4331 lnet_net_lock(*cpt_iter);
4333 for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4334 struct list_head *peers = &peer_table->pt_hash[j];
4336 list_for_each_entry(lp, peers, lpni_hashlist) {
4337 if (!nid_is_nid4(&lp->lpni_nid))
4339 if (peer_index-- > 0)
4342 snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4343 if (lnet_isrouter(lp) ||
4344 lnet_peer_aliveness_enabled(lp))
4345 snprintf(aliveness, LNET_MAX_STR_LEN,
4346 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4348 *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4349 *refcount = kref_read(&lp->lpni_kref);
4350 *ni_peer_tx_credits =
4351 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4352 *peer_tx_credits = lp->lpni_txcredits;
4353 *peer_rtr_credits = lp->lpni_rtrcredits;
4354 *peer_min_rtr_credits = lp->lpni_mintxcredits;
4355 *peer_tx_qnob = lp->lpni_txqnob;
4361 lnet_net_unlock(*cpt_iter);
4365 return found ? 0 : -ENOENT;
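/* Illustrative sketch (hypothetical): userspace-style iteration over the
 * peer NIs exposed by lnet_get_peer_ni_info() above, bumping the index
 * until -ENOENT; the bound of 16 is arbitrary.
 */
static void example_dump_peer_nis(void)
{
	char aliveness[LNET_MAX_STR_LEN];
	__u32 cpt = 0;
	__u32 refs, ni_tx, tx, rtr, min_rtr, qnob;
	__u64 nid4;
	__u32 idx;

	for (idx = 0; idx < 16; idx++) {
		if (lnet_get_peer_ni_info(idx, &nid4, aliveness, &cpt,
					  &refs, &ni_tx, &tx, &rtr,
					  &min_rtr, &qnob) != 0)
			break;
		CDEBUG(D_NET, "peer NI %s is %s\n",
		       libcfs_nid2str(nid4), aliveness);
	}
}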
/* ln_api_mutex is held, which keeps the peer list stable */
int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
{
	struct lnet_ioctl_element_stats *lpni_stats;
	struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
	struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
	struct lnet_peer_ni_credit_info *lpni_info;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp;
	lnet_nid_t nid4;
	struct lnet_nid nid;
	__u32 size;
	int rc;

	lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
	lp = lnet_find_peer(&nid);
	if (!lp) {
		rc = -ENOENT;
		goto out;
	}

	size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
	       + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
	size *= lp->lp_nnis;
	if (size > cfg->prcfg_size) {
		cfg->prcfg_size = size;
		rc = -E2BIG;
		goto out_lp_decref;
	}

	cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
	cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
	cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
	cfg->prcfg_count = lp->lp_nnis;
	cfg->prcfg_size = size;
	cfg->prcfg_state = lp->lp_state;

	/* Allocate helper buffers. */
	rc = -ENOMEM;
	LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
	if (!lpni_info)
		goto out_lp_decref;
	LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
	if (!lpni_stats)
		goto out_free_info;
	LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
	if (!lpni_msg_stats)
		goto out_free_stats;
	LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
	if (!lpni_hstats)
		goto out_free_msg_stats;

	rc = -EFAULT;
	lpni = NULL;
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
		if (!nid_is_nid4(&lpni->lpni_nid))
			continue;
		nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
		if (copy_to_user(bulk, &nid4, sizeof(nid4)))
			goto out_free_hstats;
		bulk += sizeof(nid4);

		memset(lpni_info, 0, sizeof(*lpni_info));
		snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
		if (lnet_isrouter(lpni) ||
		    lnet_peer_aliveness_enabled(lpni))
			snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
				 lnet_is_peer_ni_alive(lpni) ? "up" : "down");

		lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
		lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
			lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
		lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
		lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
		lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
		lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
		lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
		if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_info);

		memset(lpni_stats, 0, sizeof(*lpni_stats));
		lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_SEND);
		lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_RECV);
		lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
							    LNET_STATS_TYPE_DROP);
		if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_stats);

		lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
		if (copy_to_user(bulk, lpni_msg_stats,
				 sizeof(*lpni_msg_stats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_msg_stats);

		lpni_hstats->hlpni_network_timeout =
			atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
		lpni_hstats->hlpni_remote_dropped =
			atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
		lpni_hstats->hlpni_remote_timeout =
			atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
		lpni_hstats->hlpni_remote_error =
			atomic_read(&lpni->lpni_hstats.hlt_remote_error);
		lpni_hstats->hlpni_health_value =
			atomic_read(&lpni->lpni_healthv);
		lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
		lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
		if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
			goto out_free_hstats;
		bulk += sizeof(*lpni_hstats);
	}

	rc = 0;

out_free_hstats:
	LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
out_free_msg_stats:
	LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
out_free_stats:
	LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
out_free_info:
	LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
out_lp_decref:
	lnet_peer_decref_locked(lp);
out:
	return rc;
}
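/*
 * For reference, lnet_get_peer_info() streams one fixed-size record per
 * peer NI into the user buffer, in exactly the order the copies above are
 * issued:
 *
 *	lnet_nid_t                           nid4
 *	struct lnet_peer_ni_credit_info      lpni_info
 *	struct lnet_ioctl_element_stats      lpni_stats
 *	struct lnet_ioctl_element_msg_stats  lpni_msg_stats
 *	struct lnet_ioctl_peer_ni_hstats     lpni_hstats
 *
 * This is why the size check at the top multiplies the sum of these five
 * sizes by lp->lp_nnis before anything is copied out.  A consumer walks
 * the bulk buffer by advancing a byte pointer through the same five sizes,
 * e.g. (illustrative only):
 *
 *	nid4 = *(lnet_nid_t *)buf;
 *	buf += sizeof(lnet_nid_t);
 *	info = (struct lnet_peer_ni_credit_info *)buf;
 *	buf += sizeof(*info);
 */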
/* must hold net_lock/0 */
void
lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
				     struct list_head *recovery_queue,
				     time64_t now)
{
	/* the monitor thread could've shut down and cleaned up the queues */
	if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
		return;

	if (!list_empty(&lpni->lpni_recovery))
		return;

	if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
		return;

	if (!lpni->lpni_last_alive) {
		CDEBUG(D_NET,
		       "lpni %s(%p) not eligible for recovery last alive %lld\n",
		       libcfs_nidstr(&lpni->lpni_nid), lpni,
		       lpni->lpni_last_alive);
		return;
	}

	if (lnet_recovery_limit &&
	    now > lpni->lpni_last_alive + lnet_recovery_limit) {
		CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
		       libcfs_nidstr(&lpni->lpni_nid),
		       lpni->lpni_last_alive);
		/* Reset the ping count so that if this peer NI is added back
		 * to the recovery queue we will send the first ping right
		 * away.
		 */
		lpni->lpni_ping_count = 0;
		return;
	}

	/* This peer NI is going on the recovery queue, so take a ref on it */
	lnet_peer_ni_addref_locked(lpni);

	lnet_peer_ni_set_next_ping(lpni, now);

	CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       lpni->lpni_ping_count,
	       lpni->lpni_next_ping,
	       lpni->lpni_last_alive,
	       atomic_read(&lpni->lpni_healthv));

	list_add_tail(&lpni->lpni_recovery, recovery_queue);
}
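/*
 * Usage sketch (illustrative only): a caller queues a peer NI for recovery
 * under net_lock/0, e.g.
 *
 *	lnet_net_lock(0);
 *	lnet_peer_ni_add_to_recoveryq_locked(lpni,
 *					     &the_lnet.ln_mt_peerNIRecovq,
 *					     ktime_get_seconds());
 *	lnet_net_unlock(0);
 *
 * The function is safe to call speculatively: a peer NI that is already
 * queued, already at LNET_MAX_HEALTH_VALUE, never seen alive, or aged out
 * by lnet_recovery_limit is simply left off the queue.
 */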
/* Call with the ln_api_mutex held */
void
lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int lncpt;
	int cpt;
	time64_t now;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return;

	now = ktime_get_seconds();

	if (!all) {
		lnet_net_lock(LNET_LOCK_EX);
		lpni = lnet_peer_ni_find_locked(nid);
		if (!lpni) {
			lnet_net_unlock(LNET_LOCK_EX);
			return;
		}
		lnet_set_lpni_healthv_locked(lpni, value);
		lnet_peer_ni_add_to_recoveryq_locked(lpni,
				&the_lnet.ln_mt_peerNIRecovq, now);
		lnet_peer_ni_decref_locked(lpni);
		lnet_net_unlock(LNET_LOCK_EX);
		return;
	}

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Walk all the peers and reset the health value for each one to the
	 * specified value.
	 */
	lnet_net_lock(LNET_LOCK_EX);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			list_for_each_entry(lpn, &lp->lp_peer_nets,
					    lpn_peer_nets) {
				list_for_each_entry(lpni, &lpn->lpn_peer_nis,
						    lpni_peer_nis) {
					lnet_set_lpni_healthv_locked(lpni,
								     value);
					lnet_peer_ni_add_to_recoveryq_locked(
						lpni,
						&the_lnet.ln_mt_peerNIRecovq,
						now);
				}
			}
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
}
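/*
 * Usage sketch (illustrative only): with the ln_api_mutex held, a caller
 * can reset every peer NI to full health in one call, or adjust a single
 * peer NI.  The nid argument is never dereferenced when all is true, so
 * passing NULL there is fine:
 *
 *	lnet_peer_ni_set_healthv(NULL, LNET_MAX_HEALTH_VALUE, true);
 *	lnet_peer_ni_set_healthv(&nid, 0, false);
 *
 * Either path also feeds the affected peer NIs through
 * lnet_peer_ni_add_to_recoveryq_locked(), which filters out the ones that
 * are already healthy or already queued.
 */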