/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}

void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {

		if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}
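
/*
 * Illustrative sketch (not part of the original file): a peer_ni whose
 * network is not yet configured locally sits on ln_remote_peer_ni_list
 * with no credits assigned. Once the matching net comes up,
 * lnet_peer_net_added() fills in its credits from the net tunables and
 * takes it off the remote list. Conceptually, from a net-addition path:
 *
 *	struct lnet_net *net = ...;	// net just brought up
 *	lnet_net_lock(LNET_LOCK_EX);
 *	lnet_peer_net_added(net);	// re-links matching peer_nis
 *	lnet_net_unlock(LNET_LOCK_EX);
 *
 * The exact call site and locking context are assumptions of this
 * sketch; see the net startup path for the authoritative usage.
 */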

static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table	*ptable;
	struct list_head	*hash;
	int			i;
	int			j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table	*ptable;
	struct list_head	*hash;
	int			i;
	int			j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}
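
/*
 * Illustrative sketch (not from the original file): the peer tables are
 * per-CPT, and a NID is mapped to its table and hash chain the same way
 * lnet_find_peer_ni_locked() does further below:
 *
 *	int cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
 *	struct lnet_peer_table *ptable = the_lnet.ln_peer_tables[cpt];
 *	struct list_head *chain = &ptable->pt_hash[lnet_nid2peerhash(nid)];
 *
 * Anything beyond computing these, such as walking the chain, must be
 * done under the appropriate lnet_net_lock.
 */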

static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NIDNET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

	return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = nid;
	lp->lp_disc_src_nid = LNET_NID_ANY;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * All peers created on a router should have health checking
	 * enabled, if it is not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for the loopback peer. A peer for the
	 * loopback interface is only created when we attempt to send a
	 * message over the loopback, and there is never a reason to use
	 * a different interface when sending messages to ourselves.
	 */
	if (nid == LNET_NID_LO_0)
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on it.
	 * We cannot resend them now because we are holding the cpt lock,
	 * and releasing the lock could leave an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
		libcfs_nid2str(lp->lp_primary_nid),
		libcfs_nid2str(lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nid2str(lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}
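
/*
 * Illustrative sketch (not from the original file): the zombie list above
 * pairs with the release side of the peer_ni refcount. When the last
 * reference is dropped, the free path is expected to take the entry off
 * pt_zombie_list under pt_zombie_lock and decrement pt_zombies, which is
 * what lnet_peer_ni_finalize_wait() below waits on:
 *
 *	spin_lock(&ptable->pt_zombie_lock);
 *	list_del_init(&lpni->lpni_hashlist);
 *	ptable->pt_zombies--;
 *	spin_unlock(&ptable->pt_zombie_lock);
 *
 * The actual free function lives elsewhere in LNet; this is only the
 * shape of the hand-off implied by the comment above.
 */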

void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}
	lpni = lnet_find_peer_ni_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

	return rc;
}
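
/*
 * Illustrative sketch (not from the original file): a DLC-driven caller
 * would hold ln_api_mutex, pass LNET_PEER_CONFIGURED, and interpret the
 * error codes documented above, roughly:
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 *	if (rc == -EBUSY) {
 *		// nid is the primary while other NIDs remain; delete the
 *		// whole peer, or use LNET_PEER_RTR_NI_FORCE_DEL instead
 *	}
 *
 * The surrounding ioctl plumbing is an assumption of this sketch; only
 * the flag and return-code semantics come from the comment block above.
 */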

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int			 i;
	struct lnet_peer_ni	*next;
	struct lnet_peer_ni	*lpni;
	struct lnet_peer	*peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (peer->lp_primary_nid != lpni->lpni_nid) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni	*lp;
	struct lnet_peer_ni	*tmp;
	lnet_nid_t		gw_nid;
	int			i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
	struct list_head	*peers;
	struct lnet_peer_ni	*lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (lp->lpni_nid == nid) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lpni->lpni_nid == nid)
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided; return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list, loop back
			 * to the beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}
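
/*
 * Illustrative sketch (not from the original file): because this helper
 * wraps from the last net back to the first, a round-robin walk over a
 * peer's nets has to remember where it started to avoid spinning forever:
 *
 *	__u32 first = 0, id = 0;
 *	struct lnet_peer_net *lpn;
 *
 *	while ((lpn = lnet_get_next_peer_net_locked(lp, id)) != NULL) {
 *		if (!first)
 *			first = lpn->lpn_net_id;
 *		else if (lpn->lpn_net_id == first)
 *			break;		// wrapped around: done
 *		id = lpn->lpn_net_id;
 *	}
 *
 * Called under lnet_net_lock, as the _locked suffix requires; using 0 as
 * a "no previous net" sentinel mirrors the !prev_lpn_id test above.
 */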

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_entry(peer->lp_peer_nets.next,
					 struct lnet_peer_net,
					 lpn_peer_nets);
		}
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if we reached the end of the peer ni list and a peer
		 * net was specified, then there are no more peer nis in
		 * that net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net's ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
				 struct lnet_peer_net,
				 lpn_peer_nets);
		/* get the ni on it */
		lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
				  lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_entry(prev->lpni_peer_nis.next,
			  struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}
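
/*
 * Illustrative note (not from the original file): the canonical way to
 * walk every peer_ni of a peer with this helper is the idiom used by
 * lnet_peer_clr_non_mr_pref_nids() and lnet_peer_del_locked() in this
 * file:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		handle(lpni);	// handle() is a placeholder
 *
 * Passing a non-NULL peer_net instead restricts the walk to that single
 * net. The caller must hold lnet_net_lock throughout.
 */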

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	__u32 count = 0;
	__u32 size = 0;
	int lncpt;
	int cpt;
	__u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (i >= count)
				goto done;
			id.nid = lp->lp_primary_nid;
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}
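
/*
 * Illustrative sketch (not from the original file): since the function
 * always writes back the required size, a caller can retry on -E2BIG
 * with a correctly sized buffer:
 *
 *	u32 count = 0, size = 0;
 *	int rc = lnet_get_peer_list(&count, &size, NULL);	// probe
 *	if (rc == -E2BIG) {
 *		// allocate 'size' bytes of user memory, set size, retry
 *		rc = lnet_get_peer_list(&count, &size, ids);
 *	}
 *
 * Probing with a NULL buffer and *sizep == 0 is an assumption of this
 * sketch (the buffer is never touched on the -E2BIG path); real callers
 * arrive via the DLC ioctl path with a user buffer.
 */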

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: list is not empty and NID is not preferred
 *	false: list is empty
 *	true: nid is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     lnet_nid_t gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nid2str(lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nid2str(ne->nl_nid),
		       libcfs_nid2str(gw_nid));
		if (ne->nl_nid == gw_nid)
			return true;
	}

	return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       lnet_nid_t gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with the api_mutex held. While the
	 * api_mutex is held the list cannot be modified, as it is only
	 * modified as a result of applying a UDSP, and that happens under
	 * the api_mutex lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (ne->nl_nid == gw_nid)
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}
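
/*
 * Illustrative note (not from the original file): lnet_peer_clr_pref_rtrs()
 * above shows the standard LNet pattern for tearing down a lock-protected
 * list: splice it onto a private "zombies" head under the lock, then free
 * the entries outside the lock so LIBCFS_FREE() is never called with the
 * net lock held:
 *
 *	lnet_net_lock(cpt);
 *	list_splice_init(&protected_list, &zombies);
 *	lnet_net_unlock(cpt);
 *	// free the zombies at leisure
 *
 * lnet_peer_clr_pref_nids() further below uses the same idiom.
 */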

/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return lpni->lpni_pref.nid == nid;
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (ne->nl_nid == nid)
			return true;
	}
	return false;
}
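
/*
 * Illustrative note (not from the original file): lpni_pref behaves as a
 * small-size optimization keyed on lpni_pref_nnids. With exactly one
 * preferred NID it is stored inline; with more than one it becomes a
 * list of lnet_nid_list entries. Conceptually:
 *
 *	union {
 *		lnet_nid_t	 nid;	// lpni_pref_nnids == 1
 *		struct list_head nids;	// lpni_pref_nnids > 1
 *	} lpni_pref;
 *
 * The exact declaration lives with the lnet_peer_ni definition in the
 * headers; this is only the shape implied by the accessors here and by
 * lnet_peer_add_pref_nid()/lnet_peer_del_pref_nid() below, which handle
 * the 1 -> 2 and 2 -> 1 transitions.
 */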

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nid2str(lpni->lpni_nid), rc);
	return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	lnet_nid_t tmp_nid = LNET_NID_ANY;
	int rc = 0;

	if (nid == LNET_NID_ANY) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (ne1->nl_nid == nid) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
				lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = tmp_nid;
		}
		ne1->nl_nid = nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (lpni->lpni_pref.nid != nid) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (ne->nl_nid == nid)
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_NID_ANY;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni) {
		primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	lnet_nid_t primary_nid = nid;
	int rc = 0;
	int cpt;

	if (nid == LNET_NID_LO_0)
		return LNET_NID_LO_0;

	cpt = lnet_net_lock_current();
	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	while (!lnet_peer_is_uptodate(lp)) {
		spin_lock(&lp->lp_lock);
		/* force a full discovery cycle */
		lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
		spin_unlock(&lp->lp_lock);

		rc = lnet_discover_peer_locked(lpni, cpt, true);
		if (rc)
			goto out_decref;
		/*
		 * The lpni (or lp) for this NID may have changed and our
		 * ref is the only thing keeping the old one around.
		 * Release the ref and look up the lpni again.
		 */
		lnet_peer_ni_decref_locked(lpni);
		lpni = lnet_find_peer_ni_locked(nid);
		if (!lpni) {
			rc = -ENOENT;
			goto out_unlock;
		}
		lp = lpni->lpni_peer_net->lpn_peer;

		/* Only try once if discovery is disabled */
		if (lnet_is_discovery_disabled(lp))
			break;
	}
	primary_nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
	       libcfs_nid2str(primary_nid), rc);
	return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
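
/*
 * Illustrative sketch (not from the original file): LNetPrimaryNID() is
 * the exported entry point callers use to collapse any NID of a
 * multi-rail peer to its primary NID, e.g. when keying state by peer
 * rather than by interface:
 *
 *	lnet_nid_t key = LNetPrimaryNID(msg_source_nid);
 *	// key now identifies the peer regardless of which of its
 *	// interfaces the message arrived on; on error the input NID is
 *	// returned unchanged (rc is only logged above)
 *
 * Because it may block on discovery, it should not be called from a
 * context that cannot sleep.
 */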

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}
	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
				struct lnet_peer_net *lpn,
				struct lnet_peer_ni *lpni,
				unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (lp->lp_primary_nid == lpni->lpni_nid)
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (lp->lp_primary_nid == lpni->lpni_nid)
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nid2str(lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nid2str(lp->lp_primary_nid),
	       libcfs_nid2str(lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}
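
/*
 * Illustrative note (not from the original file): the three-level
 * hierarchy this function maintains, per the LU-13806 subject line
 * ("Ensure proper peer, peer NI, peer net hierarchy"), is:
 *
 *	lnet_peer (lp_peer_nets)
 *	  `-- lnet_peer_net (lpn_peer_nis, lpn_peer -> peer)
 *	        `-- lnet_peer_ni (lpni_peer_net -> peer_net)
 *
 * Each level holds a reference on its parent, and the primary NID's
 * peer_ni and peer_net are kept at the head of their lists by the
 * list_add()-vs-list_add_tail() choices above.
 */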
1503
1504 /*
1505  * Create a new peer, with nid as its primary nid.
1506  *
1507  * Call with the lnet_api_mutex held.
1508  */
1509 static int
1510 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1511 {
1512         struct lnet_peer *lp;
1513         struct lnet_peer_net *lpn;
1514         struct lnet_peer_ni *lpni;
1515         int rc = 0;
1516
1517         LASSERT(nid != LNET_NID_ANY);
1518
1519         /*
1520          * No need for the lnet_net_lock here, because the
1521          * lnet_api_mutex is held.
1522          */
1523         lpni = lnet_find_peer_ni_locked(nid);
1524         if (lpni) {
1525                 /* A peer with this NID already exists. */
1526                 lp = lpni->lpni_peer_net->lpn_peer;
1527                 lnet_peer_ni_decref_locked(lpni);
1528                 /*
1529                  * This is an error if the peer was configured and the
1530                  * primary NID differs or an attempt is made to change
1531                  * the Multi-Rail flag. Otherwise the assumption is
1532                  * that an existing peer is being modified.
1533                  */
1534                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1535                         if (lp->lp_primary_nid != nid)
1536                                 rc = -EEXIST;
1537                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1538                                 rc = -EPERM;
1539                         goto out;
1540                 }
1541                 /* Delete and recreate as a configured peer. */
1542                 lnet_peer_del(lp);
1543         }
1544
1545         /* Create peer, peer_net, and peer_ni. */
1546         rc = -ENOMEM;
1547         lp = lnet_peer_alloc(nid);
1548         if (!lp)
1549                 goto out;
1550         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1551         if (!lpn)
1552                 goto out_free_lp;
1553         lpni = lnet_peer_ni_alloc(nid);
1554         if (!lpni)
1555                 goto out_free_lpn;
1556
1557         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1558
1559 out_free_lpn:
1560         LIBCFS_FREE(lpn, sizeof(*lpn));
1561 out_free_lp:
1562         LIBCFS_FREE(lp, sizeof(*lp));
1563 out:
1564         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1565                libcfs_nid2str(nid), flags, rc);
1566         return rc;
1567 }
1568
1569 /*
1570  * Add a NID to a peer. Call with ln_api_mutex held.
1571  *
1572  * Error codes:
1573  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1574  *  -EEXIST:   The NID was configured by DLC for a different peer.
1575  *  -ENOMEM:   Out of memory.
1576  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1577  *             non-multi-rail peer.
1578  */
1579 static int
1580 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1581 {
1582         struct lnet_peer_net *lpn;
1583         struct lnet_peer_ni *lpni;
1584         int rc = 0;
1585
1586         LASSERT(lp);
1587         LASSERT(nid != LNET_NID_ANY);
1588
1589         /* A configured peer can only be updated through configuration. */
1590         if (!(flags & LNET_PEER_CONFIGURED)) {
1591                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1592                         rc = -EPERM;
1593                         goto out;
1594                 }
1595         }
1596
1597         /*
1598          * The MULTI_RAIL flag can be set but not cleared, because
1599          * that would leave the peer struct in an invalid state.
1600          */
1601         if (flags & LNET_PEER_MULTI_RAIL) {
1602                 spin_lock(&lp->lp_lock);
1603                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1604                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1605                         lnet_peer_clr_non_mr_pref_nids(lp);
1606                 }
1607                 spin_unlock(&lp->lp_lock);
1608         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1609                 rc = -EPERM;
1610                 goto out;
1611         }
1612
1613         lpni = lnet_find_peer_ni_locked(nid);
1614         if (lpni) {
1615                 /*
1616                  * A peer_ni already exists. This is only a problem if
1617                  * it is not connected to this peer and was configured
1618                  * by DLC.
1619                  */
1620                 if (lpni->lpni_peer_net->lpn_peer == lp)
1621                         goto out_free_lpni;
1622                 if (lnet_peer_ni_is_configured(lpni)) {
1623                         rc = -EEXIST;
1624                         goto out_free_lpni;
1625                 }
1626                 /* If this is the primary NID, destroy the peer. */
1627                 if (lnet_peer_ni_is_primary(lpni)) {
1628                         struct lnet_peer *rtr_lp =
1629                                 lpni->lpni_peer_net->lpn_peer;
1630                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1631                         /*
1632                          * If we're trying to delete a router, it means
1633                          * we're moving this peer NI to a new peer, so we
1634                          * must transfer the router properties to the new peer.
1635                          */
1636                         if (rtr_refcount > 0) {
1637                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1638                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1639                         }
1640                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1641                         lnet_peer_ni_decref_locked(lpni);
1642                         lpni = lnet_peer_ni_alloc(nid);
1643                         if (!lpni) {
1644                                 rc = -ENOMEM;
1645                                 goto out_free_lpni;
1646                         }
1647                 }
1648         } else {
1649                 lpni = lnet_peer_ni_alloc(nid);
1650                 if (!lpni) {
1651                         rc = -ENOMEM;
1652                         goto out_free_lpni;
1653                 }
1654         }
1655
1656         /*
1657          * Get the peer_net. Check that we're not adding a second
1658          * peer_ni on a peer_net of a non-multi-rail peer.
1659          */
1660         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1661         if (!lpn) {
1662                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1663                 if (!lpn) {
1664                         rc = -ENOMEM;
1665                         goto out_free_lpni;
1666                 }
1667         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1668                 rc = -ENOTUNIQ;
1669                 goto out_free_lpni;
1670         }
1671
1672         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1673
1674 out_free_lpni:
1675         lnet_peer_ni_decref_locked(lpni);
1676 out:
1677         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1678                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1679                flags, rc);
1680         return rc;
1681 }
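
/*
 * Illustrative sketch (editorial addition, not in the original file):
 * how the error codes documented above can surface. The helper is
 * hypothetical and assumes the caller holds ln_api_mutex.
 */
static int __maybe_unused
example_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, bool mr)
{
        unsigned flags = LNET_PEER_CONFIGURED;

        if (mr)
                flags |= LNET_PEER_MULTI_RAIL;

        /* May fail with:
         *  -EPERM if flags would clear MULTI_RAIL on an MR peer,
         *  -EEXIST if nid is DLC-configured on a different peer,
         *  -ENOTUNIQ if lp is non-MR and already has a peer NI on
         *   nid's network,
         *  -ENOMEM on allocation failure.
         */
        return lnet_peer_add_nid(lp, nid, flags);
}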
1682
1683 /*
1684  * Update the primary NID of a peer, if possible.
1685  *
1686  * Call with the lnet_api_mutex held.
1687  */
1688 static int
1689 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1690 {
1691         lnet_nid_t old = lp->lp_primary_nid;
1692         int rc = 0;
1693
1694         if (lp->lp_primary_nid == nid)
1695                 goto out;
1696
1697         lp->lp_primary_nid = nid;
1698
1699         rc = lnet_peer_add_nid(lp, nid, flags);
1700         if (rc) {
1701                 lp->lp_primary_nid = old;
1702                 goto out;
1703         }
1704 out:
1705         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1706                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1707         return rc;
1708 }
1709
1710 /*
1711  * lpni creation initiated by traffic, either sending or receiving.
1712  */
1713 static int
1714 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1715 {
1716         struct lnet_peer *lp;
1717         struct lnet_peer_net *lpn;
1718         struct lnet_peer_ni *lpni;
1719         unsigned flags = 0;
1720         int rc = 0;
1721
1722         if (nid == LNET_NID_ANY) {
1723                 rc = -EINVAL;
1724                 goto out;
1725         }
1726
1727         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1728         lpni = lnet_find_peer_ni_locked(nid);
1729         if (lpni) {
1730                 /*
1731                  * We must have raced with another thread. Since we
1732                  * know next to nothing about a peer_ni created by
1733                  * traffic, we just assume everything is ok and
1734                  * return.
1735                  */
1736                 lnet_peer_ni_decref_locked(lpni);
1737                 goto out;
1738         }
1739
1740         /* Create peer, peer_net, and peer_ni. */
1741         rc = -ENOMEM;
1742         lp = lnet_peer_alloc(nid);
1743         if (!lp)
1744                 goto out;
1745         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1746         if (!lpn)
1747                 goto out_free_lp;
1748         lpni = lnet_peer_ni_alloc(nid);
1749         if (!lpni)
1750                 goto out_free_lpn;
1751         if (pref != LNET_NID_ANY)
1752                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1753
1754         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1755
1756 out_free_lpn:
1757         LIBCFS_FREE(lpn, sizeof(*lpn));
1758 out_free_lp:
1759         LIBCFS_FREE(lp, sizeof(*lp));
1760 out:
1761         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1762         return rc;
1763 }
1764
1765 /*
1766  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1767  *
1768  * This API handles the following combinations:
1769  *  - Create a peer with its primary NI if only the prim_nid is provided.
1770  *  - Add a NID to a peer identified by the prim_nid. The peer identified
1771  *    by the prim_nid must already exist.
1772  * The peer being created may be non-MR (see the sketch below).
1773  *
1774  * The caller must hold ln_api_mutex. This prevents the peer from
1775  * being created/modified/deleted by a different thread.
1776  */
1777 int
1778 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1779 {
1780         struct lnet_peer *lp = NULL;
1781         struct lnet_peer_ni *lpni;
1782         unsigned flags;
1783
1784         /* The prim_nid must always be specified */
1785         if (prim_nid == LNET_NID_ANY)
1786                 return -EINVAL;
1787
1788         flags = LNET_PEER_CONFIGURED;
1789         if (mr)
1790                 flags |= LNET_PEER_MULTI_RAIL;
1791
1792         /*
1793          * If nid isn't specified, we must create a new peer with
1794          * prim_nid as its primary nid.
1795          */
1796         if (nid == LNET_NID_ANY)
1797                 return lnet_peer_add(prim_nid, flags);
1798
1799         /* Look up the prim_nid, which must exist. */
1800         lpni = lnet_find_peer_ni_locked(prim_nid);
1801         if (!lpni)
1802                 return -ENOENT;
1803         lnet_peer_ni_decref_locked(lpni); /* lpni pinned by ln_api_mutex */
1804         lp = lpni->lpni_peer_net->lpn_peer;
1805
1806         /* Peer must have been configured. */
1807         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1808                 CDEBUG(D_NET, "peer %s was not configured\n",
1809                        libcfs_nid2str(prim_nid));
1810                 return -ENOENT;
1811         }
1812
1813         /* Primary NID must match */
1814         if (lp->lp_primary_nid != prim_nid) {
1815                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1816                        libcfs_nid2str(prim_nid),
1817                        libcfs_nid2str(lp->lp_primary_nid));
1818                 return -ENODEV;
1819         }
1820
1821         /* Multi-Rail flag must match. */
1822         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1823                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1824                        libcfs_nid2str(prim_nid));
1825                 return -EPERM;
1826         }
1827
1828         return lnet_peer_add_nid(lp, nid, flags);
1829 }
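
/*
 * Illustrative usage sketch (editorial addition): building a two-NID
 * Multi-Rail peer with the API above, as an IOC_LIBCFS_ADD_PEER_NI
 * caller might. The helper is hypothetical; ln_api_mutex must be held.
 */
static int __maybe_unused
example_add_mr_peer(lnet_nid_t prim_nid, lnet_nid_t second_nid)
{
        int rc;

        /* Create the peer, with prim_nid as its primary NID. */
        rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
        if (rc)
                return rc;

        /* Add a second NID to the peer identified by prim_nid. */
        return lnet_add_peer_ni(prim_nid, second_nid, true);
}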
1830
1831 /*
1832  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1833  *
1834  * This API handles the following combinations:
1835  *  - Delete a NI from a peer if both prim_nid and nid are provided.
1836  *  - Delete the entire peer if only prim_nid is provided.
1837  *  - Delete the entire peer if the given nid is its primary nid.
1838  *
1839  * The caller must hold ln_api_mutex. This prevents the peer from
1840  * being modified/deleted by a different thread.
1841  */
1842 int
1843 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1844 {
1845         struct lnet_peer *lp;
1846         struct lnet_peer_ni *lpni;
1847         unsigned flags;
1848
1849         if (prim_nid == LNET_NID_ANY)
1850                 return -EINVAL;
1851
1852         lpni = lnet_find_peer_ni_locked(prim_nid);
1853         if (!lpni)
1854                 return -ENOENT;
1855         lnet_peer_ni_decref_locked(lpni); /* lpni pinned by ln_api_mutex */
1856         lp = lpni->lpni_peer_net->lpn_peer;
1857
1858         if (prim_nid != lp->lp_primary_nid) {
1859                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1860                        libcfs_nid2str(prim_nid),
1861                        libcfs_nid2str(lp->lp_primary_nid));
1862                 return -ENODEV;
1863         }
1864
1865         lnet_net_lock(LNET_LOCK_EX);
1866         if (lp->lp_rtr_refcount > 0) {
1867                 lnet_net_unlock(LNET_LOCK_EX);
1868                 CERROR("%s is a router. Can not be deleted\n",
1869                        libcfs_nid2str(prim_nid));
1870                 return -EBUSY;
1871         }
1872         lnet_net_unlock(LNET_LOCK_EX);
1873
1874         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1875                 return lnet_peer_del(lp);
1876
1877         flags = LNET_PEER_CONFIGURED;
1878         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1879                 flags |= LNET_PEER_MULTI_RAIL;
1880
1881         return lnet_peer_del_nid(lp, nid, flags);
1882 }
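
/*
 * Illustrative sketch (editorial addition): the two deletion modes
 * described above. The helper is hypothetical; ln_api_mutex must be
 * held, and routers are rejected with -EBUSY as shown in the code.
 */
static int __maybe_unused
example_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
{
        int rc;

        /* Remove a single NI from the peer identified by prim_nid. */
        rc = lnet_del_peer_ni(prim_nid, nid);
        if (rc)
                return rc;

        /* Passing LNET_NID_ANY removes the entire peer. */
        return lnet_del_peer_ni(prim_nid, LNET_NID_ANY);
}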
1883
1884 void
1885 lnet_destroy_peer_ni_locked(struct kref *ref)
1886 {
1887         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1888                                                  lpni_kref);
1889         struct lnet_peer_table *ptable;
1890         struct lnet_peer_net *lpn;
1891
1892         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1893
1894         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1895         LASSERT(list_empty(&lpni->lpni_txq));
1896         LASSERT(lpni->lpni_txqnob == 0);
1897         LASSERT(list_empty(&lpni->lpni_peer_nis));
1898         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1899
1900         lpn = lpni->lpni_peer_net;
1901         lpni->lpni_peer_net = NULL;
1902         lpni->lpni_net = NULL;
1903
1904         if (!list_empty(&lpni->lpni_hashlist)) {
1905                 /* remove the peer ni from the zombie list */
1906                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1907                 spin_lock(&ptable->pt_zombie_lock);
1908                 list_del_init(&lpni->lpni_hashlist);
1909                 ptable->pt_zombies--;
1910                 spin_unlock(&ptable->pt_zombie_lock);
1911         }
1912
1913         if (lpni->lpni_pref_nnids > 1) {
1914                 struct lnet_nid_list *ne, *tmp;
1915
1916                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1917                                          nl_list) {
1918                         list_del_init(&ne->nl_list);
1919                         LIBCFS_FREE(ne, sizeof(*ne));
1920                 }
1921         }
1922         LIBCFS_FREE(lpni, sizeof(*lpni));
1923
1924         if (lpn)
1925                 lnet_peer_net_decref_locked(lpn);
1926 }
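
/*
 * Editorial note (assumption about the surrounding tree): this is the
 * kref release callback. It is presumed to run via something like
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 *
 * from lnet_peer_ni_decref_locked(), with the net lock held in
 * exclusive mode, hence the _locked suffix.
 */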
1927
1928 struct lnet_peer_ni *
1929 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1930 {
1931         struct lnet_peer_ni *lpni = NULL;
1932         int rc;
1933
1934         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1935                 return ERR_PTR(-ESHUTDOWN);
1936
1937         /*
1938          * find if a peer_ni already exists.
1939          * If so then just return that.
1940          */
1941         lpni = lnet_find_peer_ni_locked(nid);
1942         if (lpni)
1943                 return lpni;
1944
1945         lnet_net_unlock(cpt);
1946
1947         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1948         if (rc) {
1949                 lpni = ERR_PTR(rc);
1950                 goto out_net_relock;
1951         }
1952
1953         lpni = lnet_find_peer_ni_locked(nid);
1954         LASSERT(lpni);
1955
1956 out_net_relock:
1957         lnet_net_lock(cpt);
1958
1959         return lpni;
1960 }
1961
1962 /*
1963  * Get a peer_ni for the given nid, create it if necessary. Takes a
1964  * hold on the peer_ni.
1965  */
1966 struct lnet_peer_ni *
1967 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1968 {
1969         struct lnet_peer_ni *lpni = NULL;
1970         int rc;
1971
1972         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1973                 return ERR_PTR(-ESHUTDOWN);
1974
1975         /*
1976          * find if a peer_ni already exists.
1977          * If so then just return that.
1978          */
1979         lpni = lnet_find_peer_ni_locked(nid);
1980         if (lpni)
1981                 return lpni;
1982
1983         /*
1984          * Slow path:
1985          * use the lnet_api_mutex to serialize the creation of the peer_ni
1986          * and the creation/deletion of the local ni/net. When a local ni is
1987          * created, if there exists a set of peer_nis on that network,
1988          * they need to be traversed and updated. When a local NI is
1989          * deleted, which could result in a network being deleted, then
1990          * all peer nis on that network need to be removed as well.
1991          *
1992          * Creation through traffic should also be serialized with
1993          * creation through DLC.
1994          */
1995         lnet_net_unlock(cpt);
1996         mutex_lock(&the_lnet.ln_api_mutex);
1997         /*
1998          * Shutdown is only set under the ln_api_mutex, so a single
1999          * check here is sufficient.
2000          */
2001         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2002                 lpni = ERR_PTR(-ESHUTDOWN);
2003                 goto out_mutex_unlock;
2004         }
2005
2006         rc = lnet_peer_ni_traffic_add(nid, pref);
2007         if (rc) {
2008                 lpni = ERR_PTR(rc);
2009                 goto out_mutex_unlock;
2010         }
2011
2012         lpni = lnet_find_peer_ni_locked(nid);
2013         LASSERT(lpni);
2014
2015 out_mutex_unlock:
2016         mutex_unlock(&the_lnet.ln_api_mutex);
2017         lnet_net_lock(cpt);
2018
2019         /* Lock has been dropped, check again for shutdown. */
2020         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2021                 if (!IS_ERR(lpni))
2022                         lnet_peer_ni_decref_locked(lpni);
2023                 lpni = ERR_PTR(-ESHUTDOWN);
2024         }
2025
2026         return lpni;
2027 }
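
/*
 * Illustrative sketch (editorial addition): a typical call pattern for
 * the lookup above from code that does not yet hold the net lock. The
 * helper is hypothetical; lnet_net_lock_current() is assumed to be the
 * usual lib-lnet.h helper that locks and returns the current cpt.
 */
static int __maybe_unused
example_lookup_peer_ni(lnet_nid_t dst_nid)
{
        struct lnet_peer_ni *lpni;
        int cpt;
        int rc = 0;

        cpt = lnet_net_lock_current();
        /* Takes a hold on success; may drop and retake the lock on
         * the slow path. */
        lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni))
                rc = PTR_ERR(lpni);
        else
                lnet_peer_ni_decref_locked(lpni); /* done with the hold */
        lnet_net_unlock(cpt);

        return rc;
}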
2028
2029 bool
2030 lnet_peer_gw_discovery(struct lnet_peer *lp)
2031 {
2032         bool rc = false;
2033
2034         spin_lock(&lp->lp_lock);
2035         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2036                 rc = true;
2037         spin_unlock(&lp->lp_lock);
2038
2039         return rc;
2040 }
2041
2042 bool
2043 lnet_peer_is_uptodate(struct lnet_peer *lp)
2044 {
2045         bool rc;
2046
2047         spin_lock(&lp->lp_lock);
2048         rc = lnet_peer_is_uptodate_locked(lp);
2049         spin_unlock(&lp->lp_lock);
2050         return rc;
2051 }
2052
2053 /*
2054  * Is a peer uptodate from the point of view of discovery?
2055  *
2056  * If it is currently being processed, obviously not.
2057  * A forced Ping or Push is also handled by the discovery thread.
2058  *
2059  * Otherwise look at whether the peer needs rediscovering.
2060  */
2061 bool
2062 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2063 __must_hold(&lp->lp_lock)
2064 {
2065         bool rc;
2066
2067         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2068                             LNET_PEER_FORCE_PING |
2069                             LNET_PEER_FORCE_PUSH)) {
2070                 rc = false;
2071         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2072                 rc = false;
2073         } else if (lnet_peer_needs_push(lp)) {
2074                 rc = false;
2075         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2076                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2077                         rc = true;
2078                 else
2079                         rc = false;
2080         } else {
2081                 rc = false;
2082         }
2083
2084         return rc;
2085 }
2086
2087 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2088 void
2089 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2090 {
2091         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2092          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2093          * when adding to the list and queuing the peer to ensure that we do not
2094          * strand any messages on the lp_dc_pendq. This scheme ensures the
2095          * message will be resent even if the peer is already being discovered.
2096          * Therefore we needn't check the return value of
2097          * lnet_peer_queue_for_discovery(lp).
2098          */
2099         lnet_net_lock(LNET_LOCK_EX);
2100         spin_lock(&lp->lp_lock);
2101         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2102         spin_unlock(&lp->lp_lock);
2103         lnet_peer_queue_for_discovery(lp);
2104         lnet_net_unlock(LNET_LOCK_EX);
2105 }
2106
2107 /*
2108  * Queue a peer for the attention of the discovery thread.  Call with
2109  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2110  * -EALREADY if the peer was already queued.
2111  */
2112 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2113 {
2114         int rc;
2115
2116         spin_lock(&lp->lp_lock);
2117         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2118                 lp->lp_state |= LNET_PEER_DISCOVERING;
2119         spin_unlock(&lp->lp_lock);
2120         if (list_empty(&lp->lp_dc_list)) {
2121                 lnet_peer_addref_locked(lp);
2122                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2123                 wake_up(&the_lnet.ln_dc_waitq);
2124                 rc = 0;
2125         } else {
2126                 rc = -EALREADY;
2127         }
2128
2129         CDEBUG(D_NET, "Queue peer %s: %d\n",
2130                libcfs_nid2str(lp->lp_primary_nid), rc);
2131
2132         return rc;
2133 }
2134
2135 /*
2136  * Discovery of a peer is complete. Wake all waiters on the peer.
2137  * Call with lnet_net_lock/EX held.
2138  */
2139 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2140 {
2141         struct lnet_msg *msg, *tmp;
2142         int rc = 0;
2143         LIST_HEAD(pending_msgs);
2144
2145         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2146                libcfs_nid2str(lp->lp_primary_nid));
2147
2148         list_del_init(&lp->lp_dc_list);
2149         spin_lock(&lp->lp_lock);
2150         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2151         spin_unlock(&lp->lp_lock);
2152         wake_up(&lp->lp_dc_waitq);
2153
2154         if (lp->lp_rtr_refcount > 0)
2155                 lnet_router_discovery_complete(lp);
2156
2157         lnet_net_unlock(LNET_LOCK_EX);
2158
2159         /* iterate through all pending messages and send them again */
2160         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2161                 list_del_init(&msg->msg_list);
2162                 if (lp->lp_dc_error) {
2163                         lnet_finalize(msg, lp->lp_dc_error);
2164                         continue;
2165                 }
2166
2167                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2168                        lnet_msgtyp2str(msg->msg_type),
2169                        libcfs_id2str(msg->msg_target));
2170                 rc = lnet_send(msg->msg_src_nid_param, msg,
2171                                msg->msg_rtr_nid_param);
2172                 if (rc < 0) {
2173                         CNETERR("Error sending %s to %s: %d\n",
2174                                lnet_msgtyp2str(msg->msg_type),
2175                                libcfs_id2str(msg->msg_target), rc);
2176                         lnet_finalize(msg, rc);
2177                 }
2178         }
2179         lnet_net_lock(LNET_LOCK_EX);
2180         lnet_peer_decref_locked(lp);
2181 }
2182
2183 /*
2184  * Handle inbound push.
2185  * Like any event handler, called with lnet_res_lock/CPT held.
2186  */
2187 void lnet_peer_push_event(struct lnet_event *ev)
2188 {
2189         struct lnet_ping_buffer *pbuf;
2190         struct lnet_peer *lp;
2191
2192         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2193
2194         /* lnet_find_peer() adds a refcount */
2195         lp = lnet_find_peer(ev->source.nid);
2196         if (!lp) {
2197                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2198                        libcfs_nid2str(ev->initiator.nid),
2199                        libcfs_nid2str(ev->source.nid));
2200                 pbuf->pb_needs_post = true;
2201                 return;
2202         }
2203
2204         /* Ensure peer state remains consistent while we modify it. */
2205         spin_lock(&lp->lp_lock);
2206
2207         /*
2208          * If some kind of error happened the contents of the message
2209          * cannot be used. Clear the NIDS_UPTODATE and set the
2210          * FORCE_PING flag to trigger a ping.
2211          */
2212         if (ev->status) {
2213                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2214                 lp->lp_state |= LNET_PEER_FORCE_PING;
2215                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2216                        ev->status,
2217                        libcfs_nid2str(lp->lp_primary_nid),
2218                        libcfs_nid2str(ev->source.nid));
2219                 goto out;
2220         }
2221
2222         /*
2223          * A push with invalid or corrupted info. Clear the UPTODATE
2224          * flag to trigger a ping.
2225          */
2226         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2227                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2228                 lp->lp_state |= LNET_PEER_FORCE_PING;
2229                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2230                        libcfs_nid2str(lp->lp_primary_nid));
2231                 goto out;
2232         }
2233
2234         /*
2235          * Make sure we'll allocate the correct size ping buffer when
2236          * pinging the peer.
2237          */
2238         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2239                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2240
2241         /*
2242          * A non-Multi-Rail peer is not supposed to be capable of
2243          * sending a push.
2244          */
2245         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2246                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2247                        libcfs_nid2str(lp->lp_primary_nid));
2248                 goto out;
2249         }
2250
2251         /*
2252          * The peer may have discovery disabled at its end. Set
2253          * NO_DISCOVERY as appropriate.
2254          */
2255         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2256                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2257                        libcfs_nid2str(lp->lp_primary_nid));
2258                 /*
2259                  * Mark the peer for deletion if we already know about it
2260                  * and it is going from discovery enabled to discovery disabled
2261                  */
2262                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2263                                       LNET_PEER_DISCOVERING)) &&
2264                      lp->lp_state & LNET_PEER_DISCOVERED) {
2265                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2266                                libcfs_nid2str(lp->lp_primary_nid),
2267                                lp->lp_state);
2268                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2269                 }
2270                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2271         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2272                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2273                        libcfs_nid2str(lp->lp_primary_nid));
2274                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2275         }
2276
2277         /*
2278          * Update the MULTI_RAIL flag based on the push. If the peer
2279          * was configured with DLC then the setting should match what
2280          * DLC put in.
2281          * NB: We verified above that the MR feature bit is set in pi_features
2282          */
2283         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2284                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2285                        libcfs_nid2str(lp->lp_primary_nid), lp);
2286         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2287                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2288                       libcfs_nid2str(lp->lp_primary_nid));
2289         } else if (lnet_peer_discovery_disabled) {
2290                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2291                        libcfs_nid2str(lp->lp_primary_nid), lp);
2292         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2293                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2294                        libcfs_nid2str(lp->lp_primary_nid), lp);
2295         } else {
2296                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2297                        libcfs_nid2str(lp->lp_primary_nid), lp);
2298                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2299                 lnet_peer_clr_non_mr_pref_nids(lp);
2300         }
2301
2302         /*
2303          * Check for truncation of the Put message. Clear the
2304          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2305          * and tell discovery to allocate a bigger buffer.
2306          */
2307         if (ev->mlength < ev->rlength) {
2308                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2309                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2310                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2311                 lp->lp_state |= LNET_PEER_FORCE_PING;
2312                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2313                        libcfs_nid2str(lp->lp_primary_nid),
2314                        pbuf->pb_info.pi_nnis);
2315                 goto out;
2316         }
2317
2318         /* always assume new data */
2319         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2320         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2321
2322         /*
2323          * If there is data present that hasn't been processed yet,
2324          * we'll replace it if the Put contained newer data and it
2325          * fits. We're racing with a Ping or earlier Push in this
2326          * case.
2327          */
2328         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2329                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2330                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2331                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2332                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2333                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2334                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2335                               libcfs_nid2str(lp->lp_primary_nid),
2336                               LNET_PING_BUFFER_SEQNO(pbuf),
2337                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2338                 }
2339                 goto out;
2340         }
2341
2342         /*
2343          * Allocate a buffer to copy the data. On a failure we drop
2344          * the Push and set FORCE_PING to force the discovery
2345          * thread to fix the problem by pinging the peer.
2346          */
2347         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2348         if (!lp->lp_data) {
2349                 lp->lp_state |= LNET_PEER_FORCE_PING;
2350                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2351                        libcfs_nid2str(lp->lp_primary_nid),
2352                        LNET_PING_BUFFER_SEQNO(pbuf));
2353                 goto out;
2354         }
2355
2356         /* Success */
2357         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2358                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2359         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2360         CDEBUG(D_NET, "Received Push %s %u\n",
2361                libcfs_nid2str(lp->lp_primary_nid),
2362                LNET_PING_BUFFER_SEQNO(pbuf));
2363
2364 out:
2365         /* We've processed this buffer. It can be reposted */
2366         pbuf->pb_needs_post = true;
2367
2368         /*
2369          * Queue the peer for discovery if it is not up to date. If it was
2370          * already queued, force it onto the request queue and wake the
2371          * discovery thread, because its status changed.
2372          */
2373         spin_unlock(&lp->lp_lock);
2374         lnet_net_lock(LNET_LOCK_EX);
2375         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2376                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2377                 wake_up(&the_lnet.ln_dc_waitq);
2378         }
2379         /* Drop refcount from lookup */
2380         lnet_peer_decref_locked(lp);
2381         lnet_net_unlock(LNET_LOCK_EX);
2382 }
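
/*
 * Editorial summary (derived from the handler above) of the state
 * changes a Push produces:
 *
 *   ev->status != 0       -> clear NIDS_UPTODATE, set FORCE_PING
 *   corrupted ping info   -> clear NIDS_UPTODATE, set FORCE_PING
 *   push from non-MR peer -> dropped
 *   truncated Put         -> clear NIDS_UPTODATE, set FORCE_PING,
 *                            grow the push target
 *   buffer alloc failure  -> set FORCE_PING
 *   success               -> set DATA_PRESENT, clear NIDS_UPTODATE
 *
 * In every case the buffer is reposted and the peer is (re)queued for
 * discovery if it is not up to date.
 */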
2383
2384 /*
2385  * Clear the discovery error state, unless we're already discovering
2386  * this peer, in which case the error is current.
2387  */
2388 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2389 {
2390         spin_lock(&lp->lp_lock);
2391         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2392                 lp->lp_dc_error = 0;
2393         spin_unlock(&lp->lp_lock);
2394 }
2395
2396 /*
2397  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2398  * dropped/retaken within this function. An lnet_peer_ni is passed in
2399  * because discovery could tear down an lnet_peer.
2400  */
2401 int
2402 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2403 {
2404         DEFINE_WAIT(wait);
2405         struct lnet_peer *lp;
2406         int rc = 0;
2407         int count = 0;
2408
2409 again:
2410         lnet_net_unlock(cpt);
2411         lnet_net_lock(LNET_LOCK_EX);
2412         lp = lpni->lpni_peer_net->lpn_peer;
2413         lnet_peer_clear_discovery_error(lp);
2414
2415         /*
2416          * We're willing to be interrupted. The lpni can become a
2417          * zombie if we race with DLC, so we must check for that.
2418          */
2419         for (;;) {
2420                 /* Keep lp alive when the lnet_net_lock is unlocked */
2421                 lnet_peer_addref_locked(lp);
2422                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2423                 if (signal_pending(current))
2424                         break;
2425                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2426                         break;
2427                 /*
2428                  * Don't repeat discovery if discovery is disabled. This is
2429                  * done to ensure we can still use discovery as a standard
2430                  * ping, for backwards compatibility with routers which do
2431                  * not have discovery, or have it disabled.
2432                  */
2433                 if (lnet_is_discovery_disabled(lp) && count > 0)
2434                         break;
2435                 if (lp->lp_dc_error)
2436                         break;
2437                 if (lnet_peer_is_uptodate(lp))
2438                         break;
2439                 lnet_peer_queue_for_discovery(lp);
2440                 count++;
2441                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2442
2443                 /*
2444                  * If caller requested a non-blocking operation then
2445                  * return immediately. Once discovery is complete any
2446                  * pending messages that were stopped due to discovery
2447                  * will be transmitted.
2448                  */
2449                 if (!block)
2450                         break;
2451
2452                 lnet_net_unlock(LNET_LOCK_EX);
2453                 schedule();
2454                 finish_wait(&lp->lp_dc_waitq, &wait);
2455                 lnet_net_lock(LNET_LOCK_EX);
2456                 lnet_peer_decref_locked(lp);
2457                 /* Peer may have changed */
2458                 lp = lpni->lpni_peer_net->lpn_peer;
2459         }
2460         finish_wait(&lp->lp_dc_waitq, &wait);
2461
2462         lnet_net_unlock(LNET_LOCK_EX);
2463         lnet_net_lock(cpt);
2464         lnet_peer_decref_locked(lp);
2465         /*
2466          * The peer may have changed, so re-check and rediscover if that turns
2467          * out to have been the case. The reference count on lp ensured that
2468          * even if it was unlinked from lpni the memory could not be recycled.
2469          * Thus the check below is sufficient to determine whether the peer
2470          * changed. If the peer changed, then lp must not be dereferenced.
2471          */
2472         if (lp != lpni->lpni_peer_net->lpn_peer)
2473                 goto again;
2474
2475         if (signal_pending(current))
2476                 rc = -EINTR;
2477         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2478                 rc = -ESHUTDOWN;
2479         else if (lp->lp_dc_error)
2480                 rc = lp->lp_dc_error;
2481         else if (!block)
2482                 CDEBUG(D_NET, "non-blocking discovery\n");
2483         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2484                 goto again;
2485
2486         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2487                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2488                libcfs_nid2str(lpni->lpni_nid), rc,
2489                (!block) ? "pending discovery" : "discovery complete");
2490
2491         return rc;
2492 }
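
/*
 * Illustrative sketch (editorial addition): a blocking discovery of a
 * NID, in the style of the IOC_LIBCFS_DISCOVER path. The helper is
 * hypothetical; lnet_net_lock_current() is assumed from lib-lnet.h,
 * and ln_api_mutex is taken as this function requires.
 */
static int __maybe_unused
example_discover_blocking(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        int cpt;
        int rc;

        mutex_lock(&the_lnet.ln_api_mutex);
        cpt = lnet_net_lock_current();
        lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
        } else {
                /* Blocks until discovery completes, fails, or is
                 * interrupted; drops and retakes the net lock. */
                rc = lnet_discover_peer_locked(lpni, cpt, true);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);
        mutex_unlock(&the_lnet.ln_api_mutex);

        return rc;
}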
2493
2494 /* Handle an incoming ack for a push. */
2495 static void
2496 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2497 {
2498         struct lnet_ping_buffer *pbuf;
2499
2500         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2501         spin_lock(&lp->lp_lock);
2502         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2503         lp->lp_push_error = ev->status;
2504         if (ev->status)
2505                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2506         else
2507                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2508         spin_unlock(&lp->lp_lock);
2509
2510         CDEBUG(D_NET, "peer %s ev->status %d\n",
2511                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2512 }
2513
2514 /* Handle a Reply message. This is the reply to a Ping message. */
2515 static void
2516 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2517 {
2518         struct lnet_ping_buffer *pbuf;
2519         int rc;
2520
2521         spin_lock(&lp->lp_lock);
2522
2523         lp->lp_disc_src_nid = ev->target.nid;
2524
2525         /*
2526          * If some kind of error happened the contents of message
2527          * cannot be used. Set PING_FAILED to trigger a retry.
2528          */
2529         if (ev->status) {
2530                 lp->lp_state |= LNET_PEER_PING_FAILED;
2531                 lp->lp_ping_error = ev->status;
2532                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2533                        ev->status,
2534                        libcfs_nid2str(lp->lp_primary_nid),
2535                        libcfs_nid2str(ev->source.nid));
2536                 goto out;
2537         }
2538
2539         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2540         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2541                 lnet_swap_pinginfo(pbuf);
2542
2543         /*
2544          * A reply with invalid or corrupted info. Set PING_FAILED to
2545          * trigger a retry.
2546          */
2547         rc = lnet_ping_info_validate(&pbuf->pb_info);
2548         if (rc) {
2549                 lp->lp_state |= LNET_PEER_PING_FAILED;
2550                 lp->lp_ping_error = 0;
2551                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2552                        libcfs_nid2str(lp->lp_primary_nid), rc);
2553                 goto out;
2554         }
2555
2556
2557         /*
2558          * The peer may have discovery disabled at its end. Set
2559          * NO_DISCOVERY as appropriate.
2560          */
2561         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2562             !lnet_peer_discovery_disabled) {
2563                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2564                        libcfs_nid2str(lp->lp_primary_nid));
2565                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2566         } else {
2567                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2568                        libcfs_nid2str(lp->lp_primary_nid));
2569                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2570         }
2571
2572         /*
2573          * Update the MULTI_RAIL flag based on the reply. If the peer
2574          * was configured with DLC then the setting should match what
2575          * DLC put in.
2576          */
2577         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2578                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2579                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2580                                libcfs_nid2str(lp->lp_primary_nid), lp);
2581                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2582                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2583                               libcfs_nid2str(lp->lp_primary_nid));
2584                 } else if (lnet_peer_discovery_disabled) {
2585                         CDEBUG(D_NET,
2586                                "peer %s(%p) not MR: DD disabled locally\n",
2587                                libcfs_nid2str(lp->lp_primary_nid), lp);
2588                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2589                         CDEBUG(D_NET,
2590                                "peer %s(%p) not MR: DD disabled remotely\n",
2591                                libcfs_nid2str(lp->lp_primary_nid), lp);
2592                 } else {
2593                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2594                                libcfs_nid2str(lp->lp_primary_nid), lp);
2595                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2596                         lnet_peer_clr_non_mr_pref_nids(lp);
2597                 }
2598         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2599                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2600                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2601                               libcfs_nid2str(lp->lp_primary_nid));
2602                 } else {
2603                         CERROR("Multi-Rail state vanished from %s\n",
2604                                libcfs_nid2str(lp->lp_primary_nid));
2605                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2606                 }
2607         }
2608
2609         /*
2610          * Make sure we'll allocate the correct size ping buffer when
2611          * pinging the peer.
2612          */
2613         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2614                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2615
2616         /*
2617          * Check for truncation of the Reply. Clear PING_SENT and set
2618          * PING_FAILED to trigger a retry.
2619          */
2620         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2621                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2622                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2623                 lp->lp_state |= LNET_PEER_PING_FAILED;
2624                 lp->lp_ping_error = 0;
2625                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2626                        libcfs_nid2str(lp->lp_primary_nid),
2627                        pbuf->pb_info.pi_nnis);
2628                 goto out;
2629         }
2630
2631         /*
2632          * Check the sequence numbers in the reply. These are only
2633          * available if the reply came from a Multi-Rail peer.
2634          */
2635         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2636             pbuf->pb_info.pi_nnis > 1 &&
2637             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2638                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2639                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2640                                 libcfs_nid2str(lp->lp_primary_nid),
2641                                 LNET_PING_BUFFER_SEQNO(pbuf),
2642                                 lp->lp_peer_seqno);
2643
2644                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2645         }
2646
2647         /* We're happy with the state of the data in the buffer. */
2648         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2649                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2650         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2651                 lnet_ping_buffer_decref(lp->lp_data);
2652         else
2653                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2654         lnet_ping_buffer_addref(pbuf);
2655         lp->lp_data = pbuf;
2656 out:
2657         lp->lp_state &= ~LNET_PEER_PING_SENT;
2658         spin_unlock(&lp->lp_lock);
2659
2660         lnet_net_lock(LNET_LOCK_EX);
2661         /*
2662          * If this peer is a gateway, call the routing callback to
2663          * handle the ping reply
2664          */
2665         if (lp->lp_rtr_refcount > 0)
2666                 lnet_router_discovery_ping_reply(lp);
2667         lnet_net_unlock(LNET_LOCK_EX);
2668 }
2669
2670 /*
2671  * Send event handling. Only matters for error cases, where we clean
2672  * up state on the peer and peer_ni that would otherwise be updated in
2673  * the REPLY event handler for a successful Ping, and the ACK event
2674  * handler for a successful Push.
2675  */
2676 static int
2677 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2678 {
2679         int rc = 0;
2680
2681         if (!ev->status)
2682                 goto out;
2683
2684         spin_lock(&lp->lp_lock);
2685         if (ev->msg_type == LNET_MSG_GET) {
2686                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2687                 lp->lp_state |= LNET_PEER_PING_FAILED;
2688                 lp->lp_ping_error = ev->status;
2689         } else { /* ev->msg_type == LNET_MSG_PUT */
2690                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2691                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2692                 lp->lp_push_error = ev->status;
2693         }
2694         spin_unlock(&lp->lp_lock);
2695         rc = LNET_REDISCOVER_PEER;
2696 out:
2697         CDEBUG(D_NET, "%s Send to %s: %d\n",
2698                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2699                 libcfs_nid2str(ev->target.nid), rc);
2700         return rc;
2701 }
2702
2703 /*
2704  * Unlink event handling. This event is only seen if a call to
2705  * LNetMDUnlink() caused the event to be unlinked. If this call was
2706  * made after the event was set up in LNetGet() or LNetPut() then we
2707  * assume the Ping or Push timed out.
2708  */
2709 static void
2710 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2711 {
2712         spin_lock(&lp->lp_lock);
2713         /* We've passed through LNetGet() */
2714         if (lp->lp_state & LNET_PEER_PING_SENT) {
2715                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2716                 lp->lp_state |= LNET_PEER_PING_FAILED;
2717                 lp->lp_ping_error = -ETIMEDOUT;
2718                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2719                         libcfs_nid2str(lp->lp_primary_nid));
2720         }
2721         /* We've passed through LNetPut() */
2722         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2723                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2724                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2725                 lp->lp_push_error = -ETIMEDOUT;
2726                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2727                         libcfs_nid2str(lp->lp_primary_nid));
2728         }
2729         spin_unlock(&lp->lp_lock);
2730 }
2731
2732 /*
2733  * Event handler for the discovery EQ.
2734  *
2735  * Called with lnet_res_lock(cpt) held. The cpt is the
2736  * lnet_cpt_of_cookie() of the md handle cookie.
2737  */
2738 static void lnet_discovery_event_handler(struct lnet_event *event)
2739 {
2740         struct lnet_peer *lp = event->md_user_ptr;
2741         struct lnet_ping_buffer *pbuf;
2742         int rc;
2743
2744         /* discovery needs to take another look */
2745         rc = LNET_REDISCOVER_PEER;
2746
2747         CDEBUG(D_NET, "Received event: %d\n", event->type);
2748
2749         switch (event->type) {
2750         case LNET_EVENT_ACK:
2751                 lnet_discovery_event_ack(lp, event);
2752                 break;
2753         case LNET_EVENT_REPLY:
2754                 lnet_discovery_event_reply(lp, event);
2755                 break;
2756         case LNET_EVENT_SEND:
2757                 /* Only send failure triggers a retry. */
2758                 rc = lnet_discovery_event_send(lp, event);
2759                 break;
2760         case LNET_EVENT_UNLINK:
2761                 /* LNetMDUnlink() was called */
2762                 lnet_discovery_event_unlink(lp, event);
2763                 break;
2764         default:
2765                 /* Invalid events. */
2766                 LBUG();
2767         }
2768         lnet_net_lock(LNET_LOCK_EX);
2769         if (event->unlinked) {
2770                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2771                 lnet_ping_buffer_decref(pbuf);
2772                 lnet_peer_decref_locked(lp);
2773         }
2774
2775         /* put peer back at end of request queue, if discovery not already
2776          * done */
2777         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2778                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2779                 wake_up(&the_lnet.ln_dc_waitq);
2780         }
2781         lnet_net_unlock(LNET_LOCK_EX);
2782 }
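
/*
 * Illustrative sketch (editorial addition): installing the handler
 * above. The ln_dc_handler field is an assumption about how the
 * discovery startup path wires its ping/push MDs to this function.
 */
static void __maybe_unused
example_install_dc_handler(void)
{
        the_lnet.ln_dc_handler = lnet_discovery_event_handler;
}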
2783
2784 /*
2785  * Build a peer from incoming data.
2786  *
2787  * The NIDs in the incoming data are supposed to be structured as follows:
2788  *  - loopback
2789  *  - primary NID
2790  *  - other NIDs in same net
2791  *  - NIDs in second net
2792  *  - NIDs in third net
2793  *  - ...
2794  * This is due to the way the list of NIDs in the data is created.
2795  *
2796  * Note that this function will mark the peer uptodate unless an
2797  * ENOMEM is encountered. All other errors are due to a conflict
2798  * between the DLC configuration and what discovery sees. We treat DLC
2799  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2800  * peer from becoming stuck in discovery.
2801  */
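
/*
 * Worked example (editorial): if the peer currently holds NIDs {A, B}
 * and the buffer lists {lo0, A, C}, the merge computes addnis = {C}
 * and delnis = {B}; A is kept and its ns_status refreshed in place.
 */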
2802 static int lnet_peer_merge_data(struct lnet_peer *lp,
2803                                 struct lnet_ping_buffer *pbuf)
2804 {
2805         struct lnet_peer_net *lpn;
2806         struct lnet_peer_ni *lpni;
2807         lnet_nid_t *curnis = NULL;
2808         struct lnet_ni_status *addnis = NULL;
2809         lnet_nid_t *delnis = NULL;
2810         unsigned flags;
2811         int ncurnis;
2812         int naddnis;
2813         int ndelnis;
2814         int nnis = 0;
2815         int i;
2816         int j;
2817         int rc;
2818
2819         flags = LNET_PEER_DISCOVERED;
2820         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2821                 flags |= LNET_PEER_MULTI_RAIL;
2822
2823         /*
2824          * Cache the routing feature for the peer: whether it is enabled
2825          * or disabled as reported by the remote peer.
2826          */
2827         spin_lock(&lp->lp_lock);
2828         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2829                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2830         else
2831                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2832         spin_unlock(&lp->lp_lock);
2833
2834         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2835         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2836         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2837         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2838         if (!curnis || !addnis || !delnis) {
2839                 rc = -ENOMEM;
2840                 goto out;
2841         }
2842         ncurnis = 0;
2843         naddnis = 0;
2844         ndelnis = 0;
2845
2846         /* Construct the list of NIDs present in peer. */
2847         lpni = NULL;
2848         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2849                 curnis[ncurnis++] = lpni->lpni_nid;
2850
2851         /*
2852          * Check for NIDs in pbuf not present in curnis[].
2853          * The loop starts at 1 to skip the loopback NID.
2854          */
2855         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2856                 for (j = 0; j < ncurnis; j++)
2857                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2858                                 break;
2859                 if (j == ncurnis)
2860                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2861         }
2862         /*
2863          * Check for NIDs in curnis[] not present in pbuf.
2864          * The nested loop starts at 1 to skip the loopback NID.
2865          *
2866          * But never add the loopback NID to delnis[]: if it is
2867          * present in curnis[] then this peer is for this node.
2868          */
2869         for (i = 0; i < ncurnis; i++) {
2870                 if (curnis[i] == LNET_NID_LO_0)
2871                         continue;
2872                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2873                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2874                                 /*
2875                                  * update the information we cache for the
2876                                  * peer with the latest information we
2877                                  * received
2878                                  */
2879                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2880                                 if (lpni) {
2881                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2882                                         lnet_peer_ni_decref_locked(lpni);
2883                                 }
2884                                 break;
2885                         }
2886                 }
2887                 if (j == pbuf->pb_info.pi_nnis)
2888                         delnis[ndelnis++] = curnis[i];
2889         }
2890
2891         /*
2892          * If we get here and discovery is disabled then we don't want
2893          * to add or delete any NIs. We just updated the ones we have some
2894          * information on, and call it a day.
2895          */
2896         rc = 0;
2897         if (lnet_is_discovery_disabled(lp))
2898                 goto out;
2899
2900         for (i = 0; i < naddnis; i++) {
2901                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2902                 if (rc) {
2903                         CERROR("Error adding NID %s to peer %s: %d\n",
2904                                libcfs_nid2str(addnis[i].ns_nid),
2905                                libcfs_nid2str(lp->lp_primary_nid), rc);
2906                         if (rc == -ENOMEM)
2907                                 goto out;
2908                 }
2909                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2910                 if (lpni) {
2911                         lpni->lpni_ns_status = addnis[i].ns_status;
2912                         lnet_peer_ni_decref_locked(lpni);
2913                 }
2914         }
2915
2916         for (i = 0; i < ndelnis; i++) {
2917                 /*
2918                  * For routers it's okay to delete the primary_nid because
2919                  * the upper layers don't really rely on it. So if we're
2920                  * being told that the router changed its primary_nid
2921                  * then it's okay to delete it.
2922                  */
2923                 if (lp->lp_rtr_refcount > 0)
2924                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2925                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2926                 if (rc) {
2927                         CERROR("Error deleting NID %s from peer %s: %d\n",
2928                                libcfs_nid2str(delnis[i]),
2929                                libcfs_nid2str(lp->lp_primary_nid), rc);
2930                         if (rc == -ENOMEM)
2931                                 goto out;
2932                 }
2933         }
2934
2935         /* The peer net for the primary NID should be the first entry in the
2936          * peer's lp_peer_nets list, and the peer NI for the primary NID should
2937          * be the first entry in its peer net's lpn_peer_nis list.
2938          */
2939         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
2940         if (!lpni) {
2941                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
2942                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
2943                 goto out;
2944         }
2945
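        /* NB: dropping the lookup ref before the list manipulation below
         * should be safe: the caller holds ln_api_mutex and the peer
         * table still holds a reference on this lpni.
         */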
2946         lnet_peer_ni_decref_locked(lpni);
2947
2948         lpn = lpni->lpni_peer_net;
2949         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
2950                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
2951
2952         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
2953                 list_move(&lpni->lpni_peer_nis,
2954                           &lpni->lpni_peer_net->lpn_peer_nis);
2955
2956         /*
2957          * Errors other than -ENOMEM are due to peers having been
2958          * configured with DLC. Ignore these because DLC overrides
2959          * Discovery.
2960          */
2961         rc = 0;
2962 out:
2963         CFS_FREE_PTR_ARRAY(curnis, nnis);
2964         CFS_FREE_PTR_ARRAY(addnis, nnis);
2965         CFS_FREE_PTR_ARRAY(delnis, nnis);
2966         lnet_ping_buffer_decref(pbuf);
2967         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2968
2969         if (rc) {
2970                 spin_lock(&lp->lp_lock);
2971                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2972                 lp->lp_state |= LNET_PEER_FORCE_PING;
2973                 spin_unlock(&lp->lp_lock);
2974         }
2975         return rc;
2976 }
2977
2978 /*
2979  * The data in pbuf says lp is its primary peer, but the data was
2980  * received by a different peer. Try to update lp with the data.
2981  */
2982 static int
2983 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2984 {
2985         struct lnet_handle_md mdh;
2986
2987         /* Queue lp for discovery, and force it on the request queue. */
2988         lnet_net_lock(LNET_LOCK_EX);
2989         if (lnet_peer_queue_for_discovery(lp))
2990                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2991         lnet_net_unlock(LNET_LOCK_EX);
2992
2993         LNetInvalidateMDHandle(&mdh);
2994
2995         /*
2996          * Decide whether we can move the peer to the DATA_PRESENT state.
2997          *
2998          * We replace stale data for a multi-rail peer, repair PING_FAILED
2999          * status, and preempt FORCE_PING.
3000          *
3001          * If after that we have DATA_PRESENT, we merge it into this peer.
3002          */
3003         spin_lock(&lp->lp_lock);
3004         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3005                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3006                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3007                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3008                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3009                         lnet_ping_buffer_decref(pbuf);
3010                         pbuf = lp->lp_data;
3011                         lp->lp_data = NULL;
3012                 }
3013         }
3014         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3015                 lnet_ping_buffer_decref(lp->lp_data);
3016                 lp->lp_data = NULL;
3017                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3018         }
3019         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3020                 mdh = lp->lp_ping_mdh;
3021                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3022                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3023                 lp->lp_ping_error = 0;
3024         }
3025         if (lp->lp_state & LNET_PEER_FORCE_PING)
3026                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3027         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3028         spin_unlock(&lp->lp_lock);
3029
3030         if (!LNetMDHandleIsInvalid(mdh))
3031                 LNetMDUnlink(mdh);
3032
3033         if (pbuf)
3034                 return lnet_peer_merge_data(lp, pbuf);
3035
3036         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3037         return 0;
3038 }
3039
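/* Return true if @nid occupies any slot of @pinfo, including the
 * loopback slot at index 0.
 */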
3040 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3041 {
3042         int i;
3043
3044         for (i = 0; i < pinfo->pi_nnis; i++) {
3045                 if (pinfo->pi_ni[i].ns_nid == nid)
3046                         return true;
3047         }
3048
3049         return false;
3050 }
3051
3052 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3053  * to the discovery queue a reference was taken that will prevent the peer from
3054  * actually being freed by this function. After this function exits, the
3055  * discovery thread should call lnet_peer_discovery_complete(), which will
3056  * drop that reference as well as wake any waiters that may also be holding a
3057  * ref on the peer. Note that lp_lock is dropped and reacquired here.
3058  */
3059 static int lnet_peer_deletion(struct lnet_peer *lp)
3060 __must_hold(&lp->lp_lock)
3061 {
3062         struct list_head rlist;
3063         struct lnet_route *route, *tmp;
3064         int sensitivity = lp->lp_health_sensitivity;
3065
3066         INIT_LIST_HEAD(&rlist);
3067
3068         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3069                           LNET_PEER_FORCE_PUSH);
3070         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3071                libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3072
3073         /* no-op if lnet_peer_del() has already been called on this peer */
3074         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3075                 return 0;
3076
3077         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3078                 return -ESHUTDOWN;
3079
3080         spin_unlock(&lp->lp_lock);
3081
3082         mutex_lock(&the_lnet.ln_api_mutex);
3083
3084         lnet_net_lock(LNET_LOCK_EX);
3085         /* remove the peer from the discovery work
3086          * queue, if it's on there, in preparation
3087          * for deleting it.
3088          */
3089         if (!list_empty(&lp->lp_dc_list))
3090                 list_del(&lp->lp_dc_list);
3091         list_for_each_entry_safe(route, tmp,
3092                                  &lp->lp_routes,
3093                                  lr_gwlist)
3094                 lnet_move_route(route, NULL, &rlist);
3095         lnet_net_unlock(LNET_LOCK_EX);
3096
3097         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3098         lnet_peer_del(lp);
3099
3100         list_for_each_entry_safe(route, tmp,
3101                                  &rlist, lr_list) {
3102                 /* re-add these routes */
3103                 lnet_add_route(route->lr_net,
3104                                route->lr_hops,
3105                                route->lr_nid,
3106                                route->lr_priority,
3107                                sensitivity);
3108                 LIBCFS_FREE(route, sizeof(*route));
3109         }
3110
3111         mutex_unlock(&the_lnet.ln_api_mutex);
3112
3113         spin_lock(&lp->lp_lock);
3114
3115         return 0;
3116 }
3117
3118 /*
3119  * Update a peer using the data received.
3120  */
3121 static int lnet_peer_data_present(struct lnet_peer *lp)
3122 __must_hold(&lp->lp_lock)
3123 {
3124         struct lnet_ping_buffer *pbuf;
3125         struct lnet_peer_ni *lpni;
3126         lnet_nid_t nid = LNET_NID_ANY;
3127         unsigned flags;
3128         int rc = 0;
3129
3130         pbuf = lp->lp_data;
3131         lp->lp_data = NULL;
3132         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3133         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3134         spin_unlock(&lp->lp_lock);
3135
3136         /*
3137          * Modifications of peer structures are done while holding the
3138          * ln_api_mutex. A global lock is required because we may be
3139          * modifying multiple peer structures, and a mutex greatly
3140          * simplifies memory management.
3141          *
3142          * The actual changes to the data structures must also protect
3143          * against concurrent lookups, for which the lnet_net_lock in
3144          * LNET_LOCK_EX mode is used.
3145          */
3146         mutex_lock(&the_lnet.ln_api_mutex);
3147         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3148                 rc = -ESHUTDOWN;
3149                 goto out;
3150         }
3151
3152         /*
3153          * If this peer is not on the peer list then it is being torn
3154          * down, and our reference count may be all that is keeping it
3155          * alive. Don't do any work on it.
3156          */
3157         if (list_empty(&lp->lp_peer_list))
3158                 goto out;
3159
3160         flags = LNET_PEER_DISCOVERED;
3161         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3162                 flags |= LNET_PEER_MULTI_RAIL;
3163
3164         /*
3165          * Check whether the primary NID in the message matches the
3166          * primary NID of the peer. If it does, update the peer. If
3167          * it does not, check whether there is already a peer with
3168          * that primary NID. If no such peer exists, try to update
3169          * the primary NID of the current peer (allowed if it was
3170          * created due to message traffic) and complete the update.
3171          * If the peer did exist, hand off the data to it.
3172          *
3173          * The peer for the loopback interface is a special case: this
3174          * is the peer for the local node, and we want to set its
3175          * primary NID to the correct value here. Moreover, this peer
3176          * can show up with only the loopback NID in the ping buffer.
3177          */
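        /* pi_ni[0] is the loopback NID, so a buffer with pi_nnis <= 1
         * carries no usable NIDs and there is nothing to update.
         */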
3178         if (pbuf->pb_info.pi_nnis <= 1)
3179                 goto out;
3180         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3181         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3182                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3183                 if (!rc)
3184                         rc = lnet_peer_merge_data(lp, pbuf);
3185         /*
3186          * If the primary NID of the peer is present in the ping info
3187          * returned from the peer, but it's not the primary NID we have
3188          * cached locally, and discovery is disabled, then we don't want
3189          * to update our local peer info by adding or removing NIDs. We
3190          * just want to update the status of the NIDs that we currently
3191          * have recorded in that peer.
3192          */
3193         } else if (lp->lp_primary_nid == nid ||
3194                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3195                     lnet_is_discovery_disabled(lp))) {
3196                 rc = lnet_peer_merge_data(lp, pbuf);
3197         } else {
3198                 lpni = lnet_find_peer_ni_locked(nid);
3199                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3200                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3201                         if (rc) {
3202                                 CERROR("Primary NID error %s versus %s: %d\n",
3203                                        libcfs_nid2str(lp->lp_primary_nid),
3204                                        libcfs_nid2str(nid), rc);
3205                         } else {
3206                                 rc = lnet_peer_merge_data(lp, pbuf);
3207                         }
3208                         if (lpni)
3209                                 lnet_peer_ni_decref_locked(lpni);
3210                 } else {
3211                         struct lnet_peer *new_lp;
3212                         new_lp = lpni->lpni_peer_net->lpn_peer;
3213                         /*
3214                          * If lp has discovery/MR enabled then new_lp
3215                          * should have them enabled as well, since they
3216                          * are the same peer, which we're about to merge.
3217                          */
3218                         spin_lock(&lp->lp_lock);
3219                         spin_lock(&new_lp->lp_lock);
3220                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3221                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3222                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3223                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3224                         /* If we're processing a ping reply then we may be
3225                          * about to send a push to the peer that we ping'd.
3226                          * Since the ping reply that we're processing was
3227                          * received by lp, we need to set the discovery source
3228                          * NID for new_lp to the NID stored in lp.
3229                          */
3230                         if (lp->lp_disc_src_nid != LNET_NID_ANY)
3231                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3232                         spin_unlock(&new_lp->lp_lock);
3233                         spin_unlock(&lp->lp_lock);
3234
3235                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3236                         lnet_consolidate_routes_locked(lp, new_lp);
3237                         lnet_peer_ni_decref_locked(lpni);
3238                 }
3239         }
3240 out:
3241         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3242                lp->lp_state);
3243         mutex_unlock(&the_lnet.ln_api_mutex);
3244
3245         spin_lock(&lp->lp_lock);
3246         /* Tell discovery to re-check the peer immediately. */
3247         if (!rc)
3248                 rc = LNET_REDISCOVER_PEER;
3249         return rc;
3250 }
3251
3252 /*
3253  * A ping failed. Clear the PING_FAILED state and set the
3254  * FORCE_PING state, to ensure a retry even if discovery is
3255  * disabled. This avoids being left with incorrect state.
3256  */
3257 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3258 __must_hold(&lp->lp_lock)
3259 {
3260         struct lnet_handle_md mdh;
3261         int rc;
3262
3263         mdh = lp->lp_ping_mdh;
3264         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3265         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3266         lp->lp_state |= LNET_PEER_FORCE_PING;
3267         rc = lp->lp_ping_error;
3268         lp->lp_ping_error = 0;
3269         spin_unlock(&lp->lp_lock);
3270
3271         if (!LNetMDHandleIsInvalid(mdh))
3272                 LNetMDUnlink(mdh);
3273
3274         CDEBUG(D_NET, "peer %s:%d\n",
3275                libcfs_nid2str(lp->lp_primary_nid), rc);
3276
3277         spin_lock(&lp->lp_lock);
3278         return rc ? rc : LNET_REDISCOVER_PEER;
3279 }
3280
3281 /* Select a NID to send a Ping or Push to: prefer a directly connected
3282  * network, fall back to a routed one, else return LNET_NID_ANY.
3283  */
3284 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3285 {
3286         struct lnet_peer_ni *lpni;
3287
3288         /* Look for a direct-connected NID for this peer. */
3289         lpni = NULL;
3290         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3291                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3292                         continue;
3293                 break;
3294         }
3295         if (lpni)
3296                 return lpni->lpni_nid;
3297
3298         /* Look for a routed-connected NID for this peer. */
3299         lpni = NULL;
3300         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3301                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3302                         continue;
3303                 break;
3304         }
3305         if (lpni)
3306                 return lpni->lpni_nid;
3307
3308         return LNET_NID_ANY;
3309 }
3310
3311 /* Active side of ping. */
3312 static int lnet_peer_send_ping(struct lnet_peer *lp)
3313 __must_hold(&lp->lp_lock)
3314 {
3315         lnet_nid_t pnid;
3316         int nnis;
3317         int rc;
3318         int cpt;
3319
3320         lp->lp_state |= LNET_PEER_PING_SENT;
3321         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3322         spin_unlock(&lp->lp_lock);
3323
3324         cpt = lnet_net_lock_current();
3325         /* Refcount for MD. */
3326         lnet_peer_addref_locked(lp);
3327         pnid = lnet_peer_select_nid(lp);
3328         lnet_net_unlock(cpt);
3329
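        /* Size the ping REPLY buffer for at least LNET_INTERFACES_MIN
         * NIDs, or for as many NIDs as the peer last reported.
         */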
3330         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3331
3332         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3333                             the_lnet.ln_dc_handler, false);
3334
3335         /*
3336          * If LNetMDBind() in lnet_send_ping() fails (rc > 0), no MD was
3337          * attached, so we must drop the peer refcount here; on any other
3338          * failure, LNetMDUnlink() is called and eventually drops it.
3339          */
3340         if (rc > 0) {
3341                 lnet_net_lock(cpt);
3342                 lnet_peer_decref_locked(lp);
3343                 lnet_net_unlock(cpt);
3344                 rc = -rc; /* change the rc to negative value */
3345                 goto fail_error;
3346         } else if (rc < 0) {
3347                 goto fail_error;
3348         }
3349
3350         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3351
3352         spin_lock(&lp->lp_lock);
3353         return 0;
3354
3355 fail_error:
3356         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3357         /*
3358          * The errors that get us here are considered hard errors and
3359          * cause Discovery to terminate. So we clear PING_SENT, but do
3360          * not set either PING_FAILED or FORCE_PING. In fact we need
3361          * to clear PING_FAILED, because the unlink event handler will
3362          * have set it if we called LNetMDUnlink() above.
3363          */
3364         spin_lock(&lp->lp_lock);
3365         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3366         return rc;
3367 }
3368
3369 /*
3370  * This function exists because you cannot call LNetMDUnlink() from an
3371  * event handler.
3372  */
3373 static int lnet_peer_push_failed(struct lnet_peer *lp)
3374 __must_hold(&lp->lp_lock)
3375 {
3376         struct lnet_handle_md mdh;
3377         int rc;
3378
3379         mdh = lp->lp_push_mdh;
3380         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3381         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3382         rc = lp->lp_push_error;
3383         lp->lp_push_error = 0;
3384         spin_unlock(&lp->lp_lock);
3385
3386         if (!LNetMDHandleIsInvalid(mdh))
3387                 LNetMDUnlink(mdh);
3388
3389         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3390         spin_lock(&lp->lp_lock);
3391         return rc ? rc : LNET_REDISCOVER_PEER;
3392 }
3393
3394 /*
3395  * Mark the peer as discovered.
3396  */
3397 static int lnet_peer_discovered(struct lnet_peer *lp)
3398 __must_hold(&lp->lp_lock)
3399 {
3400         lp->lp_state |= LNET_PEER_DISCOVERED;
3401         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3402                           LNET_PEER_REDISCOVER);
3403
3404         lp->lp_dc_error = 0;
3405
3406         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3407
3408         return 0;
3409 }
3410
3411 /* Active side of push. */
3412 static int lnet_peer_send_push(struct lnet_peer *lp)
3413 __must_hold(&lp->lp_lock)
3414 {
3415         struct lnet_ping_buffer *pbuf;
3416         struct lnet_process_id id;
3417         struct lnet_md md;
3418         int cpt;
3419         int rc;
3420
3421         /* Don't push to a non-multi-rail peer. */
3422         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3423                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3424                 /* if peer's NIDs are uptodate then peer is discovered */
3425                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3426                         rc = lnet_peer_discovered(lp);
3427                         return rc;
3428                 }
3429
3430                 return 0;
3431         }
3432
3433         lp->lp_state |= LNET_PEER_PUSH_SENT;
3434         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3435         spin_unlock(&lp->lp_lock);
3436
3437         cpt = lnet_net_lock_current();
3438         pbuf = the_lnet.ln_ping_target;
3439         lnet_ping_buffer_addref(pbuf);
3440         lnet_net_unlock(cpt);
3441
3442         /* Push source MD */
3443         md.start     = &pbuf->pb_info;
3444         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3445         md.threshold = 2; /* Put/Ack */
3446         md.max_size  = 0;
3447         md.options   = LNET_MD_TRACK_RESPONSE;
3448         md.handler   = the_lnet.ln_dc_handler;
3449         md.user_ptr  = lp;
3450
3451         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3452         if (rc) {
3453                 lnet_ping_buffer_decref(pbuf);
3454                 CERROR("Can't bind push source MD: %d\n", rc);
3455                 goto fail_error;
3456         }
3457         cpt = lnet_net_lock_current();
3458         /* Refcount for MD. */
3459         lnet_peer_addref_locked(lp);
3460         id.pid = LNET_PID_LUSTRE;
3461         id.nid = lnet_peer_select_nid(lp);
3462         lnet_net_unlock(cpt);
3463
3464         if (id.nid == LNET_NID_ANY) {
3465                 rc = -EHOSTUNREACH;
3466                 goto fail_unlink;
3467         }
3468
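        /* Push our ping buffer to the peer: a PUT with ACK on the reserved
         * portal, tagged with the ping match bits. lp_disc_src_nid, if it
         * was set, selects the local source NI for the PUT.
         */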
3469         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3470                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3471                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3472
3473         /*
3474          * Reset the discovery source NID. There is no need to restrict
3475          * sending from that source if we call lnet_push_update_to_peers().
3476          * It will be set to a specific NID again if we initiate discovery
3477          * from scratch.
3478          */
3479         lp->lp_disc_src_nid = LNET_NID_ANY;
3480
3481         if (rc)
3482                 goto fail_unlink;
3483
3484         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3485
3486         spin_lock(&lp->lp_lock);
3487         return 0;
3488
3489 fail_unlink:
3490         LNetMDUnlink(lp->lp_push_mdh);
3491         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3492 fail_error:
3493         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3494         /*
3495          * The errors that get us here are considered hard errors and
3496          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3497          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3498          * because the unlink event handler will have set it if we
3499          * called LNetMDUnlink() above.
3500          */
3501         spin_lock(&lp->lp_lock);
3502         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3503         return rc;
3504 }
3505
3506 /*
3507  * An unrecoverable error was encountered during discovery.
3508  * Set error status in peer and abort discovery.
3509  */
3510 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3511 {
3512         CDEBUG(D_NET, "Discovery error %s: %d\n",
3513                libcfs_nid2str(lp->lp_primary_nid), error);
3514
3515         spin_lock(&lp->lp_lock);
3516         lp->lp_dc_error = error;
3517         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3518         lp->lp_state |= LNET_PEER_REDISCOVER;
3519         spin_unlock(&lp->lp_lock);
3520 }
3521
3522 /*
3523  * Wait for work to be queued or some other change that must be
3524  * attended to. Returns non-zero if the discovery thread should shut
3525  * down.
3526  */
3527 static int lnet_peer_discovery_wait_for_work(void)
3528 {
3529         int cpt;
3530         int rc = 0;
3531
3532         DEFINE_WAIT(wait);
3533
3534         cpt = lnet_net_lock_current();
3535         for (;;) {
3536                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3537                                 TASK_INTERRUPTIBLE);
3538                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3539                         break;
3540                 if (lnet_push_target_resize_needed() ||
3541                     the_lnet.ln_push_target->pb_needs_post)
3542                         break;
3543                 if (!list_empty(&the_lnet.ln_dc_request))
3544                         break;
3545                 if (!list_empty(&the_lnet.ln_msg_resend))
3546                         break;
3547                 lnet_net_unlock(cpt);
3548
3549                 /*
3550                  * Wake up at most once per second to check for peers that
3551                  * have been stuck on the working queue for longer than
3552                  * the peer timeout.
3553                  */
3554                 schedule_timeout(cfs_time_seconds(1));
3555                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3556                 cpt = lnet_net_lock_current();
3557         }
3558         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3559
3560         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3561                 rc = -ESHUTDOWN;
3562
3563         lnet_net_unlock(cpt);
3564
3565         CDEBUG(D_NET, "woken: %d\n", rc);
3566
3567         return rc;
3568 }
3569
3570 /*
3571  * Messages that were pending on a destroyed peer will be put on a global
3572  * resend list. The resend list is checked by the discovery thread when
3573  * it wakes up, and the messages on it are resent. These messages may
3574  * still be sendable if the lpni that was the initial cause of the
3575  * re-queue has been transferred to another peer.
3576  *
3577  * It is possible that LNet could be shut down while we're iterating
3578  * through the list. lnet_shutdown_lndnets() will attempt to access the
3579  * resend list, but will have to wait until the spinlock is released, by
3580  * which time there shouldn't be any more messages on the resend list.
3581  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3582  * for the messages so they can be released. The other case is that
3583  * lnet_shutdown_lndnets() can finalize all the messages before this
3584  * function can visit the resend list, in which case this function will be
3585  * a no-op.
3586  */
3587 static void lnet_resend_msgs(void)
3588 {
3589         struct lnet_msg *msg, *tmp;
3590         LIST_HEAD(resend);
3591         int rc;
3592
3593         spin_lock(&the_lnet.ln_msg_resend_lock);
3594         list_splice(&the_lnet.ln_msg_resend, &resend);
3595         spin_unlock(&the_lnet.ln_msg_resend_lock);
3596
3597         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3598                 list_del_init(&msg->msg_list);
3599                 rc = lnet_send(msg->msg_src_nid_param, msg,
3600                                msg->msg_rtr_nid_param);
3601                 if (rc < 0) {
3602                         CNETERR("Error sending %s to %s: %d\n",
3603                                lnet_msgtyp2str(msg->msg_type),
3604                                libcfs_id2str(msg->msg_target), rc);
3605                         lnet_finalize(msg, rc);
3606                 }
3607         }
3608 }
3609
3610 /* The discovery thread. */
3611 static int lnet_peer_discovery(void *arg)
3612 {
3613         struct lnet_peer *lp;
3614         int rc;
3615
3616         wait_for_completion(&the_lnet.ln_started);
3617
3618         CDEBUG(D_NET, "started\n");
3619
3620         for (;;) {
3621                 if (lnet_peer_discovery_wait_for_work())
3622                         break;
3623
3624                 if (lnet_push_target_resize_needed())
3625                         lnet_push_target_resize();
3626                 else if (the_lnet.ln_push_target->pb_needs_post)
3627                         lnet_push_target_post(the_lnet.ln_push_target,
3628                                               &the_lnet.ln_push_target_md);
3629
3630                 lnet_resend_msgs();
3631
3632                 lnet_net_lock(LNET_LOCK_EX);
3633                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3634                         lnet_net_unlock(LNET_LOCK_EX);
3635                         break;
3636                 }
3637
3638                 /*
3639                  * Process all incoming discovery work requests.  When
3640                  * discovery must wait on a peer to change state, it
3641                  * is added to the tail of the ln_dc_working queue. A
3642                  * timestamp keeps track of when the peer was added,
3643                  * so we can time out discovery requests that take too
3644                  * long.
3645                  */
3646                 while (!list_empty(&the_lnet.ln_dc_request)) {
3647                         lp = list_first_entry(&the_lnet.ln_dc_request,
3648                                               struct lnet_peer, lp_dc_list);
3649                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3650                         /*
3651                          * set the time the peer was put on the dc_working
3652                          * queue. It shouldn't remain on the queue
3653                          * forever, in case the GET message (for ping)
3654                          * doesn't get a REPLY or the PUT message (for
3655                          * push) doesn't get an ACK.
3656                          */
3657                         lp->lp_last_queued = ktime_get_real_seconds();
3658                         lnet_net_unlock(LNET_LOCK_EX);
3659
3660                         if (lnet_push_target_resize_needed())
3661                                 lnet_push_target_resize();
3662                         else if (the_lnet.ln_push_target->pb_needs_post)
3663                                 lnet_push_target_post(the_lnet.ln_push_target,
3664                                                       &the_lnet.ln_push_target_md);
3665
3666                         /*
3667                          * Select an action depending on the state of
3668                          * the peer and whether discovery is disabled.
3669                          * The check whether discovery is disabled is
3670                          * done after the code that handles processing
3671                          * for arrived data, cleanup for failures, and
3672                          * forcing a Ping or Push.
3673                          */
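                        /* Precedence, highest first: deletion, new data,
                         * ping failure, push failure, forced ping, forced
                         * push, stale NIDs, pending push; otherwise the
                         * peer is considered discovered.
                         */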
3674                         spin_lock(&lp->lp_lock);
3675                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3676                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3677                                 lp->lp_state);
3678                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3679                                             LNET_PEER_MARK_DELETED))
3680                                 rc = lnet_peer_deletion(lp);
3681                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3682                                 rc = lnet_peer_data_present(lp);
3683                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3684                                 rc = lnet_peer_ping_failed(lp);
3685                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3686                                 rc = lnet_peer_push_failed(lp);
3687                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3688                                 rc = lnet_peer_send_ping(lp);
3689                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3690                                 rc = lnet_peer_send_push(lp);
3691                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3692                                 rc = lnet_peer_send_ping(lp);
3693                         else if (lnet_peer_needs_push(lp))
3694                                 rc = lnet_peer_send_push(lp);
3695                         else
3696                                 rc = lnet_peer_discovered(lp);
3697                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3698                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3699                                 lp->lp_state, rc);
3700                         spin_unlock(&lp->lp_lock);
3701
3702                         lnet_net_lock(LNET_LOCK_EX);
3703                         if (rc == LNET_REDISCOVER_PEER) {
3704                                 list_move(&lp->lp_dc_list,
3705                                           &the_lnet.ln_dc_request);
3706                         } else if (rc) {
3707                                 lnet_peer_discovery_error(lp, rc);
3708                         }
3709                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3710                                 lnet_peer_discovery_complete(lp);
3711                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3712                                 break;
3713
3714                 }
3715
3716                 lnet_net_unlock(LNET_LOCK_EX);
3717         }
3718
3719         CDEBUG(D_NET, "stopping\n");
3720         /*
3721          * Clean up before telling lnet_peer_discovery_stop() that
3722          * we're done. Use wake_up() below to somewhat reduce the
3723          * size of the thundering herd if there are multiple threads
3724          * waiting on discovery of a single peer.
3725          */
3726
3727         /* Queue cleanup 1: stop all pending pings and pushes. */
3728         lnet_net_lock(LNET_LOCK_EX);
3729         while (!list_empty(&the_lnet.ln_dc_working)) {
3730                 lp = list_first_entry(&the_lnet.ln_dc_working,
3731                                       struct lnet_peer, lp_dc_list);
3732                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3733                 lnet_net_unlock(LNET_LOCK_EX);
3734                 lnet_peer_cancel_discovery(lp);
3735                 lnet_net_lock(LNET_LOCK_EX);
3736         }
3737         lnet_net_unlock(LNET_LOCK_EX);
3738
3739         /* Queue cleanup 2: wait for the expired queue to clear. */
3740         while (!list_empty(&the_lnet.ln_dc_expired))
3741                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3742
3743         /* Queue cleanup 3: clear the request queue. */
3744         lnet_net_lock(LNET_LOCK_EX);
3745         while (!list_empty(&the_lnet.ln_dc_request)) {
3746                 lp = list_first_entry(&the_lnet.ln_dc_request,
3747                                       struct lnet_peer, lp_dc_list);
3748                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3749                 lnet_peer_discovery_complete(lp);
3750         }
3751         lnet_net_unlock(LNET_LOCK_EX);
3752
3753         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3754         the_lnet.ln_dc_handler = NULL;
3755
3756         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3757         wake_up(&the_lnet.ln_dc_waitq);
3758
3759         CDEBUG(D_NET, "stopped\n");
3760
3761         return 0;
3762 }
3763
3764 /* ln_api_mutex is held on entry. */
3765 int lnet_peer_discovery_start(void)
3766 {
3767         struct task_struct *task;
3768         int rc = 0;
3769
3770         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3771                 return -EALREADY;
3772
3773         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3774         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3775         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3776         if (IS_ERR(task)) {
3777                 rc = PTR_ERR(task);
3778                 CERROR("Can't start peer discovery thread: %d\n", rc);
3779
3780                 the_lnet.ln_dc_handler = NULL;
3781
3782                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3783         }
3784
3785         CDEBUG(D_NET, "discovery start: %d\n", rc);
3786
3787         return rc;
3788 }
3789
3790 /* ln_api_mutex is held on entry. */
3791 void lnet_peer_discovery_stop(void)
3792 {
3793         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3794                 return;
3795
3796         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3797         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3798
3799         /* In the LNetNIInit() path we may be stopping discovery before it
3800          * has entered its work loop.
3801          */
3802         if (!completion_done(&the_lnet.ln_started))
3803                 complete(&the_lnet.ln_started);
3804         else
3805                 wake_up(&the_lnet.ln_dc_waitq);
3806
3807         wait_event(the_lnet.ln_dc_waitq,
3808                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3809
3810         LASSERT(list_empty(&the_lnet.ln_dc_request));
3811         LASSERT(list_empty(&the_lnet.ln_dc_working));
3812         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3813
3814         CDEBUG(D_NET, "discovery stopped\n");
3815 }
3816
3817 /* Debugging */
3818
3819 void
3820 lnet_debug_peer(lnet_nid_t nid)
3821 {
3822         char                    *aliveness = "NA";
3823         struct lnet_peer_ni     *lp;
3824         int                     cpt;
3825
3826         cpt = lnet_cpt_of_nid(nid, NULL);
3827         lnet_net_lock(cpt);
3828
3829         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3830         if (IS_ERR(lp)) {
3831                 lnet_net_unlock(cpt);
3832                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3833                 return;
3834         }
3835
3836         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3837                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3838
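        /* Columns: nid, refcount, aliveness, ni tx credits, rtr credits,
         * min rtr credits, tx credits, min tx credits, tx queue bytes.
         */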
3839         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3840                libcfs_nid2str(lp->lpni_nid), kref_read(&lp->lpni_kref),
3841                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3842                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3843                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3844
3845         lnet_peer_ni_decref_locked(lp);
3846
3847         lnet_net_unlock(cpt);
3848 }
3849
3850 /* Gathering information for userspace. */
3851
3852 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3853                           char aliveness[LNET_MAX_STR_LEN],
3854                           __u32 *cpt_iter, __u32 *refcount,
3855                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3856                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3857                           __u32 *peer_tx_qnob)
3858 {
3859         struct lnet_peer_table          *peer_table;
3860         struct lnet_peer_ni             *lp;
3861         int                             j;
3862         int                             lncpt;
3863         bool                            found = false;
3864
3865         /* get the number of CPTs */
3866         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3867
3868         /* if the cpt number to be examined is >= the number of cpts in
3869          * the system then indicate that there are no more cpts to examine
3870          */
3871         if (*cpt_iter >= lncpt)
3872                 return -ENOENT;
3873
3874         /* get the current table */
3875         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3876         /* if the ptable is NULL then there are no more cpts to examine */
3877         if (peer_table == NULL)
3878                 return -ENOENT;
3879
3880         lnet_net_lock(*cpt_iter);
3881
3882         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3883                 struct list_head *peers = &peer_table->pt_hash[j];
3884
3885                 list_for_each_entry(lp, peers, lpni_hashlist) {
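                        /* Skip ahead to the requested index. Once the
                         * entry is filled in, peer_index (unsigned) wraps
                         * around, so the remaining entries are skipped and
                         * "found" is preserved.
                         */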
3886                         if (peer_index-- > 0)
3887                                 continue;
3888
3889                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3890                         if (lnet_isrouter(lp) ||
3891                                 lnet_peer_aliveness_enabled(lp))
3892                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3893                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3894
3895                         *nid = lp->lpni_nid;
3896                         *refcount = kref_read(&lp->lpni_kref);
3897                         *ni_peer_tx_credits =
3898                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3899                         *peer_tx_credits = lp->lpni_txcredits;
3900                         *peer_rtr_credits = lp->lpni_rtrcredits;
3901                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3902                         *peer_tx_qnob = lp->lpni_txqnob;
3903
3904                         found = true;
3905                 }
3906
3907         }
3908         lnet_net_unlock(*cpt_iter);
3909
3910         *cpt_iter = lncpt;
3911
3912         return found ? 0 : -ENOENT;
3913 }
3914
3915 /* ln_api_mutex is held, which keeps the peer list stable */
3916 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3917 {
3918         struct lnet_ioctl_element_stats *lpni_stats;
3919         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3920         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3921         struct lnet_peer_ni_credit_info *lpni_info;
3922         struct lnet_peer_ni *lpni;
3923         struct lnet_peer *lp;
3924         lnet_nid_t nid;
3925         __u32 size;
3926         int rc;
3927
3928         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3929
3930         if (!lp) {
3931                 rc = -ENOENT;
3932                 goto out;
3933         }
3934
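        /* Each peer NI is reported as one fixed-size record. If the
         * caller's buffer can't hold lp_nnis records, report the required
         * size via prcfg_size and fail with -E2BIG.
         */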
3935         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3936                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3937         size *= lp->lp_nnis;
3938         if (size > cfg->prcfg_size) {
3939                 cfg->prcfg_size = size;
3940                 rc = -E2BIG;
3941                 goto out_lp_decref;
3942         }
3943
3944         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3945         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3946         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3947         cfg->prcfg_count = lp->lp_nnis;
3948         cfg->prcfg_size = size;
3949         cfg->prcfg_state = lp->lp_state;
3950
3951         /* Allocate helper buffers. */
3952         rc = -ENOMEM;
3953         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3954         if (!lpni_info)
3955                 goto out_lp_decref;
3956         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3957         if (!lpni_stats)
3958                 goto out_free_info;
3959         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3960         if (!lpni_msg_stats)
3961                 goto out_free_stats;
3962         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3963         if (!lpni_hstats)
3964                 goto out_free_msg_stats;
3965
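        /* Serialize each peer NI to the user buffer in the same order as
         * the size calculation above: NID, credit info, element stats,
         * message stats, health stats.
         */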
3967         lpni = NULL;
3968         rc = -EFAULT;
3969         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3970                 nid = lpni->lpni_nid;
3971                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3972                         goto out_free_hstats;
3973                 bulk += sizeof(nid);
3974
3975                 memset(lpni_info, 0, sizeof(*lpni_info));
3976                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3977                 if (lnet_isrouter(lpni) ||
3978                         lnet_peer_aliveness_enabled(lpni))
3979                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3980                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3981
3982                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
3983                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3984                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3985                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3986                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3987                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3988                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3989                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3990                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3991                         goto out_free_hstats;
3992                 bulk += sizeof(*lpni_info);
3993
3994                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3995                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3996                                                             LNET_STATS_TYPE_SEND);
3997                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3998                                                             LNET_STATS_TYPE_RECV);
3999                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4000                                                             LNET_STATS_TYPE_DROP);
4001                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4002                         goto out_free_hstats;
4003                 bulk += sizeof(*lpni_stats);
4004                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4005                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4006                         goto out_free_hstats;
4007                 bulk += sizeof(*lpni_msg_stats);
4008                 lpni_hstats->hlpni_network_timeout =
4009                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4010                 lpni_hstats->hlpni_remote_dropped =
4011                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4012                 lpni_hstats->hlpni_remote_timeout =
4013                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4014                 lpni_hstats->hlpni_remote_error =
4015                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4016                 lpni_hstats->hlpni_health_value =
4017                   atomic_read(&lpni->lpni_healthv);
4018                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4019                         goto out_free_hstats;
4020                 bulk += sizeof(*lpni_hstats);
4021         }
4022         rc = 0;
4023
4024 out_free_hstats:
4025         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4026 out_free_msg_stats:
4027         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4028 out_free_stats:
4029         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4030 out_free_info:
4031         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4032 out_lp_decref:
4033         lnet_peer_decref_locked(lp);
4034 out:
4035         return rc;
4036 }
4037
4038 /* must hold net_lock/0 */
4039 void
4040 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4041                                      struct list_head *recovery_queue,
4042                                      time64_t now)
4043 {
4044         /* the monitor thread could've shut down and cleaned up the queues */
4045         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4046                 return;
4047
4048         if (!list_empty(&lpni->lpni_recovery))
4049                 return;
4050
4051         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4052                 return;
4053
4054         if (!lpni->lpni_last_alive) {
4055                 CDEBUG(D_NET,
4056                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4057                        libcfs_nid2str(lpni->lpni_nid), lpni,
4058                        lpni->lpni_last_alive);
4059                 return;
4060         }
4061
4062         if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
4063                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4064                        libcfs_nid2str(lpni->lpni_nid),
4065                        lpni->lpni_last_alive);
4066                 /* Reset the ping count so that if this peer NI is added back to
4067                  * the recovery queue we will send the first ping right away.
4068                  */
4069                 lpni->lpni_ping_count = 0;
4070                 return;
4071         }
4072
4073         /* This peer NI is going on the recovery queue, so take a ref on it */
4074         lnet_peer_ni_addref_locked(lpni);
4075
4076         lnet_peer_ni_set_next_ping(lpni, now);
4077
4078         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4079                libcfs_nid2str(lpni->lpni_nid),
4080                lpni->lpni_ping_count,
4081                lpni->lpni_next_ping,
4082                lpni->lpni_last_alive,
4083                atomic_read(&lpni->lpni_healthv));
4084
4085         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4086 }
4087
4088 /* Call with the ln_api_mutex held */
4089 void
4090 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4091 {
4092         struct lnet_peer_table *ptable;
4093         struct lnet_peer *lp;
4094         struct lnet_peer_net *lpn;
4095         struct lnet_peer_ni *lpni;
4096         int lncpt;
4097         int cpt;
4098         time64_t now;
4099
4100         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4101                 return;
4102
4103         now = ktime_get_seconds();
4104
4105         if (!all) {
4106                 lnet_net_lock(LNET_LOCK_EX);
4107                 lpni = lnet_find_peer_ni_locked(nid);
4108                 if (!lpni) {
4109                         lnet_net_unlock(LNET_LOCK_EX);
4110                         return;
4111                 }
4112                 atomic_set(&lpni->lpni_healthv, value);
4113                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4114                                              &the_lnet.ln_mt_peerNIRecovq, now);
4115                 lnet_peer_ni_decref_locked(lpni);
4116                 lnet_net_unlock(LNET_LOCK_EX);
4117                 return;
4118         }
4119
4120         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4121
4122         /*
4123          * Walk all the peers and reset the health value for each one to the
4124          * specified value.
4125          */
4126         lnet_net_lock(LNET_LOCK_EX);
4127         for (cpt = 0; cpt < lncpt; cpt++) {
4128                 ptable = the_lnet.ln_peer_tables[cpt];
4129                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4130                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4131                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4132                                                     lpni_peer_nis) {
4133                                         atomic_set(&lpni->lpni_healthv, value);
4134                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4135                                              &the_lnet.ln_mt_peerNIRecovq, now);
4136                                 }
4137                         }
4138                 }
4139         }
4140         lnet_net_unlock(LNET_LOCK_EX);
4141 }
4142