/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}
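
/*
 * Editor's note (illustrative sketch, not part of the original file):
 * lnet_peer_tables_create() pairs with lnet_peer_tables_destroy(),
 * which is reached via lnet_peer_uninit() below. A hypothetical
 * startup path would look like:
 *
 *        rc = lnet_peer_tables_create();
 *        if (rc != 0)
 *                return rc;
 *        ...
 *        lnet_peer_uninit();     (on shutdown; destroys the tables)
 */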

static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NIDNET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                lnet_peer_ni_addref_locked(lpni);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

        return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;
        lp->lp_disc_src_nid = LNET_NID_ANY;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * all peers created on a router should have health on
         * if it's not already on.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for loopback peer. If you're creating a peer
         * for the loopback interface then that was initiated when we
         * attempted to send a message over the loopback. There is no need
         * to ever use a different interface when sending messages to
         * myself.
         */
        if (nid == LNET_NID_LO_0)
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * if there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected the lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on the list.
         * We cannot resend them now because we're holding the cpt lock.
         * Releasing the lock can cause an inconsistent state.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nid2str(lp->lp_primary_nid),
                libcfs_nid2str(lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
390         /* don't remove a peer_ni if it's also a gateway */
391         if (lnet_isrouter(lpni) && !force) {
392                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
393                        libcfs_nid2str(lpni->lpni_nid));
394                 return -EBUSY;
395         }
396
397         lnet_peer_remove_from_remote_list(lpni);
398
399         /* remove peer ni from the hash list. */
400         list_del_init(&lpni->lpni_hashlist);
401
402         /*
403          * indicate the peer is being deleted so the monitor thread can
404          * remove it from the recovery queue.
405          */
406         spin_lock(&lpni->lpni_lock);
407         lpni->lpni_state |= LNET_PEER_NI_DELETING;
408         spin_unlock(&lpni->lpni_lock);
409
410         /* decrement the ref count on the peer table */
411         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
412
413         /*
414          * The peer_ni can no longer be found with a lookup. But there
415          * can be current users, so keep track of it on the zombie
416          * list until the reference count has gone to zero.
417          *
418          * The last reference may be lost in a place where the
419          * lnet_net_lock locks only a single cpt, and that cpt may not
420          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
421          * has its own lock.
422          */
423         spin_lock(&ptable->pt_zombie_lock);
424         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
425         ptable->pt_zombies++;
426         spin_unlock(&ptable->pt_zombie_lock);
427
428         /* no need to keep this peer_ni on the hierarchy anymore */
429         lnet_peer_detach_peer_ni_locked(lpni);
430
431         /* remove hashlist reference on peer_ni */
432         lnet_peer_ni_decref_locked(lpni);
433
434         return 0;
435 }
436
437 void lnet_peer_uninit(void)
438 {
439         struct lnet_peer_ni *lpni, *tmp;
440
441         lnet_net_lock(LNET_LOCK_EX);
442
443         /* remove all peer_nis from the remote peer and the hash list */
444         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
445                                  lpni_on_remote_peer_ni_list)
446                 lnet_peer_ni_del_locked(lpni, false);
447
448         lnet_peer_tables_destroy();
449
450         lnet_net_unlock(LNET_LOCK_EX);
451 }
452
453 static int
454 lnet_peer_del_locked(struct lnet_peer *peer)
455 {
456         struct lnet_peer_ni *lpni = NULL, *lpni2;
457         int rc = 0, rc2 = 0;
458
459         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
460
461         spin_lock(&peer->lp_lock);
462         peer->lp_state |= LNET_PEER_MARK_DELETED;
463         spin_unlock(&peer->lp_lock);
464
465         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
466         while (lpni != NULL) {
467                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
468                 rc = lnet_peer_ni_del_locked(lpni, false);
469                 if (rc != 0)
470                         rc2 = rc;
471                 lpni = lpni2;
472         }
473
474         return rc2;
475 }
476
477 /*
478  * Discovering this peer is taking too long. Cancel any Ping or Push
479  * that discovery is waiting on by unlinking the relevant MDs. The
480  * lnet_discovery_event_handler() will proceed from here and complete
481  * the cleanup.
482  */
483 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
484 {
485         struct lnet_handle_md ping_mdh;
486         struct lnet_handle_md push_mdh;
487
488         LNetInvalidateMDHandle(&ping_mdh);
489         LNetInvalidateMDHandle(&push_mdh);
490
491         spin_lock(&lp->lp_lock);
492         if (lp->lp_state & LNET_PEER_PING_SENT) {
493                 ping_mdh = lp->lp_ping_mdh;
494                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
495         }
496         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
497                 push_mdh = lp->lp_push_mdh;
498                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
499         }
500         spin_unlock(&lp->lp_lock);
501
502         if (!LNetMDHandleIsInvalid(ping_mdh))
503                 LNetMDUnlink(ping_mdh);
504         if (!LNetMDHandleIsInvalid(push_mdh))
505                 LNetMDUnlink(push_mdh);
506 }
507
508 static int
509 lnet_peer_del(struct lnet_peer *peer)
510 {
511         lnet_peer_cancel_discovery(peer);
512         lnet_net_lock(LNET_LOCK_EX);
513         lnet_peer_del_locked(peer);
514         lnet_net_unlock(LNET_LOCK_EX);
515
516         return 0;
517 }
518
519 /*
520  * Delete a NID from a peer. Call with ln_api_mutex held.
521  *
522  * Error codes:
523  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
524  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
525  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
526  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
527  */
528 static int
529 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
530 {
531         struct lnet_peer_ni *lpni;
532         lnet_nid_t primary_nid = lp->lp_primary_nid;
533         int rc = 0;
534         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
535
536         if (!(flags & LNET_PEER_CONFIGURED)) {
537                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
538                         rc = -EPERM;
539                         goto out;
540                 }
541         }
542         lpni = lnet_find_peer_ni_locked(nid);
543         if (!lpni) {
544                 rc = -ENOENT;
545                 goto out;
546         }
547         lnet_peer_ni_decref_locked(lpni);
548         if (lp != lpni->lpni_peer_net->lpn_peer) {
549                 rc = -ECHILD;
550                 goto out;
551         }
552
553         /*
554          * This function only allows deletion of the primary NID if it
555          * is the only NID.
556          */
557         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
558                 rc = -EBUSY;
559                 goto out;
560         }
561
562         lnet_net_lock(LNET_LOCK_EX);
563
564         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
565                 struct lnet_peer_ni *lpni2;
566                 /* assign the next peer_ni to be the primary */
567                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
568                 LASSERT(lpni2);
569                 lp->lp_primary_nid = lpni2->lpni_nid;
570         }
571         rc = lnet_peer_ni_del_locked(lpni, force);
572
573         lnet_net_unlock(LNET_LOCK_EX);
574
575 out:
576         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
577                libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
578
579         return rc;
580 }
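
/*
 * Illustrative sketch (editor's addition, hypothetical caller): mapping
 * the error codes documented above. The ln_api_mutex must already be
 * held, as lnet_peer_del_nid() requires:
 *
 *        rc = lnet_peer_del_nid(lp, nid, flags);
 *        switch (rc) {
 *        case -EBUSY:
 *                the NID is the primary while other NIDs remain;
 *                delete the others first, or pass
 *                LNET_PEER_RTR_NI_FORCE_DEL in flags
 *                break;
 *        case -ECHILD:
 *                the NID exists but belongs to a different peer
 *                break;
 *        }
 */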
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (peer->lp_primary_nid != lpni->lpni_nid) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        lnet_nid_t              gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (lp->lpni_nid == nid) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lpni->lpni_nid == nid)
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}
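
/*
 * Usage sketch (editor's addition): lnet_find_peer() returns the peer
 * with a reference held, so a hypothetical caller must drop that
 * reference under the net lock when done:
 *
 *        struct lnet_peer *lp = lnet_find_peer(nid);
 *
 *        if (lp) {
 *                ... use lp ...
 *                lnet_net_lock(LNET_LOCK_EX);
 *                lnet_peer_decref_locked(lp);
 *                lnet_net_unlock(LNET_LOCK_EX);
 *        }
 */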

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list loop to the
                         * beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}
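
/*
 * Illustrative sketch (editor's addition): because the lookup above
 * wraps from the last net back to the first, a hypothetical caller
 * walking all nets once must remember its starting point to terminate:
 *
 *        __u32 first_id = 0;
 *        __u32 id = 0;
 *        struct lnet_peer_net *lpn;
 *
 *        while ((lpn = lnet_get_next_peer_net_locked(lp, id)) != NULL) {
 *                if (lpn->lpn_net_id == first_id)
 *                        break;
 *                if (first_id == 0)
 *                        first_id = lpn->lpn_net_id;
 *                id = lpn->lpn_net_id;
 *        }
 */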

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_entry(peer->lp_peer_nets.next,
                                         struct lnet_peer_net,
                                         lpn_peer_nets);
                }
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if you reached the end of the peer ni list and the peer
                 * net is specified then there are no more peer nis in that
                 * net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net ni list. move to the
                 * next net
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
                                 struct lnet_peer_net,
                                 lpn_peer_nets);
                /* get the ni on it */
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_entry(prev->lpni_peer_nis.next,
                          struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (i >= count)
                                goto done;
                        id.nid = lp->lp_primary_nid;
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}
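
/*
 * Illustrative sketch (editor's addition, hypothetical ioctl path):
 * the sizing protocol above supports a two-pass caller, which first
 * discovers the required buffer size and then retries:
 *
 *        count = 0;
 *        size = 0;
 *        rc = lnet_get_peer_list(&count, &size, NULL);
 *        if (rc == -E2BIG) {
 *                size now holds the required byte count; allocate a
 *                buffer of that size and call again
 *        }
 */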

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}
/* Find the NID in the preferred gateways for the remote peer.
 * Return:
 *      false: the list is empty, or the NID is not in it
 *      true:  the NID was found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             lnet_nid_t gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nid2str(lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nid2str(ne->nl_nid),
                       libcfs_nid2str(gw_nid));
                if (ne->nl_nid == gw_nid)
                        return true;
        }

        return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       lnet_nid_t gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with api_mutex held. When the api_mutex
         * is held the list can not be modified, as it is only modified as
         * a result of applying a UDSP and that happens under api_mutex
         * lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (ne->nl_nid == gw_nid)
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}
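
/*
 * Usage sketch (editor's addition): a hypothetical UDSP action adds a
 * preferred gateway under the ln_api_mutex, and the selection code
 * later tests it under the net lock:
 *
 *        rc = lnet_peer_add_pref_rtr(lpni, gw_nid);
 *        if (rc != 0 && rc != -EEXIST)
 *                return rc;
 *        ...
 *        if (lnet_peer_is_pref_rtr_locked(lpni, gw_nid))
 *                prefer this gateway when routing to lpni
 */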

/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e. whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return lpni->lpni_pref.nid == nid;
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (ne->nl_nid == nid)
                        return true;
        }
        return false;
}
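
/*
 * Editor's note (illustrative): lpni_pref is used in two modes keyed
 * off lpni_pref_nnids, which explains the three-way test above:
 *
 *        lpni_pref_nnids == 0: no preference is set
 *        lpni_pref_nnids == 1: the NID is stored inline in
 *                              lpni_pref.nid
 *        lpni_pref_nnids >= 2: lpni_pref.nids heads a list of
 *                              struct lnet_nid_list entries
 *
 * lnet_peer_add_pref_nid() below performs the inline-to-list
 * conversion when a second NID is added.
 */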

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        lnet_nid_t tmp_nid = LNET_NID_ANY;
        int rc = 0;

        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (ne1->nl_nid == nid) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = tmp_nid;
                }
                ne1->nl_nid = nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (lpni->lpni_pref.nid != nid) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (ne->nl_nid == nid)
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (ne)
                LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }

        return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;
        int rc = 0;
        int cpt;

        if (nid == LNET_NID_LO_0)
                return LNET_NID_LO_0;

        cpt = lnet_net_lock_current();
        lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        /* If discovery is disabled locally then we needn't bother running
         * discovery here because discovery will not modify whatever
         * primary NID is currently set for this peer. If the specified peer is
         * down then this discovery can introduce long delays into the mount
         * process, so skip it if it isn't necessary.
         */
        while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
                spin_lock(&lp->lp_lock);
                /* force a full discovery cycle */
                lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
                spin_unlock(&lp->lp_lock);

                rc = lnet_discover_peer_locked(lpni, cpt, true);
                if (rc)
                        goto out_decref;
                /* The lpni (or lp) for this NID may have changed and our ref is
                 * the only thing keeping the old one around. Release the ref
                 * and lookup the lpni again
                 */
                lnet_peer_ni_decref_locked(lpni);
                lpni = lnet_find_peer_ni_locked(nid);
                if (!lpni) {
                        rc = -ENOENT;
                        goto out_unlock;
                }
                lp = lpni->lpni_peer_net->lpn_peer;

                /* If we find that the peer has discovery disabled then we will
                 * not modify whatever primary NID is currently set for this
                 * peer. Thus, we can break out of this loop even if the peer
                 * is not fully up to date.
                 */
                if (lnet_is_discovery_disabled(lp))
                        break;
        }
        primary_nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
               libcfs_nid2str(primary_nid), rc);
        return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
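
/*
 * Usage sketch (editor's addition): callers typically normalize any of
 * a peer's NIDs to the primary NID before using it as a stable key:
 *
 *        lnet_nid_t key = LNetPrimaryNID(src_nid);
 *
 * where src_nid is a hypothetical NID taken from an incoming message.
 * If the lookup or discovery fails, the input NID is returned
 * unchanged, so the result is always usable.
 */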

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (peer_net->lpn_net_id == net_id)
                        return peer_net;
        }
        return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
                                struct lnet_peer_net *lpn,
                                struct lnet_peer_ni *lpni,
                                unsigned flags)
{
        struct lnet_peer_table *ptable;
        bool new_lpn = false;
        int rc;

        /* Install the new peer_ni */
        lnet_net_lock(LNET_LOCK_EX);
        /* Add peer_ni to global peer table hash, if necessary. */
        if (list_empty(&lpni->lpni_hashlist)) {
                int hash = lnet_nid2peerhash(lpni->lpni_nid);

                ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
                list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
                ptable->pt_version++;
                lnet_peer_ni_addref_locked(lpni);
        }

        /* Detach the peer_ni from an existing peer, if necessary. */
        if (lpni->lpni_peer_net) {
                LASSERT(lpni->lpni_peer_net != lpn);
                LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
                lnet_peer_detach_peer_ni_locked(lpni);
                lnet_peer_net_decref_locked(lpni->lpni_peer_net);
                lpni->lpni_peer_net = NULL;
        }

        /* Add peer_ni to peer_net */
        lpni->lpni_peer_net = lpn;
        if (lp->lp_primary_nid == lpni->lpni_nid)
                list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
        else
                list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
        lnet_update_peer_net_healthv(lpni);
        lnet_peer_net_addref_locked(lpn);

        /* Add peer_net to peer */
        if (!lpn->lpn_peer) {
                new_lpn = true;
                lpn->lpn_peer = lp;
                if (lp->lp_primary_nid == lpni->lpni_nid)
                        list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
                else
                        list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
                lnet_peer_addref_locked(lp);
        }

        /* Add peer to global peer list, if necessary */
        ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
        if (list_empty(&lp->lp_peer_list)) {
                list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
                ptable->pt_peers++;
        }

        /* Update peer state */
        spin_lock(&lp->lp_lock);
        if (flags & LNET_PEER_CONFIGURED) {
                if (!(lp->lp_state & LNET_PEER_CONFIGURED))
                        lp->lp_state |= LNET_PEER_CONFIGURED;
        }
        if (flags & LNET_PEER_MULTI_RAIL) {
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
        }
        spin_unlock(&lp->lp_lock);

        lp->lp_nnis++;

        /* apply UDSPs */
        if (new_lpn) {
                rc = lnet_udsp_apply_policies_on_lpn(lpn);
                if (rc)
                        CERROR("Failed to apply UDSPs on lpn %s\n",
                               libcfs_net2str(lpn->lpn_net_id));
        }
        rc = lnet_udsp_apply_policies_on_lpni(lpni);
        if (rc)
                CERROR("Failed to apply UDSPs on lpni %s\n",
                       libcfs_nid2str(lpni->lpni_nid));

        CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
               libcfs_nid2str(lp->lp_primary_nid),
               libcfs_nid2str(lpni->lpni_nid), flags);
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(LNET_LOCK_EX);

        return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer *lp;
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(nid != LNET_NID_ANY);

        /*
         * No need for the lnet_net_lock here, because the
         * lnet_api_mutex is held.
         */
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /* A peer with this NID already exists. */
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_ni_decref_locked(lpni);
                /*
                 * This is an error if the peer was configured and the
                 * primary NID differs or an attempt is made to change
                 * the Multi-Rail flag. Otherwise the assumption is
                 * that an existing peer is being modified.
                 */
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        if (lp->lp_primary_nid != nid)
                                rc = -EEXIST;
                        else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
                                rc = -EPERM;
                        goto out;
                }
                /* Delete and recreate as a configured peer. */
                lnet_peer_del(lp);
        }

        /* Create peer, peer_net, and peer_ni. */
        rc = -ENOMEM;
        lp = lnet_peer_alloc(nid);
        if (!lp)
                goto out;
        lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
        if (!lpn)
                goto out_free_lp;
        lpni = lnet_peer_ni_alloc(nid);
        if (!lpni)
                goto out_free_lpn;

        return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
        LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
        LIBCFS_FREE(lp, sizeof(*lp));
out:
        CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
               libcfs_nid2str(nid), flags, rc);
        return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(lp);
        LASSERT(nid != LNET_NID_ANY);

        /* A configured peer can only be updated through configuration. */
        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /*
         * The MULTI_RAIL flag can be set but not cleared, because
         * that would leave the peer struct in an invalid state.
         */
        if (flags & LNET_PEER_MULTI_RAIL) {
                spin_lock(&lp->lp_lock);
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
                spin_unlock(&lp->lp_lock);
        } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /*
                 * A peer_ni already exists. This is only a problem if
                 * it is not connected to this peer and was configured
                 * by DLC.
                 */
1630                 if (lpni->lpni_peer_net->lpn_peer == lp)
1631                         goto out_free_lpni;
1632                 if (lnet_peer_ni_is_configured(lpni)) {
1633                         rc = -EEXIST;
1634                         goto out_free_lpni;
1635                 }
1636                 /* If this is the primary NID, destroy the peer. */
1637                 if (lnet_peer_ni_is_primary(lpni)) {
1638                         struct lnet_peer *rtr_lp =
1639                                 lpni->lpni_peer_net->lpn_peer;
1640                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1641                         /*
1642                          * If we're trying to delete a router, it means
1643                          * we're moving this peer NI to a new peer, so we
1644                          * must transfer the router properties to the new peer.
1645                          */
1646                         if (rtr_refcount > 0) {
1647                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1648                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1649                         }
1650                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1651                         lnet_peer_ni_decref_locked(lpni);
1652                         lpni = lnet_peer_ni_alloc(nid);
1653                         if (!lpni) {
1654                                 rc = -ENOMEM;
1655                                 goto out_free_lpni;
1656                         }
1657                 }
1658         } else {
1659                 lpni = lnet_peer_ni_alloc(nid);
1660                 if (!lpni) {
1661                         rc = -ENOMEM;
1662                         goto out_free_lpni;
1663                 }
1664         }
1665
1666         /*
1667          * Get the peer_net. Check that we're not adding a second
1668          * peer_ni on a peer_net of a non-multi-rail peer.
1669          */
1670         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1671         if (!lpn) {
1672                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1673                 if (!lpn) {
1674                         rc = -ENOMEM;
1675                         goto out_free_lpni;
1676                 }
1677         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1678                 rc = -ENOTUNIQ;
1679                 goto out_free_lpni;
1680         }
1681
1682         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1683
1684 out_free_lpni:
1685         lnet_peer_ni_decref_locked(lpni);
1686 out:
1687         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1688                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1689                flags, rc);
1690         return rc;
1691 }
1692
1693 /*
1694  * Update the primary NID of a peer, if possible.
1695  *
1696  * Call with the lnet_api_mutex held.
1697  */
1698 static int
1699 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1700 {
1701         lnet_nid_t old = lp->lp_primary_nid;
1702         int rc = 0;
1703
1704         if (lp->lp_primary_nid == nid)
1705                 goto out;
1706
1707         lp->lp_primary_nid = nid;
1708
1709         rc = lnet_peer_add_nid(lp, nid, flags);
1710         if (rc) {
1711                 lp->lp_primary_nid = old;
1712                 goto out;
1713         }
1714 out:
1715         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1716                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1717         return rc;
1718 }
1719
1720 /*
1721  * lpni creation initiated by traffic, either sending or receiving.
1722  */
1723 static int
1724 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1725 {
1726         struct lnet_peer *lp;
1727         struct lnet_peer_net *lpn;
1728         struct lnet_peer_ni *lpni;
1729         unsigned flags = 0;
1730         int rc = 0;
1731
1732         if (nid == LNET_NID_ANY) {
1733                 rc = -EINVAL;
1734                 goto out;
1735         }
1736
1737         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1738         lpni = lnet_find_peer_ni_locked(nid);
1739         if (lpni) {
1740                 /*
1741                  * We must have raced with another thread. Since we
1742                  * know next to nothing about a peer_ni created by
1743                  * traffic, we just assume everything is ok and
1744                  * return.
1745                  */
1746                 lnet_peer_ni_decref_locked(lpni);
1747                 goto out;
1748         }
1749
1750         /* Create peer, peer_net, and peer_ni. */
1751         rc = -ENOMEM;
1752         lp = lnet_peer_alloc(nid);
1753         if (!lp)
1754                 goto out;
1755         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1756         if (!lpn)
1757                 goto out_free_lp;
1758         lpni = lnet_peer_ni_alloc(nid);
1759         if (!lpni)
1760                 goto out_free_lpn;
1761         if (pref != LNET_NID_ANY)
1762                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1763
1764         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1765
1766 out_free_lpn:
1767         LIBCFS_FREE(lpn, sizeof(*lpn));
1768 out_free_lp:
1769         LIBCFS_FREE(lp, sizeof(*lp));
1770 out:
1771         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1772         return rc;
1773 }
1774
1775 /*
1776  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1777  *
1778  * This API handles the following combinations:
1779  *  - Create a peer with its primary NI if only the prim_nid is provided.
1780  *  - Add a NID to a peer identified by the prim_nid. The peer identified
1781  *    by the prim_nid must already exist.
1782  *  - The peer being created may be non-MR.
1783  *
1784  * The caller must hold ln_api_mutex. This prevents the peer from
1785  * being created/modified/deleted by a different thread.
1786  */
1787 int
1788 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1789 {
1790         struct lnet_peer *lp = NULL;
1791         struct lnet_peer_ni *lpni;
1792         unsigned flags;
1793
1794         /* The prim_nid must always be specified */
1795         if (prim_nid == LNET_NID_ANY)
1796                 return -EINVAL;
1797
1798         flags = LNET_PEER_CONFIGURED;
1799         if (mr)
1800                 flags |= LNET_PEER_MULTI_RAIL;
1801
1802         /*
1803          * If nid isn't specified, we must create a new peer with
1804          * prim_nid as its primary nid.
1805          */
1806         if (nid == LNET_NID_ANY)
1807                 return lnet_peer_add(prim_nid, flags);
1808
1809         /* Look up the prim_nid, which must exist. */
1810         lpni = lnet_find_peer_ni_locked(prim_nid);
1811         if (!lpni)
1812                 return -ENOENT;
1813         lnet_peer_ni_decref_locked(lpni);
1814         lp = lpni->lpni_peer_net->lpn_peer;
1815
1816         /* Peer must have been configured. */
1817         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1818                 CDEBUG(D_NET, "peer %s was not configured\n",
1819                        libcfs_nid2str(prim_nid));
1820                 return -ENOENT;
1821         }
1822
1823         /* Primary NID must match */
1824         if (lp->lp_primary_nid != prim_nid) {
1825                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1826                        libcfs_nid2str(prim_nid),
1827                        libcfs_nid2str(lp->lp_primary_nid));
1828                 return -ENODEV;
1829         }
1830
1831         /* Multi-Rail flag must match. */
1832         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1833                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1834                        libcfs_nid2str(prim_nid));
1835                 return -EPERM;
1836         }
1837
1838         return lnet_peer_add_nid(lp, nid, flags);
1839 }
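
/*
 * Usage sketch (annotation, not part of the original source): how the
 * ioctl path described above might drive this function to create a
 * Multi-Rail peer and then attach a second NID to it. The NID variables
 * are hypothetical; the ln_api_mutex requirement is as documented above.
 *
 *	lnet_nid_t prim_nid = ...;	// primary NID, e.g. from DLC
 *	lnet_nid_t second_nid = ...;	// additional NID for the same peer
 *	int rc;
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
 *	if (!rc)
 *		rc = lnet_add_peer_ni(prim_nid, second_nid, true);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */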
1840
1841 /*
1842  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1843  *
1844  * This API handles the following combinations:
1845  *  - Delete a NI from a peer if both prim_nid and nid are provided.
1846  *  - Delete a peer if only prim_nid is provided.
1847  *  - Delete a peer if the nid provided is its primary nid.
1848  *
1849  * The caller must hold ln_api_mutex. This prevents the peer from
1850  * being modified/deleted by a different thread.
1851  */
1852 int
1853 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1854 {
1855         struct lnet_peer *lp;
1856         struct lnet_peer_ni *lpni;
1857         unsigned flags;
1858
1859         if (prim_nid == LNET_NID_ANY)
1860                 return -EINVAL;
1861
1862         lpni = lnet_find_peer_ni_locked(prim_nid);
1863         if (!lpni)
1864                 return -ENOENT;
1865         lnet_peer_ni_decref_locked(lpni);
1866         lp = lpni->lpni_peer_net->lpn_peer;
1867
1868         if (prim_nid != lp->lp_primary_nid) {
1869                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1870                        libcfs_nid2str(prim_nid),
1871                        libcfs_nid2str(lp->lp_primary_nid));
1872                 return -ENODEV;
1873         }
1874
1875         lnet_net_lock(LNET_LOCK_EX);
1876         if (lp->lp_rtr_refcount > 0) {
1877                 lnet_net_unlock(LNET_LOCK_EX);
1878                 CERROR("%s is a router. It cannot be deleted\n",
1879                        libcfs_nid2str(prim_nid));
1880                 return -EBUSY;
1881         }
1882         lnet_net_unlock(LNET_LOCK_EX);
1883
1884         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1885                 return lnet_peer_del(lp);
1886
1887         flags = LNET_PEER_CONFIGURED;
1888         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1889                 flags |= LNET_PEER_MULTI_RAIL;
1890
1891         return lnet_peer_del_nid(lp, nid, flags);
1892 }
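
/*
 * Usage sketch (annotation, not part of the original source): the two
 * deletion modes handled above, with hypothetical NID variables and the
 * ln_api_mutex held as required.
 *
 *	int rc;
 *
 *	// Remove a single NID from the peer:
 *	rc = lnet_del_peer_ni(prim_nid, nid);
 *
 *	// Delete the entire peer; LNET_NID_ANY (or the primary NID
 *	// itself) selects whole-peer deletion:
 *	rc = lnet_del_peer_ni(prim_nid, LNET_NID_ANY);
 */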
1893
1894 void
1895 lnet_destroy_peer_ni_locked(struct kref *ref)
1896 {
1897         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1898                                                  lpni_kref);
1899         struct lnet_peer_table *ptable;
1900         struct lnet_peer_net *lpn;
1901
1902         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1903
1904         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1905         LASSERT(list_empty(&lpni->lpni_txq));
1906         LASSERT(lpni->lpni_txqnob == 0);
1907         LASSERT(list_empty(&lpni->lpni_peer_nis));
1908         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1909
1910         lpn = lpni->lpni_peer_net;
1911         lpni->lpni_peer_net = NULL;
1912         lpni->lpni_net = NULL;
1913
1914         if (!list_empty(&lpni->lpni_hashlist)) {
1915                 /* remove the peer ni from the zombie list */
1916                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1917                 spin_lock(&ptable->pt_zombie_lock);
1918                 list_del_init(&lpni->lpni_hashlist);
1919                 ptable->pt_zombies--;
1920                 spin_unlock(&ptable->pt_zombie_lock);
1921         }
1922
1923         if (lpni->lpni_pref_nnids > 1) {
1924                 struct lnet_nid_list *ne, *tmp;
1925
1926                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1927                                          nl_list) {
1928                         list_del_init(&ne->nl_list);
1929                         LIBCFS_FREE(ne, sizeof(*ne));
1930                 }
1931         }
1932         LIBCFS_FREE(lpni, sizeof(*lpni));
1933
1934         if (lpn)
1935                 lnet_peer_net_decref_locked(lpn);
1936 }
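
/*
 * Annotation (sketch under an assumption, not upstream code): this is
 * the kref release function for lpni_kref, so the expectation is that
 * the final reference drop reaches it via something like
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 *
 * inside lnet_peer_ni_decref_locked(). The LASSERTs above then hold
 * because the last reference can only drop once the peer NI has been
 * unlinked from its peer net and its send queue drained; any remaining
 * hash linkage is cleared from the zombie list here.
 */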
1937
1938 struct lnet_peer_ni *
1939 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1940 {
1941         struct lnet_peer_ni *lpni = NULL;
1942         int rc;
1943
1944         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1945                 return ERR_PTR(-ESHUTDOWN);
1946
1947         /*
1948          * find if a peer_ni already exists.
1949          * If so then just return that.
1950          */
1951         lpni = lnet_find_peer_ni_locked(nid);
1952         if (lpni)
1953                 return lpni;
1954
1955         lnet_net_unlock(cpt);
1956
1957         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1958         if (rc) {
1959                 lpni = ERR_PTR(rc);
1960                 goto out_net_relock;
1961         }
1962
1963         lpni = lnet_find_peer_ni_locked(nid);
1964         LASSERT(lpni);
1965
1966 out_net_relock:
1967         lnet_net_lock(cpt);
1968
1969         return lpni;
1970 }
1971
1972 /*
1973  * Get a peer_ni for the given nid, create it if necessary. Takes a
1974  * hold on the peer_ni.
1975  */
1976 struct lnet_peer_ni *
1977 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1978 {
1979         struct lnet_peer_ni *lpni = NULL;
1980         int rc;
1981
1982         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1983                 return ERR_PTR(-ESHUTDOWN);
1984
1985         /*
1986          * find if a peer_ni already exists.
1987          * If so then just return that.
1988          */
1989         lpni = lnet_find_peer_ni_locked(nid);
1990         if (lpni)
1991                 return lpni;
1992
1993         /*
1994          * Slow path:
1995          * use the lnet_api_mutex to serialize the creation of the peer_ni
1996          * and the creation/deletion of the local ni/net. When a local ni is
1997          * created, if there exists a set of peer_nis on that network,
1998          * they need to be traversed and updated. When a local NI is
1999          * deleted, which could result in a network being deleted, then
2000          * all peer nis on that network need to be removed as well.
2001          *
2002          * Creation through traffic should also be serialized with
2003          * creation through DLC.
2004          */
2005         lnet_net_unlock(cpt);
2006         mutex_lock(&the_lnet.ln_api_mutex);
2007         /*
2008          * Shutdown is only set under the ln_api_mutex, so a single
2009          * check here is sufficient.
2010          */
2011         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2012                 lpni = ERR_PTR(-ESHUTDOWN);
2013                 goto out_mutex_unlock;
2014         }
2015
2016         rc = lnet_peer_ni_traffic_add(nid, pref);
2017         if (rc) {
2018                 lpni = ERR_PTR(rc);
2019                 goto out_mutex_unlock;
2020         }
2021
2022         lpni = lnet_find_peer_ni_locked(nid);
2023         LASSERT(lpni);
2024
2025 out_mutex_unlock:
2026         mutex_unlock(&the_lnet.ln_api_mutex);
2027         lnet_net_lock(cpt);
2028
2029         /* Lock has been dropped, check again for shutdown. */
2030         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2031                 if (!IS_ERR(lpni))
2032                         lnet_peer_ni_decref_locked(lpni);
2033                 lpni = ERR_PTR(-ESHUTDOWN);
2034         }
2035
2036         return lpni;
2037 }
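
/*
 * Caller sketch (annotation, not part of the original source): the
 * ERR_PTR convention used by lnet_nid2peerni_locked() and
 * lnet_nid2peerni_ex(), and the hold they take on success.
 *
 *	struct lnet_peer_ni *lpni;
 *
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);	// -ESHUTDOWN, -ENOMEM, ...
 *	// ... use lpni under the net lock ...
 *	lnet_peer_ni_decref_locked(lpni);
 */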
2038
2039 bool
2040 lnet_peer_gw_discovery(struct lnet_peer *lp)
2041 {
2042         bool rc = false;
2043
2044         spin_lock(&lp->lp_lock);
2045         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2046                 rc = true;
2047         spin_unlock(&lp->lp_lock);
2048
2049         return rc;
2050 }
2051
2052 bool
2053 lnet_peer_is_uptodate(struct lnet_peer *lp)
2054 {
2055         bool rc;
2056
2057         spin_lock(&lp->lp_lock);
2058         rc = lnet_peer_is_uptodate_locked(lp);
2059         spin_unlock(&lp->lp_lock);
2060         return rc;
2061 }
2062
2063 /*
2064  * Is a peer uptodate from the point of view of discovery?
2065  *
2066  * If it is currently being processed, obviously not.
2067  * A forced Ping or Push is also handled by the discovery thread.
2068  *
2069  * Otherwise look at whether the peer needs rediscovering.
2070  */
2071 bool
2072 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2073 __must_hold(&lp->lp_lock)
2074 {
2075         bool rc;
2076
2077         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2078                             LNET_PEER_FORCE_PING |
2079                             LNET_PEER_FORCE_PUSH)) {
2080                 rc = false;
2081         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2082                 rc = false;
2083         } else if (lnet_peer_needs_push(lp)) {
2084                 rc = false;
2085         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2086                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2087                         rc = true;
2088                 else
2089                         rc = false;
2090         } else {
2091                 rc = false;
2092         }
2093
2094         return rc;
2095 }
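
/*
 * Equivalently (annotation): the chain above reduces to the single
 * predicate below; a peer is up to date only when discovery is neither
 * running nor pending for it and its NID list has been processed.
 *
 *	return !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *				 LNET_PEER_FORCE_PING |
 *				 LNET_PEER_FORCE_PUSH |
 *				 LNET_PEER_REDISCOVER)) &&
 *	       !lnet_peer_needs_push(lp) &&
 *	       (lp->lp_state & LNET_PEER_DISCOVERED) &&
 *	       (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
 */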
2096
2097 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2098 void
2099 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2100 {
2101         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2102          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2103          * when adding to the list and queuing the peer to ensure that we do not
2104          * strand any messages on the lp_dc_pendq. This scheme ensures the
2105          * message will be resent even if the peer is already being discovered.
2106          * Therefore we needn't check the return value of
2107          * lnet_peer_queue_for_discovery(lp).
2108          */
2109         lnet_net_lock(LNET_LOCK_EX);
2110         spin_lock(&lp->lp_lock);
2111         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2112         spin_unlock(&lp->lp_lock);
2113         lnet_peer_queue_for_discovery(lp);
2114         lnet_net_unlock(LNET_LOCK_EX);
2115 }
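
/*
 * Annotation (not part of the original source): the consumer side of the
 * lock pairing described above is lnet_peer_discovery_complete() below,
 * which splices the pending queue out under the same lp_lock:
 *
 *	spin_lock(&lp->lp_lock);
 *	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
 *	spin_unlock(&lp->lp_lock);
 *
 * Both sides also run under lnet_net_lock(LNET_LOCK_EX), so a message
 * added here is either picked up by an in-flight splice or re-queues
 * the peer, and can never be stranded on lp_dc_pendq.
 */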
2116
2117 /*
2118  * Queue a peer for the attention of the discovery thread.  Call with
2119  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2120  * -EALREADY if the peer was already queued.
2121  */
2122 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2123 {
2124         int rc;
2125
2126         spin_lock(&lp->lp_lock);
2127         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2128                 lp->lp_state |= LNET_PEER_DISCOVERING;
2129         spin_unlock(&lp->lp_lock);
2130         if (list_empty(&lp->lp_dc_list)) {
2131                 lnet_peer_addref_locked(lp);
2132                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2133                 wake_up(&the_lnet.ln_dc_waitq);
2134                 rc = 0;
2135         } else {
2136                 rc = -EALREADY;
2137         }
2138
2139         CDEBUG(D_NET, "Queue peer %s: %d\n",
2140                libcfs_nid2str(lp->lp_primary_nid), rc);
2141
2142         return rc;
2143 }
2144
2145 /*
2146  * Discovery of a peer is complete. Wake all waiters on the peer.
2147  * Call with lnet_net_lock/EX held.
2148  */
2149 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2150 {
2151         struct lnet_msg *msg, *tmp;
2152         int rc = 0;
2153         LIST_HEAD(pending_msgs);
2154
2155         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2156                libcfs_nid2str(lp->lp_primary_nid));
2157
2158         list_del_init(&lp->lp_dc_list);
2159         spin_lock(&lp->lp_lock);
2160         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2161         spin_unlock(&lp->lp_lock);
2162         wake_up(&lp->lp_dc_waitq);
2163
2164         if (lp->lp_rtr_refcount > 0)
2165                 lnet_router_discovery_complete(lp);
2166
2167         lnet_net_unlock(LNET_LOCK_EX);
2168
2169         /* iterate through all pending messages and send them again */
2170         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2171                 list_del_init(&msg->msg_list);
2172                 if (lp->lp_dc_error) {
2173                         lnet_finalize(msg, lp->lp_dc_error);
2174                         continue;
2175                 }
2176
2177                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2178                        lnet_msgtyp2str(msg->msg_type),
2179                        libcfs_id2str(msg->msg_target));
2180                 rc = lnet_send(msg->msg_src_nid_param, msg,
2181                                msg->msg_rtr_nid_param);
2182                 if (rc < 0) {
2183                         CNETERR("Error sending %s to %s: %d\n",
2184                                lnet_msgtyp2str(msg->msg_type),
2185                                libcfs_id2str(msg->msg_target), rc);
2186                         lnet_finalize(msg, rc);
2187                 }
2188         }
2189         lnet_net_lock(LNET_LOCK_EX);
2190         lnet_peer_decref_locked(lp);
2191 }
2192
2193 /*
2194  * Handle inbound push.
2195  * Like any event handler, called with lnet_res_lock/CPT held.
2196  */
2197 void lnet_peer_push_event(struct lnet_event *ev)
2198 {
2199         struct lnet_ping_buffer *pbuf;
2200         struct lnet_peer *lp;
2201
2202         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2203
2204         /* lnet_find_peer() adds a refcount */
2205         lp = lnet_find_peer(ev->source.nid);
2206         if (!lp) {
2207                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2208                        libcfs_nid2str(ev->initiator.nid),
2209                        libcfs_nid2str(ev->source.nid));
2210                 pbuf->pb_needs_post = true;
2211                 return;
2212         }
2213
2214         /* Ensure peer state remains consistent while we modify it. */
2215         spin_lock(&lp->lp_lock);
2216
2217         /*
2218          * If some kind of error happened the contents of the message
2219          * cannot be used. Clear the NIDS_UPTODATE and set the
2220          * FORCE_PING flag to trigger a ping.
2221          */
2222         if (ev->status) {
2223                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2224                 lp->lp_state |= LNET_PEER_FORCE_PING;
2225                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2226                        ev->status,
2227                        libcfs_nid2str(lp->lp_primary_nid),
2228                        libcfs_nid2str(ev->source.nid));
2229                 goto out;
2230         }
2231
2232         /*
2233          * A push with invalid or corrupted info. Clear the NIDS_UPTODATE
2234          * flag and set FORCE_PING to trigger a ping.
2235          */
2236         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2237                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2238                 lp->lp_state |= LNET_PEER_FORCE_PING;
2239                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2240                        libcfs_nid2str(lp->lp_primary_nid));
2241                 goto out;
2242         }
2243
2244         /*
2245          * Make sure we'll allocate the correct size ping buffer when
2246          * pinging the peer.
2247          */
2248         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2249                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2250
2251         /*
2252          * A non-Multi-Rail peer is not supposed to be capable of
2253          * sending a push.
2254          */
2255         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2256                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2257                        libcfs_nid2str(lp->lp_primary_nid));
2258                 goto out;
2259         }
2260
2261         /*
2262          * The peer may have discovery disabled at its end. Set
2263          * NO_DISCOVERY as appropriate.
2264          */
2265         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2266                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2267                        libcfs_nid2str(lp->lp_primary_nid));
2268                 /*
2269                  * Mark the peer for deletion if we already know about it and
2270                  * it is going from discovery enabled to discovery disabled.
2271                  */
2272                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2273                                       LNET_PEER_DISCOVERING)) &&
2274                      lp->lp_state & LNET_PEER_DISCOVERED) {
2275                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2276                                libcfs_nid2str(lp->lp_primary_nid),
2277                                lp->lp_state);
2278                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2279                 }
2280                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2281         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2282                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2283                        libcfs_nid2str(lp->lp_primary_nid));
2284                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2285         }
2286
2287         /*
2288          * Update the MULTI_RAIL flag based on the push. If the peer
2289          * was configured with DLC then the setting should match what
2290          * DLC put in.
2291          * NB: We verified above that the MR feature bit is set in pi_features
2292          */
2293         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2294                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2295                        libcfs_nid2str(lp->lp_primary_nid), lp);
2296         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2297                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2298                       libcfs_nid2str(lp->lp_primary_nid));
2299         } else if (lnet_peer_discovery_disabled) {
2300                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2301                        libcfs_nid2str(lp->lp_primary_nid), lp);
2302         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2303                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2304                        libcfs_nid2str(lp->lp_primary_nid), lp);
2305         } else {
2306                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2307                        libcfs_nid2str(lp->lp_primary_nid), lp);
2308                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2309                 lnet_peer_clr_non_mr_pref_nids(lp);
2310         }
2311
2312         /*
2313          * Check for truncation of the Put message. Clear the
2314          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2315          * and tell discovery to allocate a bigger buffer.
2316          */
2317         if (ev->mlength < ev->rlength) {
2318                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2319                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2320                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2321                 lp->lp_state |= LNET_PEER_FORCE_PING;
2322                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2323                        libcfs_nid2str(lp->lp_primary_nid),
2324                        pbuf->pb_info.pi_nnis);
2325                 goto out;
2326         }
2327
2328         /* always assume new data */
2329         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2330         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2331
2332         /*
2333          * If there is data present that hasn't been processed yet,
2334          * we'll replace it if the Put contained newer data and it
2335          * fits. We're racing with a Ping or earlier Push in this
2336          * case.
2337          */
2338         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2339                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2340                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2341                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2342                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2343                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2344                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2345                               libcfs_nid2str(lp->lp_primary_nid),
2346                               LNET_PING_BUFFER_SEQNO(pbuf),
2347                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2348                 }
2349                 goto out;
2350         }
2351
2352         /*
2353          * Allocate a buffer to copy the data. On a failure we drop
2354          * the Push and set FORCE_PING to force the discovery
2355          * thread to fix the problem by pinging the peer.
2356          */
2357         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2358         if (!lp->lp_data) {
2359                 lp->lp_state |= LNET_PEER_FORCE_PING;
2360                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2361                        libcfs_nid2str(lp->lp_primary_nid),
2362                        LNET_PING_BUFFER_SEQNO(pbuf));
2363                 goto out;
2364         }
2365
2366         /* Success */
2367         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2368                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2369         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2370         CDEBUG(D_NET, "Received Push %s %u\n",
2371                libcfs_nid2str(lp->lp_primary_nid),
2372                LNET_PING_BUFFER_SEQNO(pbuf));
2373
2374 out:
2375         /* We've processed this buffer. It can be reposted */
2376         pbuf->pb_needs_post = true;
2377
2378         /*
2379          * Queue the peer for discovery if it is not up to date. If it was
2380          * already queued, force it onto the request queue and wake the
2381          * discovery thread, because its status changed.
2382          */
2383         spin_unlock(&lp->lp_lock);
2384         lnet_net_lock(LNET_LOCK_EX);
2385         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2386                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2387                 wake_up(&the_lnet.ln_dc_waitq);
2388         }
2389         /* Drop refcount from lookup */
2390         lnet_peer_decref_locked(lp);
2391         lnet_net_unlock(LNET_LOCK_EX);
2392 }
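
/*
 * Annotation (summary of the handler above): outcomes of an inbound
 * Push for the peer's state.
 *
 *	ev->status != 0        -> clear NIDS_UPTODATE, set FORCE_PING
 *	corrupted ping info    -> clear NIDS_UPTODATE, set FORCE_PING
 *	truncated Put          -> clear NIDS_UPTODATE, set FORCE_PING,
 *	                          grow the push target buffer
 *	buffer allocation fail -> set FORCE_PING
 *	success                -> set DATA_PRESENT; the data is merged
 *	                          later by the discovery thread
 *
 * In every case the buffer is marked for reposting and the peer is
 * (re)queued for discovery if it is not up to date.
 */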
2393
2394 /*
2395  * Clear the discovery error state, unless we're already discovering
2396  * this peer, in which case the error is current.
2397  */
2398 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2399 {
2400         spin_lock(&lp->lp_lock);
2401         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2402                 lp->lp_dc_error = 0;
2403         spin_unlock(&lp->lp_lock);
2404 }
2405
2406 /*
2407  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2408  * dropped/retaken within this function. An lnet_peer_ni is passed in
2409  * because discovery could tear down an lnet_peer.
2410  */
2411 int
2412 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2413 {
2414         DEFINE_WAIT(wait);
2415         struct lnet_peer *lp;
2416         int rc = 0;
2417         int count = 0;
2418
2419 again:
2420         lnet_net_unlock(cpt);
2421         lnet_net_lock(LNET_LOCK_EX);
2422         lp = lpni->lpni_peer_net->lpn_peer;
2423         lnet_peer_clear_discovery_error(lp);
2424
2425         /*
2426          * We're willing to be interrupted. The lpni can become a
2427          * zombie if we race with DLC, so we must check for that.
2428          */
2429         for (;;) {
2430                 /* Keep lp alive when the lnet_net_lock is unlocked */
2431                 lnet_peer_addref_locked(lp);
2432                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2433                 if (signal_pending(current))
2434                         break;
2435                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2436                         break;
2437                 /*
2438                  * Don't repeat discovery if discovery is disabled. This is
2439                  * done to ensure we can use discovery as a standard ping as
2440                  * well, for backwards compatibility with routers which do
2441                  * not support discovery or have it disabled.
2442                  */
2443                 if (lnet_is_discovery_disabled(lp) && count > 0)
2444                         break;
2445                 if (lp->lp_dc_error)
2446                         break;
2447                 if (lnet_peer_is_uptodate(lp))
2448                         break;
2449                 lnet_peer_queue_for_discovery(lp);
2450                 count++;
2451                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2452
2453                 /*
2454                  * If caller requested a non-blocking operation then
2455                  * return immediately. Once discovery is complete any
2456                  * pending messages that were stopped due to discovery
2457                  * will be transmitted.
2458                  */
2459                 if (!block)
2460                         break;
2461
2462                 lnet_net_unlock(LNET_LOCK_EX);
2463                 schedule();
2464                 finish_wait(&lp->lp_dc_waitq, &wait);
2465                 lnet_net_lock(LNET_LOCK_EX);
2466                 lnet_peer_decref_locked(lp);
2467                 /* Peer may have changed */
2468                 lp = lpni->lpni_peer_net->lpn_peer;
2469         }
2470         finish_wait(&lp->lp_dc_waitq, &wait);
2471
2472         lnet_net_unlock(LNET_LOCK_EX);
2473         lnet_net_lock(cpt);
2474         lnet_peer_decref_locked(lp);
2475         /*
2476          * The peer may have changed, so re-check and rediscover if that turns
2477          * out to have been the case. The reference count on lp ensured that
2478          * even if it was unlinked from lpni the memory could not be recycled.
2479          * Thus the check below is sufficient to determine whether the peer
2480          * changed. If the peer changed, then lp must not be dereferenced.
2481          */
2482         if (lp != lpni->lpni_peer_net->lpn_peer)
2483                 goto again;
2484
2485         if (signal_pending(current))
2486                 rc = -EINTR;
2487         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2488                 rc = -ESHUTDOWN;
2489         else if (lp->lp_dc_error)
2490                 rc = lp->lp_dc_error;
2491         else if (!block)
2492                 CDEBUG(D_NET, "non-blocking discovery\n");
2493         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2494                 goto again;
2495
2496         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2497                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2498                libcfs_nid2str(lpni->lpni_nid), rc,
2499                (!block) ? "pending discovery" : "discovery complete");
2500
2501         return rc;
2502 }
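
/*
 * Caller sketch (annotation, not part of the original source): how a
 * blocking discovery might be issued on the peer NI for a hypothetical
 * NID, following the locking rules documented above (ln_api_mutex held,
 * lnet_net_lock(cpt) held around both calls).
 *
 *	struct lnet_peer_ni *lpni;
 *	int rc;
 *
 *	lnet_net_lock(cpt);
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni)) {
 *		rc = PTR_ERR(lpni);
 *	} else {
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *		lnet_peer_ni_decref_locked(lpni);
 *	}
 *	lnet_net_unlock(cpt);
 */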
2503
2504 /* Handle an incoming ack for a push. */
2505 static void
2506 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2507 {
2508         struct lnet_ping_buffer *pbuf;
2509
2510         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2511         spin_lock(&lp->lp_lock);
2512         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2513         lp->lp_push_error = ev->status;
2514         if (ev->status)
2515                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2516         else
2517                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2518         spin_unlock(&lp->lp_lock);
2519
2520         CDEBUG(D_NET, "peer %s ev->status %d\n",
2521                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2522 }
2523
2524 /* Handle a Reply message. This is the reply to a Ping message. */
2525 static void
2526 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2527 {
2528         struct lnet_ping_buffer *pbuf;
2529         int rc;
2530
2531         spin_lock(&lp->lp_lock);
2532
2533         lp->lp_disc_src_nid = ev->target.nid;
2534
2535         /*
2536          * If some kind of error happened, the contents of the message
2537          * cannot be used. Set PING_FAILED to trigger a retry.
2538          */
2539         if (ev->status) {
2540                 lp->lp_state |= LNET_PEER_PING_FAILED;
2541                 lp->lp_ping_error = ev->status;
2542                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2543                        ev->status,
2544                        libcfs_nid2str(lp->lp_primary_nid),
2545                        libcfs_nid2str(ev->source.nid));
2546                 goto out;
2547         }
2548
2549         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2550         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2551                 lnet_swap_pinginfo(pbuf);
2552
2553         /*
2554          * A reply with invalid or corrupted info. Set PING_FAILED to
2555          * trigger a retry.
2556          */
2557         rc = lnet_ping_info_validate(&pbuf->pb_info);
2558         if (rc) {
2559                 lp->lp_state |= LNET_PEER_PING_FAILED;
2560                 lp->lp_ping_error = 0;
2561                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2562                        libcfs_nid2str(lp->lp_primary_nid), rc);
2563                 goto out;
2564         }
2565
2566
2568          * The peer may have discovery disabled at its end. Set
2569          * NO_DISCOVERY as appropriate.
2570          */
2571         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2572             !lnet_peer_discovery_disabled) {
2573                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2574                        libcfs_nid2str(lp->lp_primary_nid));
2575                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2576         } else {
2577                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2578                        libcfs_nid2str(lp->lp_primary_nid));
2579                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2580         }
2581
2582         /*
2583          * Update the MULTI_RAIL flag based on the reply. If the peer
2584          * was configured with DLC then the setting should match what
2585          * DLC put in.
2586          */
2587         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2588                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2589                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2590                                libcfs_nid2str(lp->lp_primary_nid), lp);
2591                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2592                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2593                               libcfs_nid2str(lp->lp_primary_nid));
2594                 } else if (lnet_peer_discovery_disabled) {
2595                         CDEBUG(D_NET,
2596                                "peer %s(%p) not MR: DD disabled locally\n",
2597                                libcfs_nid2str(lp->lp_primary_nid), lp);
2598                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2599                         CDEBUG(D_NET,
2600                                "peer %s(%p) not MR: DD disabled remotely\n",
2601                                libcfs_nid2str(lp->lp_primary_nid), lp);
2602                 } else {
2603                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2604                                libcfs_nid2str(lp->lp_primary_nid), lp);
2605                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2606                         lnet_peer_clr_non_mr_pref_nids(lp);
2607                 }
2608         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2609                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2610                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2611                               libcfs_nid2str(lp->lp_primary_nid));
2612                 } else {
2613                         CERROR("Multi-Rail state vanished from %s\n",
2614                                libcfs_nid2str(lp->lp_primary_nid));
2615                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2616                 }
2617         }
2618
2619         /*
2620          * Make sure we'll allocate the correct size ping buffer when
2621          * pinging the peer.
2622          */
2623         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2624                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2625
2626         /*
2627          * Check for truncation of the Reply. Clear PING_SENT and set
2628          * PING_FAILED to trigger a retry.
2629          */
2630         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2631                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2632                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2633                 lp->lp_state |= LNET_PEER_PING_FAILED;
2634                 lp->lp_ping_error = 0;
2635                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2636                        libcfs_nid2str(lp->lp_primary_nid),
2637                        pbuf->pb_info.pi_nnis);
2638                 goto out;
2639         }
2640
2641         /*
2642          * Check the sequence numbers in the reply. These are only
2643          * available if the reply came from a Multi-Rail peer.
2644          */
2645         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2646             pbuf->pb_info.pi_nnis > 1 &&
2647             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2648                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2649                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2650                                 libcfs_nid2str(lp->lp_primary_nid),
2651                                 LNET_PING_BUFFER_SEQNO(pbuf),
2652                                 lp->lp_peer_seqno);
2653
2654                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2655         }
2656
2657         /* We're happy with the state of the data in the buffer. */
2658         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2659                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2660         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2661                 lnet_ping_buffer_decref(lp->lp_data);
2662         else
2663                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2664         lnet_ping_buffer_addref(pbuf);
2665         lp->lp_data = pbuf;
2666 out:
2667         lp->lp_state &= ~LNET_PEER_PING_SENT;
2668         spin_unlock(&lp->lp_lock);
2669
2670         lnet_net_lock(LNET_LOCK_EX);
2671         /*
2672          * If this peer is a gateway, call the routing callback to
2673          * handle the ping reply
2674          */
2675         if (lp->lp_rtr_refcount > 0)
2676                 lnet_router_discovery_ping_reply(lp);
2677         lnet_net_unlock(LNET_LOCK_EX);
2678 }
2679
2680 /*
2681  * Send event handling. Only matters for error cases, where we clean
2682  * up state on the peer and peer_ni that would otherwise be updated in
2683  * the REPLY event handler for a successful Ping, and the ACK event
2684  * handler for a successful Push.
2685  */
2686 static int
2687 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2688 {
2689         int rc = 0;
2690
2691         if (!ev->status)
2692                 goto out;
2693
2694         spin_lock(&lp->lp_lock);
2695         if (ev->msg_type == LNET_MSG_GET) {
2696                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2697                 lp->lp_state |= LNET_PEER_PING_FAILED;
2698                 lp->lp_ping_error = ev->status;
2699         } else { /* ev->msg_type == LNET_MSG_PUT */
2700                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2701                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2702                 lp->lp_push_error = ev->status;
2703         }
2704         spin_unlock(&lp->lp_lock);
2705         rc = LNET_REDISCOVER_PEER;
2706 out:
2707         CDEBUG(D_NET, "%s Send to %s: %d\n",
2708                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2709                 libcfs_nid2str(ev->target.nid), rc);
2710         return rc;
2711 }
2712
2713 /*
2714  * Unlink event handling. This event is only seen if a call to
2715  * LNetMDUnlink() caused the event to be unlinked. If this call was
2716  * made after the event was set up in LNetGet() or LNetPut() then we
2717  * assume the Ping or Push timed out.
2718  */
2719 static void
2720 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2721 {
2722         spin_lock(&lp->lp_lock);
2723         /* We've passed through LNetGet() */
2724         if (lp->lp_state & LNET_PEER_PING_SENT) {
2725                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2726                 lp->lp_state |= LNET_PEER_PING_FAILED;
2727                 lp->lp_ping_error = -ETIMEDOUT;
2728                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2729                         libcfs_nid2str(lp->lp_primary_nid));
2730         }
2731         /* We've passed through LNetPut() */
2732         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2733                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2734                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2735                 lp->lp_push_error = -ETIMEDOUT;
2736                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2737                         libcfs_nid2str(lp->lp_primary_nid));
2738         }
2739         spin_unlock(&lp->lp_lock);
2740 }
2741
2742 /*
2743  * Event handler for the discovery EQ.
2744  *
2745  * Called with lnet_res_lock(cpt) held. The cpt is the
2746  * lnet_cpt_of_cookie() of the md handle cookie.
2747  */
2748 static void lnet_discovery_event_handler(struct lnet_event *event)
2749 {
2750         struct lnet_peer *lp = event->md_user_ptr;
2751         struct lnet_ping_buffer *pbuf;
2752         int rc;
2753
2754         /* discovery needs to take another look */
2755         rc = LNET_REDISCOVER_PEER;
2756
2757         CDEBUG(D_NET, "Received event: %d\n", event->type);
2758
2759         switch (event->type) {
2760         case LNET_EVENT_ACK:
2761                 lnet_discovery_event_ack(lp, event);
2762                 break;
2763         case LNET_EVENT_REPLY:
2764                 lnet_discovery_event_reply(lp, event);
2765                 break;
2766         case LNET_EVENT_SEND:
2767                 /* Only send failure triggers a retry. */
2768                 rc = lnet_discovery_event_send(lp, event);
2769                 break;
2770         case LNET_EVENT_UNLINK:
2771                 /* LNetMDUnlink() was called */
2772                 lnet_discovery_event_unlink(lp, event);
2773                 break;
2774         default:
2775                 /* Invalid events. */
2776                 LBUG();
2777         }
2778         lnet_net_lock(LNET_LOCK_EX);
2779         if (event->unlinked) {
2780                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2781                 lnet_ping_buffer_decref(pbuf);
2782                 lnet_peer_decref_locked(lp);
2783         }
2784
2785         /* put peer back at end of request queue, if discovery not already
2786          * done */
2787         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2788                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2789                 wake_up(&the_lnet.ln_dc_waitq);
2790         }
2791         lnet_net_unlock(LNET_LOCK_EX);
2792 }
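
/*
 * Annotation (sketch under an assumption, not upstream code): the Ping
 * and Push MDs are created elsewhere in this file with the peer stored
 * in the MD's user pointer, e.g.
 *
 *	md.user_ptr = lp;	// surfaces as event->md_user_ptr here
 *
 * together with references on both the peer and the ping buffer. That
 * is why this handler can recover the peer directly from the event, and
 * why those references are dropped once the MD is unlinked.
 */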
2793
2794 /*
2795  * Build a peer from incoming data.
2796  *
2797  * The NIDs in the incoming data are supposed to be structured as follows:
2798  *  - loopback
2799  *  - primary NID
2800  *  - other NIDs in same net
2801  *  - NIDs in second net
2802  *  - NIDs in third net
2803  *  - ...
2804  * This is due to the way the list of NIDs in the data is created.
2805  *
2806  * Note that this function will mark the peer uptodate unless an
2807  * ENOMEM is encountered. All other errors are due to a conflict
2808  * between the DLC configuration and what discovery sees. We treat DLC
2809  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2810  * peer from becoming stuck in discovery.
2811  */
2812 static int lnet_peer_merge_data(struct lnet_peer *lp,
2813                                 struct lnet_ping_buffer *pbuf)
2814 {
2815         struct lnet_peer_net *lpn;
2816         struct lnet_peer_ni *lpni;
2817         lnet_nid_t *curnis = NULL;
2818         struct lnet_ni_status *addnis = NULL;
2819         lnet_nid_t *delnis = NULL;
2820         unsigned flags;
2821         int ncurnis;
2822         int naddnis;
2823         int ndelnis;
2824         int nnis = 0;
2825         int i;
2826         int j;
2827         int rc;
2828
2829         flags = LNET_PEER_DISCOVERED;
2830         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2831                 flags |= LNET_PEER_MULTI_RAIL;
2832
2833         /*
2834          * Cache the routing feature for the peer; whether it is enabled
2835          * or disabled, as reported by the remote peer.
2836          */
2837         spin_lock(&lp->lp_lock);
2838         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2839                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2840         else
2841                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2842         spin_unlock(&lp->lp_lock);
2843
2844         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2845         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2846         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2847         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2848         if (!curnis || !addnis || !delnis) {
2849                 rc = -ENOMEM;
2850                 goto out;
2851         }
2852         ncurnis = 0;
2853         naddnis = 0;
2854         ndelnis = 0;
2855
2856         /* Construct the list of NIDs present in peer. */
2857         lpni = NULL;
2858         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2859                 curnis[ncurnis++] = lpni->lpni_nid;
2860
2861         /*
2862          * Check for NIDs in pbuf not present in curnis[].
2863          * The loop starts at 1 to skip the loopback NID.
2864          */
2865         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2866                 for (j = 0; j < ncurnis; j++)
2867                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2868                                 break;
2869                 if (j == ncurnis)
2870                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2871         }
2872         /*
2873          * Check for NIDs in curnis[] not present in pbuf.
2874          * The nested loop starts at 1 to skip the loopback NID.
2875          *
2876          * But never add the loopback NID to delnis[]: if it is
2877          * present in curnis[] then this peer is for this node.
2878          */
2879         for (i = 0; i < ncurnis; i++) {
2880                 if (curnis[i] == LNET_NID_LO_0)
2881                         continue;
2882                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2883                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2884                                 /*
2885                                  * update the information we cache for the
2886                                  * peer with the latest information we
2887                                  * received
2888                                  */
2889                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2890                                 if (lpni) {
2891                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2892                                         lnet_peer_ni_decref_locked(lpni);
2893                                 }
2894                                 break;
2895                         }
2896                 }
2897                 if (j == pbuf->pb_info.pi_nnis)
2898                         delnis[ndelnis++] = curnis[i];
2899         }
2900
2901         /*
2902          * If we get here and the discovery is disabled then we don't want
2903          * to add or delete any NIs. We just updated the ones we have some
2904          * information on, and call it a day.
2905          */
2906         rc = 0;
2907         if (lnet_is_discovery_disabled(lp))
2908                 goto out;
2909
2910         for (i = 0; i < naddnis; i++) {
2911                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2912                 if (rc) {
2913                         CERROR("Error adding NID %s to peer %s: %d\n",
2914                                libcfs_nid2str(addnis[i].ns_nid),
2915                                libcfs_nid2str(lp->lp_primary_nid), rc);
2916                         if (rc == -ENOMEM)
2917                                 goto out;
2918                 }
2919                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2920                 if (lpni) {
2921                         lpni->lpni_ns_status = addnis[i].ns_status;
2922                         lnet_peer_ni_decref_locked(lpni);
2923                 }
2924         }
2925
2926         for (i = 0; i < ndelnis; i++) {
2927                 /*
2928                  * for routers it's okay to delete the primary_nid because
2929                  * the upper layers don't really rely on it. So if we're
2930                  * being told that the router changed its primary_nid
2931                  * then it's okay to delete it.
2932                  */
2933                 if (lp->lp_rtr_refcount > 0)
2934                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2935                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2936                 if (rc) {
2937                         CERROR("Error deleting NID %s from peer %s: %d\n",
2938                                libcfs_nid2str(delnis[i]),
2939                                libcfs_nid2str(lp->lp_primary_nid), rc);
2940                         if (rc == -ENOMEM)
2941                                 goto out;
2942                 }
2943         }
2944
2945         /* The peer net for the primary NID should be the first entry in the
2946          * peer's lp_peer_nets list, and the peer NI for the primary NID should
2947          * be the first entry in its peer net's lpn_peer_nis list.
2948          */
2949         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
2950         if (!lpni) {
2951                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
2952                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
2953                 goto out;
2954         }
2955
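        /*
         * The lookup above took a reference that can be dropped right
         * away: the caller holds ln_api_mutex, so the peer NI cannot
         * be freed before the list manipulation below.
         */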
2956         lnet_peer_ni_decref_locked(lpni);
2957
2958         lpn = lpni->lpni_peer_net;
2959         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
2960                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
2961
2962         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
2963                 list_move(&lpni->lpni_peer_nis,
2964                           &lpni->lpni_peer_net->lpn_peer_nis);
2965
2966         /*
2967          * Errors other than -ENOMEM are due to peers having been
2968          * configured with DLC. Ignore these because DLC overrides
2969          * Discovery.
2970          */
2971         rc = 0;
2972 out:
2973         CFS_FREE_PTR_ARRAY(curnis, nnis);
2974         CFS_FREE_PTR_ARRAY(addnis, nnis);
2975         CFS_FREE_PTR_ARRAY(delnis, nnis);
2976         lnet_ping_buffer_decref(pbuf);
2977         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2978
2979         if (rc) {
2980                 spin_lock(&lp->lp_lock);
2981                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2982                 lp->lp_state |= LNET_PEER_FORCE_PING;
2983                 spin_unlock(&lp->lp_lock);
2984         }
2985         return rc;
2986 }
2987
2988 /*
2989  * The data in pbuf says lp is its primary peer, but the data was
2990  * received by a different peer. Try to update lp with the data.
2991  */
2992 static int
2993 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2994 {
2995         struct lnet_handle_md mdh;
2996
2997         /* Queue lp for discovery, and force it on the request queue. */
2998         lnet_net_lock(LNET_LOCK_EX);
2999         if (lnet_peer_queue_for_discovery(lp))
3000                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3001         lnet_net_unlock(LNET_LOCK_EX);
3002
3003         LNetInvalidateMDHandle(&mdh);
3004
3005         /*
3006          * Decide whether we can move the peer to the DATA_PRESENT state.
3007          *
3008          * We replace stale data for a multi-rail peer, repair PING_FAILED
3009          * status, and preempt FORCE_PING.
3010          *
3011          * If after that we have DATA_PRESENT, we merge it into this peer.
3012          */
3013         spin_lock(&lp->lp_lock);
3014         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3015                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3016                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3017                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3018                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3019                         lnet_ping_buffer_decref(pbuf);
3020                         pbuf = lp->lp_data;
3021                         lp->lp_data = NULL;
3022                 }
3023         }
3024         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3025                 lnet_ping_buffer_decref(lp->lp_data);
3026                 lp->lp_data = NULL;
3027                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3028         }
3029         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3030                 mdh = lp->lp_ping_mdh;
3031                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3032                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3033                 lp->lp_ping_error = 0;
3034         }
3035         if (lp->lp_state & LNET_PEER_FORCE_PING)
3036                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3037         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3038         spin_unlock(&lp->lp_lock);
3039
3040         if (!LNetMDHandleIsInvalid(mdh))
3041                 LNetMDUnlink(mdh);
3042
3043         if (pbuf)
3044                 return lnet_peer_merge_data(lp, pbuf);
3045
3046         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3047         return 0;
3048 }
3049
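/*
 * Return true if nid appears in pinfo. Used by lnet_peer_data_present()
 * below to detect the case where our cached primary NID is still listed
 * in the peer's ping info even though the peer reports a different
 * primary NID.
 */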
3050 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3051 {
3052         int i;
3053
3054         for (i = 0; i < pinfo->pi_nnis; i++) {
3055                 if (pinfo->pi_ni[i].ns_nid == nid)
3056                         return true;
3057         }
3058
3059         return false;
3060 }
3061
3062 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3063  * to the discovery queue a reference was taken that will prevent the peer from
3064  * actually being freed by this function. After this function exits, the
3065  * discovery thread should call lnet_peer_discovery_complete(), which will
3066  * drop that reference and wake any waiters that may also be holding a
3067  * ref on the peer.
3068  */
3069 static int lnet_peer_deletion(struct lnet_peer *lp)
3070 __must_hold(&lp->lp_lock)
3071 {
3072         struct list_head rlist;
3073         struct lnet_route *route, *tmp;
3074         int sensitivity = lp->lp_health_sensitivity;
3075
3076         INIT_LIST_HEAD(&rlist);
3077
3078         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3079                           LNET_PEER_FORCE_PUSH);
3080         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3081                libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3082
3083         /* no-op if lnet_peer_del() has already been called on this peer */
3084         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3085                 return 0;
3086
3087         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3088                 return -ESHUTDOWN;
3089
3090         spin_unlock(&lp->lp_lock);
3091
3092         mutex_lock(&the_lnet.ln_api_mutex);
3093
3094         lnet_net_lock(LNET_LOCK_EX);
3095         /* remove the peer from the discovery work
3096          * queue, if it is on it, in preparation
3097          * for deleting it.
3098          */
3099         if (!list_empty(&lp->lp_dc_list))
3100                 list_del(&lp->lp_dc_list);
3101         list_for_each_entry_safe(route, tmp,
3102                                  &lp->lp_routes,
3103                                  lr_gwlist)
3104                 lnet_move_route(route, NULL, &rlist);
3105         lnet_net_unlock(LNET_LOCK_EX);
3106
3107         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3108         lnet_peer_del(lp);
3109
3110         list_for_each_entry_safe(route, tmp,
3111                                  &rlist, lr_list) {
3112                 /* re-add these routes */
3113                 lnet_add_route(route->lr_net,
3114                                route->lr_hops,
3115                                route->lr_nid,
3116                                route->lr_priority,
3117                                sensitivity);
3118                 LIBCFS_FREE(route, sizeof(*route));
3119         }
3120
3121         mutex_unlock(&the_lnet.ln_api_mutex);
3122
3123         spin_lock(&lp->lp_lock);
3124
3125         return 0;
3126 }
3127
3128 /*
3129  * Update a peer using the data received.
3130  */
3131 static int lnet_peer_data_present(struct lnet_peer *lp)
3132 __must_hold(&lp->lp_lock)
3133 {
3134         struct lnet_ping_buffer *pbuf;
3135         struct lnet_peer_ni *lpni;
3136         lnet_nid_t nid = LNET_NID_ANY;
3137         unsigned flags;
3138         int rc = 0;
3139
3140         pbuf = lp->lp_data;
3141         lp->lp_data = NULL;
3142         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3143         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3144         spin_unlock(&lp->lp_lock);
3145
3146         /*
3147          * Modifications of peer structures are done while holding the
3148          * ln_api_mutex. A global lock is required because we may be
3149          * modifying multiple peer structures, and a mutex greatly
3150          * simplifies memory management.
3151          *
3152          * The actual changes to the data structures must also protect
3153          * against concurrent lookups, for which the lnet_net_lock in
3154          * LNET_LOCK_EX mode is used.
3155          */
3156         mutex_lock(&the_lnet.ln_api_mutex);
3157         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3158                 rc = -ESHUTDOWN;
3159                 goto out;
3160         }
3161
3162         /*
3163          * If this peer is not on the peer list then it is being torn
3164          * down, and our reference count may be all that is keeping it
3165          * alive. Don't do any work on it.
3166          */
3167         if (list_empty(&lp->lp_peer_list))
3168                 goto out;
3169
3170         flags = LNET_PEER_DISCOVERED;
3171         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3172                 flags |= LNET_PEER_MULTI_RAIL;
3173
3174         /*
3175          * Check whether the primary NID in the message matches the
3176          * primary NID of the peer. If it does, update the peer. If
3177          * it does not, check whether there is already a peer with
3178          * that primary NID. If no such peer exists, try to update
3179          * the primary NID of the current peer (allowed if it was
3180          * created due to message traffic) and complete the update.
3181          * If the peer did exist, hand off the data to it.
3182          *
3183          * The peer for the loopback interface is a special case: this
3184          * is the peer for the local node, and we want to set its
3185          * primary NID to the correct value here. Moreover, this peer
3186          * can show up with only the loopback NID in the ping buffer.
3187          */
3188         if (pbuf->pb_info.pi_nnis <= 1)
3189                 goto out;
3190         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3191         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3192                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3193                 if (!rc)
3194                         rc = lnet_peer_merge_data(lp, pbuf);
3195         /*
3196          * If the primary NID we have cached for the peer is present
3197          * in the ping info returned by the peer, but it is not the
3198          * primary NID the peer itself reports, and discovery is
3199          * disabled, then we don't want to update our local peer info
3200          * by adding or removing NIDs. We just want to update the
3201          * status of the NIDs we currently have recorded for that peer.
3202          */
3203         } else if (lp->lp_primary_nid == nid ||
3204                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3205                     lnet_is_discovery_disabled(lp))) {
3206                 rc = lnet_peer_merge_data(lp, pbuf);
3207         } else {
3208                 lpni = lnet_find_peer_ni_locked(nid);
3209                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3210                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3211                         if (rc) {
3212                                 CERROR("Primary NID error %s versus %s: %d\n",
3213                                        libcfs_nid2str(lp->lp_primary_nid),
3214                                        libcfs_nid2str(nid), rc);
3215                         } else {
3216                                 rc = lnet_peer_merge_data(lp, pbuf);
3217                         }
3218                         if (lpni)
3219                                 lnet_peer_ni_decref_locked(lpni);
3220                 } else {
3221                         struct lnet_peer *new_lp;
3222                         new_lp = lpni->lpni_peer_net->lpn_peer;
3223                         /*
3224                          * If lp has discovery/MR enabled then new_lp
3225                          * should have discovery/MR enabled as well,
3226                          * since it is the same peer we are about to merge.
3227                          */
3228                         spin_lock(&lp->lp_lock);
3229                         spin_lock(&new_lp->lp_lock);
3230                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3231                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3232                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3233                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3234                         /* If we're processing a ping reply then we may be
3235                          * about to send a push to the peer that we ping'd.
3236                          * Since the ping reply that we're processing was
3237                          * received by lp, we need to set the discovery source
3238                          * NID for new_lp to the NID stored in lp.
3239                          */
3240                         if (lp->lp_disc_src_nid != LNET_NID_ANY)
3241                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3242                         spin_unlock(&new_lp->lp_lock);
3243                         spin_unlock(&lp->lp_lock);
3244
3245                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3246                         lnet_consolidate_routes_locked(lp, new_lp);
3247                         lnet_peer_ni_decref_locked(lpni);
3248                 }
3249         }
3250 out:
3251         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3252                lp->lp_state);
3253         mutex_unlock(&the_lnet.ln_api_mutex);
3254
3255         spin_lock(&lp->lp_lock);
3256         /* Tell discovery to re-check the peer immediately. */
3257         if (!rc)
3258                 rc = LNET_REDISCOVER_PEER;
3259         return rc;
3260 }
3261
3262 /*
3263  * A ping failed. Clear the PING_FAILED state and set the
3264  * FORCE_PING state, to ensure a retry even if discovery is
3265  * disabled. This avoids being left with incorrect state.
3266  */
3267 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3268 __must_hold(&lp->lp_lock)
3269 {
3270         struct lnet_handle_md mdh;
3271         int rc;
3272
3273         mdh = lp->lp_ping_mdh;
3274         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3275         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3276         lp->lp_state |= LNET_PEER_FORCE_PING;
3277         rc = lp->lp_ping_error;
3278         lp->lp_ping_error = 0;
3279         spin_unlock(&lp->lp_lock);
3280
3281         if (!LNetMDHandleIsInvalid(mdh))
3282                 LNetMDUnlink(mdh);
3283
3284         CDEBUG(D_NET, "peer %s:%d\n",
3285                libcfs_nid2str(lp->lp_primary_nid), rc);
3286
3287         spin_lock(&lp->lp_lock);
3288         return rc ? rc : LNET_REDISCOVER_PEER;
3289 }
3290
3291 /*
3292  * Select NID to send a Ping or Push to.
3293  */
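/*
 * Direct-connected NIDs are preferred; a routed NID is only chosen when
 * no local net matches. For example (hypothetical nets): if the peer has
 * NIDs on tcp0 and o2ib0 and this node only has a NI on tcp0, the tcp0
 * NID is returned; if neither net is local but a route to o2ib0 exists,
 * the o2ib0 NID is returned instead.
 */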
3294 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3295 {
3296         struct lnet_peer_ni *lpni;
3297
3298         /* Look for a direct-connected NID for this peer. */
3299         lpni = NULL;
3300         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3301                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3302                         continue;
3303                 break;
3304         }
3305         if (lpni)
3306                 return lpni->lpni_nid;
3307
3308         /* Look for a routed-connected NID for this peer. */
3309         lpni = NULL;
3310         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3311                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3312                         continue;
3313                 break;
3314         }
3315         if (lpni)
3316                 return lpni->lpni_nid;
3317
3318         return LNET_NID_ANY;
3319 }
3320
3321 /* Active side of ping. */
3322 static int lnet_peer_send_ping(struct lnet_peer *lp)
3323 __must_hold(&lp->lp_lock)
3324 {
3325         lnet_nid_t pnid;
3326         int nnis;
3327         int rc;
3328         int cpt;
3329
3330         lp->lp_state |= LNET_PEER_PING_SENT;
3331         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3332         spin_unlock(&lp->lp_lock);
3333
3334         cpt = lnet_net_lock_current();
3335         /* Refcount for MD. */
3336         lnet_peer_addref_locked(lp);
3337         pnid = lnet_peer_select_nid(lp);
3338         lnet_net_unlock(cpt);
3339
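        /*
         * Size the ping reply buffer for as many NIs as the peer
         * reported in its last ping data, with LNET_INTERFACES_MIN
         * as the floor.
         */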
3340         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3341
3342         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3343                             the_lnet.ln_dc_handler, false);
3344
3345         /*
3346          * If LNetMDBind in lnet_send_ping fails we need to decrement
3347          * the refcount on the peer ourselves; otherwise LNetMDUnlink
3348          * will be called and its unlink event will drop the refcount.
3349          */
3350         if (rc > 0) {
3351                 lnet_net_lock(cpt);
3352                 lnet_peer_decref_locked(lp);
3353                 lnet_net_unlock(cpt);
3354                 rc = -rc; /* change the rc to negative value */
3355                 goto fail_error;
3356         } else if (rc < 0) {
3357                 goto fail_error;
3358         }
3359
3360         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3361
3362         spin_lock(&lp->lp_lock);
3363         return 0;
3364
3365 fail_error:
3366         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3367         /*
3368          * The errors that get us here are considered hard errors and
3369          * cause Discovery to terminate. So we clear PING_SENT, but do
3370          * not set either PING_FAILED or FORCE_PING. In fact we need
3371          * to clear PING_FAILED, because the unlink event handler will
3372          * have set it if we called LNetMDUnlink() above.
3373          */
3374         spin_lock(&lp->lp_lock);
3375         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3376         return rc;
3377 }
3378
3379 /*
3380  * This function exists because you cannot call LNetMDUnlink() from an
3381  * event handler.
3382  */
3383 static int lnet_peer_push_failed(struct lnet_peer *lp)
3384 __must_hold(&lp->lp_lock)
3385 {
3386         struct lnet_handle_md mdh;
3387         int rc;
3388
3389         mdh = lp->lp_push_mdh;
3390         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3391         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3392         rc = lp->lp_push_error;
3393         lp->lp_push_error = 0;
3394         spin_unlock(&lp->lp_lock);
3395
3396         if (!LNetMDHandleIsInvalid(mdh))
3397                 LNetMDUnlink(mdh);
3398
3399         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3400         spin_lock(&lp->lp_lock);
3401         return rc ? rc : LNET_REDISCOVER_PEER;
3402 }
3403
3404 /*
3405  * Mark the peer as discovered.
3406  */
3407 static int lnet_peer_discovered(struct lnet_peer *lp)
3408 __must_hold(&lp->lp_lock)
3409 {
3410         lp->lp_state |= LNET_PEER_DISCOVERED;
3411         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3412                           LNET_PEER_REDISCOVER);
3413
3414         lp->lp_dc_error = 0;
3415
3416         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3417
3418         return 0;
3419 }
3420
3421 /* Active side of push. */
3422 static int lnet_peer_send_push(struct lnet_peer *lp)
3423 __must_hold(&lp->lp_lock)
3424 {
3425         struct lnet_ping_buffer *pbuf;
3426         struct lnet_process_id id;
3427         struct lnet_md md;
3428         int cpt;
3429         int rc;
3430
3431         /* Don't push to a non-multi-rail peer. */
3432         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3433                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3434                 /* if peer's NIDs are uptodate then peer is discovered */
3435                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3436                         rc = lnet_peer_discovered(lp);
3437                         return rc;
3438                 }
3439
3440                 return 0;
3441         }
3442
3443         lp->lp_state |= LNET_PEER_PUSH_SENT;
3444         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3445         spin_unlock(&lp->lp_lock);
3446
3447         cpt = lnet_net_lock_current();
3448         pbuf = the_lnet.ln_ping_target;
3449         lnet_ping_buffer_addref(pbuf);
3450         lnet_net_unlock(cpt);
3451
3452         /* Push source MD */
3453         md.start     = &pbuf->pb_info;
3454         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3455         md.threshold = 2; /* Put/Ack */
3456         md.max_size  = 0;
3457         md.options   = LNET_MD_TRACK_RESPONSE;
3458         md.handler   = the_lnet.ln_dc_handler;
3459         md.user_ptr  = lp;
3460
3461         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3462         if (rc) {
3463                 lnet_ping_buffer_decref(pbuf);
3464                 CERROR("Can't bind push source MD: %d\n", rc);
3465                 goto fail_error;
3466         }
3467         cpt = lnet_net_lock_current();
3468         /* Refcount for MD. */
3469         lnet_peer_addref_locked(lp);
3470         id.pid = LNET_PID_LUSTRE;
3471         id.nid = lnet_peer_select_nid(lp);
3472         lnet_net_unlock(cpt);
3473
3474         if (id.nid == LNET_NID_ANY) {
3475                 rc = -EHOSTUNREACH;
3476                 goto fail_unlink;
3477         }
3478
3479         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3480                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3481                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3482
3483         /*
3484          * Reset the discovery source NID. There is no need to restrict
3485          * sending from that source if we call lnet_push_update_to_peers():
3486          * it will be set to a specific NID if we initiate discovery
3487          * from scratch.
3488          */
3489         lp->lp_disc_src_nid = LNET_NID_ANY;
3490
3491         if (rc)
3492                 goto fail_unlink;
3493
3494         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3495
3496         spin_lock(&lp->lp_lock);
3497         return 0;
3498
3499 fail_unlink:
3500         LNetMDUnlink(lp->lp_push_mdh);
3501         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3502 fail_error:
3503         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3504         /*
3505          * The errors that get us here are considered hard errors and
3506          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3507          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3508          * because the unlink event handler will have set it if we
3509          * called LNetMDUnlink() above.
3510          */
3511         spin_lock(&lp->lp_lock);
3512         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3513         return rc;
3514 }
3515
3516 /*
3517  * An unrecoverable error was encountered during discovery.
3518  * Set error status in peer and abort discovery.
3519  */
3520 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3521 {
3522         CDEBUG(D_NET, "Discovery error %s: %d\n",
3523                libcfs_nid2str(lp->lp_primary_nid), error);
3524
3525         spin_lock(&lp->lp_lock);
3526         lp->lp_dc_error = error;
3527         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3528         lp->lp_state |= LNET_PEER_REDISCOVER;
3529         spin_unlock(&lp->lp_lock);
3530 }
3531
3532 /*
3533  * Wait for work to be queued or some other change that must be
3534  * attended to. Returns non-zero if the discovery thread should shut
3535  * down.
3536  */
3537 static int lnet_peer_discovery_wait_for_work(void)
3538 {
3539         int cpt;
3540         int rc = 0;
3541
3542         DEFINE_WAIT(wait);
3543
3544         cpt = lnet_net_lock_current();
3545         for (;;) {
3546                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3547                                 TASK_INTERRUPTIBLE);
3548                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3549                         break;
3550                 if (lnet_push_target_resize_needed() ||
3551                     the_lnet.ln_push_target->pb_needs_post)
3552                         break;
3553                 if (!list_empty(&the_lnet.ln_dc_request))
3554                         break;
3555                 if (!list_empty(&the_lnet.ln_msg_resend))
3556                         break;
3557                 lnet_net_unlock(cpt);
3558
3559                 /*
3560                  * Wake up at least once per second to check for peers
3561                  * that have been stuck on the working queue for longer
3562                  * than the peer timeout.
3563                  */
3564                 schedule_timeout(cfs_time_seconds(1));
3565                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3566                 cpt = lnet_net_lock_current();
3567         }
3568         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3569
3570         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3571                 rc = -ESHUTDOWN;
3572
3573         lnet_net_unlock(cpt);
3574
3575         CDEBUG(D_NET, "woken: %d\n", rc);
3576
3577         return rc;
3578 }
3579
3580 /*
3581  * Messages that were pending on a destroyed peer will be put on a global
3582  * resend list. The message resend list will be checked by the discovery
3583  * thread when it wakes up, and the messages will be resent. These
3584  * messages may still be sendable if the lpni which was the initial
3585  * cause of the message re-queue was transferred to another peer.
3586  *
3587  * It is possible that LNet could be shut down while we're iterating
3588  * through the list. lnet_shutdown_lndnets() will attempt to access the
3589  * resend list, but will have to wait until the spinlock is released, by
3590  * which time there shouldn't be any more messages on the resend list.
3591  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3592  * for the messages so they can be released. The other case is that
3593  * lnet_shutdown_lndnets() can finalize all the messages before this
3594  * function can visit the resend list, in which case this function will be
3595  * a no-op.
3596  */
3597 static void lnet_resend_msgs(void)
3598 {
3599         struct lnet_msg *msg, *tmp;
3600         LIST_HEAD(resend);
3601         int rc;
3602
3603         spin_lock(&the_lnet.ln_msg_resend_lock);
3604         list_splice(&the_lnet.ln_msg_resend, &resend);
3605         spin_unlock(&the_lnet.ln_msg_resend_lock);
3606
3607         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3608                 list_del_init(&msg->msg_list);
3609                 rc = lnet_send(msg->msg_src_nid_param, msg,
3610                                msg->msg_rtr_nid_param);
3611                 if (rc < 0) {
3612                         CNETERR("Error sending %s to %s: %d\n",
3613                                lnet_msgtyp2str(msg->msg_type),
3614                                libcfs_id2str(msg->msg_target), rc);
3615                         lnet_finalize(msg, rc);
3616                 }
3617         }
3618 }
3619
3620 /* The discovery thread. */
3621 static int lnet_peer_discovery(void *arg)
3622 {
3623         struct lnet_peer *lp;
3624         int rc;
3625
3626         wait_for_completion(&the_lnet.ln_started);
3627
3628         CDEBUG(D_NET, "started\n");
3629
3630         for (;;) {
3631                 if (lnet_peer_discovery_wait_for_work())
3632                         break;
3633
3634                 if (lnet_push_target_resize_needed())
3635                         lnet_push_target_resize();
3636                 else if (the_lnet.ln_push_target->pb_needs_post)
3637                         lnet_push_target_post(the_lnet.ln_push_target,
3638                                               &the_lnet.ln_push_target_md);
3639
3640                 lnet_resend_msgs();
3641
3642                 lnet_net_lock(LNET_LOCK_EX);
3643                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3644                         lnet_net_unlock(LNET_LOCK_EX);
3645                         break;
3646                 }
3647
3648                 /*
3649                  * Process all incoming discovery work requests.  When
3650                  * discovery must wait on a peer to change state, it
3651                  * is added to the tail of the ln_dc_working queue. A
3652                  * timestamp keeps track of when the peer was added,
3653                  * so we can time out discovery requests that take too
3654                  * long.
3655                  */
3656                 while (!list_empty(&the_lnet.ln_dc_request)) {
3657                         lp = list_first_entry(&the_lnet.ln_dc_request,
3658                                               struct lnet_peer, lp_dc_list);
3659                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3660                         /*
3661                          * set the time the peer was put on the dc_working
3662                          * queue. It shouldn't remain on the queue
3663                          * forever, in case the GET message (for ping)
3664                          * doesn't get a REPLY or the PUT message (for
3665                          * push) doesn't get an ACK.
3666                          */
3667                         lp->lp_last_queued = ktime_get_real_seconds();
3668                         lnet_net_unlock(LNET_LOCK_EX);
3669
3670                         if (lnet_push_target_resize_needed())
3671                                 lnet_push_target_resize();
3672                         else if (the_lnet.ln_push_target->pb_needs_post)
3673                                 lnet_push_target_post(the_lnet.ln_push_target,
3674                                                       &the_lnet.ln_push_target_md);
3675
3676                         /*
3677                          * Select an action depending on the state of
3678                          * the peer and whether discovery is disabled.
3679                          * The check for whether discovery is disabled
3680                          * is done after the code that handles arrived
3681                          * data, cleanup after failures, and forced
3682                          * Pings and Pushes.
3683                          */
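                        /*
                         * Precedence, mirroring the if/else chain below:
                         * deletion, then arrived data, then ping/push
                         * failure cleanup, then forced ping/push, then
                         * a ping for stale NIDs, then a push if needed,
                         * and finally the peer is marked discovered.
                         */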
3684                         spin_lock(&lp->lp_lock);
3685                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3686                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3687                                 lp->lp_state);
3688                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3689                                             LNET_PEER_MARK_DELETED))
3690                                 rc = lnet_peer_deletion(lp);
3691                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3692                                 rc = lnet_peer_data_present(lp);
3693                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3694                                 rc = lnet_peer_ping_failed(lp);
3695                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3696                                 rc = lnet_peer_push_failed(lp);
3697                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3698                                 rc = lnet_peer_send_ping(lp);
3699                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3700                                 rc = lnet_peer_send_push(lp);
3701                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3702                                 rc = lnet_peer_send_ping(lp);
3703                         else if (lnet_peer_needs_push(lp))
3704                                 rc = lnet_peer_send_push(lp);
3705                         else
3706                                 rc = lnet_peer_discovered(lp);
3707                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3708                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3709                                 lp->lp_state, rc);
3710                         spin_unlock(&lp->lp_lock);
3711
3712                         lnet_net_lock(LNET_LOCK_EX);
3713                         if (rc == LNET_REDISCOVER_PEER) {
3714                                 list_move(&lp->lp_dc_list,
3715                                           &the_lnet.ln_dc_request);
3716                         } else if (rc) {
3717                                 lnet_peer_discovery_error(lp, rc);
3718                         }
3719                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3720                                 lnet_peer_discovery_complete(lp);
3721                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3722                                 break;
3723
3724                 }
3725
3726                 lnet_net_unlock(LNET_LOCK_EX);
3727         }
3728
3729         CDEBUG(D_NET, "stopping\n");
3730         /*
3731          * Clean up before telling lnet_peer_discovery_stop() that
3732          * we're done. Use wake_up() below to somewhat reduce the
3733          * size of the thundering herd if there are multiple threads
3734          * waiting on discovery of a single peer.
3735          */
3736
3737         /* Queue cleanup 1: stop all pending pings and pushes. */
3738         lnet_net_lock(LNET_LOCK_EX);
3739         while (!list_empty(&the_lnet.ln_dc_working)) {
3740                 lp = list_first_entry(&the_lnet.ln_dc_working,
3741                                       struct lnet_peer, lp_dc_list);
3742                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3743                 lnet_net_unlock(LNET_LOCK_EX);
3744                 lnet_peer_cancel_discovery(lp);
3745                 lnet_net_lock(LNET_LOCK_EX);
3746         }
3747         lnet_net_unlock(LNET_LOCK_EX);
3748
3749         /* Queue cleanup 2: wait for the expired queue to clear. */
3750         while (!list_empty(&the_lnet.ln_dc_expired))
3751                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3752
3753         /* Queue cleanup 3: clear the request queue. */
3754         lnet_net_lock(LNET_LOCK_EX);
3755         while (!list_empty(&the_lnet.ln_dc_request)) {
3756                 lp = list_first_entry(&the_lnet.ln_dc_request,
3757                                       struct lnet_peer, lp_dc_list);
3758                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3759                 lnet_peer_discovery_complete(lp);
3760         }
3761         lnet_net_unlock(LNET_LOCK_EX);
3762
3763         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3764         the_lnet.ln_dc_handler = NULL;
3765
3766         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3767         wake_up(&the_lnet.ln_dc_waitq);
3768
3769         CDEBUG(D_NET, "stopped\n");
3770
3771         return 0;
3772 }
3773
3774 /* ln_api_mutex is held on entry. */
3775 int lnet_peer_discovery_start(void)
3776 {
3777         struct task_struct *task;
3778         int rc = 0;
3779
3780         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3781                 return -EALREADY;
3782
3783         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3784         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3785         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3786         if (IS_ERR(task)) {
3787                 rc = PTR_ERR(task);
3788                 CERROR("Can't start peer discovery thread: %d\n", rc);
3789
3790                 the_lnet.ln_dc_handler = NULL;
3791
3792                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3793         }
3794
3795         CDEBUG(D_NET, "discovery start: %d\n", rc);
3796
3797         return rc;
3798 }
3799
3800 /* ln_api_mutex is held on entry. */
3801 void lnet_peer_discovery_stop(void)
3802 {
3803         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3804                 return;
3805
3806         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3807         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3808
3809         /* In the LNetNIInit() path we may be stopping discovery before it
3810          * has entered its work loop.
3811          */
3812         if (!completion_done(&the_lnet.ln_started))
3813                 complete(&the_lnet.ln_started);
3814         else
3815                 wake_up(&the_lnet.ln_dc_waitq);
3816
3817         wait_event(the_lnet.ln_dc_waitq,
3818                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3819
3820         LASSERT(list_empty(&the_lnet.ln_dc_request));
3821         LASSERT(list_empty(&the_lnet.ln_dc_working));
3822         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3823
3824         CDEBUG(D_NET, "discovery stopped\n");
3825 }
3826
3827 /* Debugging */
3828
3829 void
3830 lnet_debug_peer(lnet_nid_t nid)
3831 {
3832         char                    *aliveness = "NA";
3833         struct lnet_peer_ni     *lp;
3834         int                     cpt;
3835
3836         cpt = lnet_cpt_of_nid(nid, NULL);
3837         lnet_net_lock(cpt);
3838
3839         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3840         if (IS_ERR(lp)) {
3841                 lnet_net_unlock(cpt);
3842                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3843                 return;
3844         }
3845
3846         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3847                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3848
3849         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3850                libcfs_nid2str(lp->lpni_nid), kref_read(&lp->lpni_kref),
3851                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3852                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3853                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3854
3855         lnet_peer_ni_decref_locked(lp);
3856
3857         lnet_net_unlock(cpt);
3858 }
3859
3860 /* Gathering information for userspace. */
3861
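/*
 * Returns 0 and fills the output parameters when the peer_index'th peer
 * NI in the table for *cpt_iter is found; returns -ENOENT when the CPT
 * number is out of range, its table is gone, or no entry matches.
 */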
3862 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3863                           char aliveness[LNET_MAX_STR_LEN],
3864                           __u32 *cpt_iter, __u32 *refcount,
3865                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3866                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3867                           __u32 *peer_tx_qnob)
3868 {
3869         struct lnet_peer_table          *peer_table;
3870         struct lnet_peer_ni             *lp;
3871         int                             j;
3872         int                             lncpt;
3873         bool                            found = false;
3874
3875         /* get the number of CPTs */
3876         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3877
3878         /* if the cpt number to be examined is >= the number of cpts in
3879          * the system then indicate that there are no more cpts to examine
3880          */
3881         if (*cpt_iter >= lncpt)
3882                 return -ENOENT;
3883
3884         /* get the current table */
3885         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3886         /* if the ptable is NULL then there are no more cpts to examine */
3887         if (peer_table == NULL)
3888                 return -ENOENT;
3889
3890         lnet_net_lock(*cpt_iter);
3891
3892         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3893                 struct list_head *peers = &peer_table->pt_hash[j];
3894
3895                 list_for_each_entry(lp, peers, lpni_hashlist) {
3896                         if (peer_index-- > 0)
3897                                 continue;
3898
3899                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3900                         if (lnet_isrouter(lp) ||
3901                                 lnet_peer_aliveness_enabled(lp))
3902                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3903                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3904
3905                         *nid = lp->lpni_nid;
3906                         *refcount = kref_read(&lp->lpni_kref);
3907                         *ni_peer_tx_credits =
3908                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3909                         *peer_tx_credits = lp->lpni_txcredits;
3910                         *peer_rtr_credits = lp->lpni_rtrcredits;
3911                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3912                         *peer_tx_qnob = lp->lpni_txqnob;
3913
3914                         found = true;
3915                 }
3916
3917         }
3918         lnet_net_unlock(*cpt_iter);
3919
3920         *cpt_iter = lncpt;
3921
3922         return found ? 0 : -ENOENT;
3923 }
3924
3925 /* ln_api_mutex is held, which keeps the peer list stable */
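/*
 * The userspace bulk buffer receives one fixed-size record per peer NI,
 * laid out in the order it is written below: the NID, then the credit
 * info, the element stats, the message stats, and the health stats.
 * This matches the per-NI size computed at the top of the function.
 */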
3926 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3927 {
3928         struct lnet_ioctl_element_stats *lpni_stats;
3929         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3930         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3931         struct lnet_peer_ni_credit_info *lpni_info;
3932         struct lnet_peer_ni *lpni;
3933         struct lnet_peer *lp;
3934         lnet_nid_t nid;
3935         __u32 size;
3936         int rc;
3937
3938         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3939
3940         if (!lp) {
3941                 rc = -ENOENT;
3942                 goto out;
3943         }
3944
3945         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3946                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3947         size *= lp->lp_nnis;
3948         if (size > cfg->prcfg_size) {
3949                 cfg->prcfg_size = size;
3950                 rc = -E2BIG;
3951                 goto out_lp_decref;
3952         }
3953
3954         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3955         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3956         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3957         cfg->prcfg_count = lp->lp_nnis;
3958         cfg->prcfg_size = size;
3959         cfg->prcfg_state = lp->lp_state;
3960
3961         /* Allocate helper buffers. */
3962         rc = -ENOMEM;
3963         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3964         if (!lpni_info)
3965                 goto out_lp_decref;
3966         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3967         if (!lpni_stats)
3968                 goto out_free_info;
3969         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3970         if (!lpni_msg_stats)
3971                 goto out_free_stats;
3972         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3973         if (!lpni_hstats)
3974                 goto out_free_msg_stats;
3975
3976
3977         lpni = NULL;
3978         rc = -EFAULT;
3979         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3980                 nid = lpni->lpni_nid;
3981                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3982                         goto out_free_hstats;
3983                 bulk += sizeof(nid);
3984
3985                 memset(lpni_info, 0, sizeof(*lpni_info));
3986                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3987                 if (lnet_isrouter(lpni) ||
3988                         lnet_peer_aliveness_enabled(lpni))
3989                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3990                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3991
3992                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
3993                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3994                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3995                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3996                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3997                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3998                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3999                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4000                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4001                         goto out_free_hstats;
4002                 bulk += sizeof(*lpni_info);
4003
4004                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4005                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4006                                                             LNET_STATS_TYPE_SEND);
4007                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4008                                                             LNET_STATS_TYPE_RECV);
4009                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4010                                                             LNET_STATS_TYPE_DROP);
4011                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4012                         goto out_free_hstats;
4013                 bulk += sizeof(*lpni_stats);
4014                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4015                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4016                         goto out_free_hstats;
4017                 bulk += sizeof(*lpni_msg_stats);
4018                 lpni_hstats->hlpni_network_timeout =
4019                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4020                 lpni_hstats->hlpni_remote_dropped =
4021                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4022                 lpni_hstats->hlpni_remote_timeout =
4023                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4024                 lpni_hstats->hlpni_remote_error =
4025                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4026                 lpni_hstats->hlpni_health_value =
4027                   atomic_read(&lpni->lpni_healthv);
4028                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4029                         goto out_free_hstats;
4030                 bulk += sizeof(*lpni_hstats);
4031         }
4032         rc = 0;
4033
4034 out_free_hstats:
4035         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4036 out_free_msg_stats:
4037         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4038 out_free_stats:
4039         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4040 out_free_info:
4041         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4042 out_lp_decref:
4043         lnet_peer_decref_locked(lp);
4044 out:
4045         return rc;
4046 }
4047
4048 /* must hold net_lock/0 */
4049 void
4050 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4051                                      struct list_head *recovery_queue,
4052                                      time64_t now)
4053 {
4054         /* the monitor thread could've shut down and cleaned up the queues */
4055         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4056                 return;
4057
4058         if (!list_empty(&lpni->lpni_recovery))
4059                 return;
4060
4061         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4062                 return;
4063
4064         if (!lpni->lpni_last_alive) {
4065                 CDEBUG(D_NET,
4066                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4067                        libcfs_nid2str(lpni->lpni_nid), lpni,
4068                        lpni->lpni_last_alive);
4069                 return;
4070         }
4071
4072         if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
4073                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4074                        libcfs_nid2str(lpni->lpni_nid),
4075                        lpni->lpni_last_alive);
4076                 /* Reset the ping count so that if this peer NI is added back to
4077                  * the recovery queue we will send the first ping right away.
4078                  */
4079                 lpni->lpni_ping_count = 0;
4080                 return;
4081         }
4082
4083         /* This peer NI is going on the recovery queue, so take a ref on it */
4084         lnet_peer_ni_addref_locked(lpni);
4085
4086         lnet_peer_ni_set_next_ping(lpni, now);
4087
4088         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4089                libcfs_nid2str(lpni->lpni_nid),
4090                lpni->lpni_ping_count,
4091                lpni->lpni_next_ping,
4092                lpni->lpni_last_alive,
4093                atomic_read(&lpni->lpni_healthv));
4094
4095         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4096 }
4097
4098 /* Call with the ln_api_mutex held */
4099 void
4100 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4101 {
4102         struct lnet_peer_table *ptable;
4103         struct lnet_peer *lp;
4104         struct lnet_peer_net *lpn;
4105         struct lnet_peer_ni *lpni;
4106         int lncpt;
4107         int cpt;
4108         time64_t now;
4109
4110         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4111                 return;
4112
4113         now = ktime_get_seconds();
4114
4115         if (!all) {
4116                 lnet_net_lock(LNET_LOCK_EX);
4117                 lpni = lnet_find_peer_ni_locked(nid);
4118                 if (!lpni) {
4119                         lnet_net_unlock(LNET_LOCK_EX);
4120                         return;
4121                 }
4122                 atomic_set(&lpni->lpni_healthv, value);
4123                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4124                                              &the_lnet.ln_mt_peerNIRecovq, now);
4125                 lnet_peer_ni_decref_locked(lpni);
4126                 lnet_net_unlock(LNET_LOCK_EX);
4127                 return;
4128         }
4129
4130         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4131
4132         /*
4133          * Walk all the peers and reset the health value for each one to the
4134          * specified value.
4135          */
4136         lnet_net_lock(LNET_LOCK_EX);
4137         for (cpt = 0; cpt < lncpt; cpt++) {
4138                 ptable = the_lnet.ln_peer_tables[cpt];
4139                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4140                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4141                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4142                                                     lpni_peer_nis) {
4143                                         atomic_set(&lpni->lpni_healthv, value);
4144                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4145                                              &the_lnet.ln_mt_peerNIRecovq, now);
4146                                 }
4147                         }
4148                 }
4149         }
4150         lnet_net_unlock(LNET_LOCK_EX);
4151 }
4152