9087d99793a56fce12d4055b9986117941a8d0d3
[fs/lustre-release.git] / lnet / lnet / peer.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(lnet_nid_t nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         kref_init(&lpni->lpni_kref);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NIDNET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
246 static struct lnet_peer *
247 lnet_peer_alloc(lnet_nid_t nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = nid;
265         lp->lp_disc_src_nid = LNET_NID_ANY;
266         lp->lp_disc_dst_nid = LNET_NID_ANY;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * All peers created on a router should have health checking
274          * enabled if it is not already on.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for loopback peer. If you're creating a peer
281          * for the loopback interface then that was initiated when we
282          * attempted to send a message over the loopback. There is no need
283          * to ever use a different interface when sending messages to
284          * myself.
285          */
286         if (nid == LNET_NID_LO_0)
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
291
292         return lp;
293 }
294
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected that lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318          * We cannot resend them now because we're holding the cpt lock.
319          * Releasing the lock can cause an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nid2str(lp->lp_primary_nid),
382                 libcfs_nid2str(lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nid2str(lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
437
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
461
462         spin_lock(&peer->lp_lock);
463         peer->lp_state |= LNET_PEER_MARK_DELETED;
464         spin_unlock(&peer->lp_lock);
465
466         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
467         while (lpni != NULL) {
468                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469                 rc = lnet_peer_ni_del_locked(lpni, false);
470                 if (rc != 0)
471                         rc2 = rc;
472                 lpni = lpni2;
473         }
474
475         return rc2;
476 }
477
478 /*
479  * Discovering this peer is taking too long. Cancel any Ping or Push
480  * that discovery is waiting on by unlinking the relevant MDs. The
481  * lnet_discovery_event_handler() will proceed from here and complete
482  * the cleanup.
483  */
484 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
485 {
486         struct lnet_handle_md ping_mdh;
487         struct lnet_handle_md push_mdh;
488
489         LNetInvalidateMDHandle(&ping_mdh);
490         LNetInvalidateMDHandle(&push_mdh);
491
492         spin_lock(&lp->lp_lock);
493         if (lp->lp_state & LNET_PEER_PING_SENT) {
494                 ping_mdh = lp->lp_ping_mdh;
495                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
496         }
497         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
498                 push_mdh = lp->lp_push_mdh;
499                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
500         }
501         spin_unlock(&lp->lp_lock);
502
503         if (!LNetMDHandleIsInvalid(ping_mdh))
504                 LNetMDUnlink(ping_mdh);
505         if (!LNetMDHandleIsInvalid(push_mdh))
506                 LNetMDUnlink(push_mdh);
507 }
508
509 static int
510 lnet_peer_del(struct lnet_peer *peer)
511 {
512         lnet_peer_cancel_discovery(peer);
513         lnet_net_lock(LNET_LOCK_EX);
514         lnet_peer_del_locked(peer);
515         lnet_net_unlock(LNET_LOCK_EX);
516
517         return 0;
518 }
519
520 /*
521  * Delete a NID from a peer. Call with ln_api_mutex held.
522  *
523  * Error codes:
524  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
525  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
526  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
527  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
528  */
529 static int
530 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
531 {
532         struct lnet_peer_ni *lpni;
533         lnet_nid_t primary_nid = lp->lp_primary_nid;
534         int rc = 0;
535         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
536
537         if (!(flags & LNET_PEER_CONFIGURED)) {
538                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
539                         rc = -EPERM;
540                         goto out;
541                 }
542         }
543         lpni = lnet_find_peer_ni_locked(nid);
544         if (!lpni) {
545                 rc = -ENOENT;
546                 goto out;
547         }
548         lnet_peer_ni_decref_locked(lpni);
549         if (lp != lpni->lpni_peer_net->lpn_peer) {
550                 rc = -ECHILD;
551                 goto out;
552         }
553
554         /*
555          * This function only allows deletion of the primary NID if it
556          * is the only NID.
557          */
558         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
559                 rc = -EBUSY;
560                 goto out;
561         }
562
563         lnet_net_lock(LNET_LOCK_EX);
564
565         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
566                 struct lnet_peer_ni *lpni2;
567                 /* assign the next peer_ni to be the primary */
568                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
569                 LASSERT(lpni2);
570                 lp->lp_primary_nid = lpni2->lpni_nid;
571         }
572         rc = lnet_peer_ni_del_locked(lpni, force);
573
574         lnet_net_unlock(LNET_LOCK_EX);
575
576 out:
577         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
578                libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
579
580         return rc;
581 }
582
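/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a configuration path holding ln_api_mutex could drive the error codes
 * documented above roughly like this. The dispatch on rc is an assumption
 * about how a caller would react, not code from this file.
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 *
 *	if (rc == -EBUSY)
 *		CERROR("%s is the primary NID of a multi-NID peer\n",
 *		       libcfs_nid2str(nid));
 *	else if (rc == -ENOENT || rc == -ECHILD)
 *		CERROR("%s is not a NID of this peer\n", libcfs_nid2str(nid));
 */
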
583 static void
584 lnet_peer_table_cleanup_locked(struct lnet_net *net,
585                                struct lnet_peer_table *ptable)
586 {
587         int                      i;
588         struct lnet_peer_ni     *next;
589         struct lnet_peer_ni     *lpni;
590         struct lnet_peer        *peer;
591
592         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
593                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
594                                          lpni_hashlist) {
595                         if (net != NULL && net != lpni->lpni_net)
596                                 continue;
597
598                         peer = lpni->lpni_peer_net->lpn_peer;
599                         if (peer->lp_primary_nid != lpni->lpni_nid) {
600                                 lnet_peer_ni_del_locked(lpni, false);
601                                 continue;
602                         }
603                         /*
604                          * Removing the primary NID implies removing
605                          * the entire peer. Advance next beyond any
606                          * peer_ni that belongs to the same peer.
607                          */
608                         list_for_each_entry_from(next, &ptable->pt_hash[i],
609                                                  lpni_hashlist) {
610                                 if (next->lpni_peer_net->lpn_peer != peer)
611                                         break;
612                         }
613                         lnet_peer_del_locked(peer);
614                 }
615         }
616 }
617
618 static void
619 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
620 {
621         wait_var_event_warning(&ptable->pt_zombies,
622                                ptable->pt_zombies == 0,
623                                "Waiting for %d zombies on peer table\n",
624                                ptable->pt_zombies);
625 }
626
627 static void
628 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
629                                 struct lnet_peer_table *ptable)
630 {
631         struct lnet_peer_ni     *lp;
632         struct lnet_peer_ni     *tmp;
633         lnet_nid_t              gw_nid;
634         int                     i;
635
636         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
637                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
638                                          lpni_hashlist) {
639                         if (net != lp->lpni_net)
640                                 continue;
641
642                         if (!lnet_isrouter(lp))
643                                 continue;
644
645                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
646
647                         lnet_net_unlock(LNET_LOCK_EX);
648                         lnet_del_route(LNET_NET_ANY, gw_nid);
649                         lnet_net_lock(LNET_LOCK_EX);
650                 }
651         }
652 }
653
654 void
655 lnet_peer_tables_cleanup(struct lnet_net *net)
656 {
657         int i;
658         struct lnet_peer_table *ptable;
659
660         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
661         /* If just deleting the peers for a NI, get rid of any routes these
662          * peers are gateways for. */
663         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
664                 lnet_net_lock(LNET_LOCK_EX);
665                 lnet_peer_table_del_rtrs_locked(net, ptable);
666                 lnet_net_unlock(LNET_LOCK_EX);
667         }
668
669         /* Start the cleanup process */
670         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
671                 lnet_net_lock(LNET_LOCK_EX);
672                 lnet_peer_table_cleanup_locked(net, ptable);
673                 lnet_net_unlock(LNET_LOCK_EX);
674         }
675
676         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
677                 lnet_peer_ni_finalize_wait(ptable);
678 }
679
680 static struct lnet_peer_ni *
681 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
682 {
683         struct list_head        *peers;
684         struct lnet_peer_ni     *lp;
685
686         if (the_lnet.ln_state != LNET_STATE_RUNNING)
687                 return NULL;
688
689         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
690         list_for_each_entry(lp, peers, lpni_hashlist) {
691                 if (lp->lpni_nid == nid) {
692                         lnet_peer_ni_addref_locked(lp);
693                         return lp;
694                 }
695         }
696
697         return NULL;
698 }
699
700 struct lnet_peer_ni *
701 lnet_find_peer_ni_locked(lnet_nid_t nid)
702 {
703         struct lnet_peer_ni *lpni;
704         struct lnet_peer_table *ptable;
705         int cpt;
706
707         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
708
709         ptable = the_lnet.ln_peer_tables[cpt];
710         lpni = lnet_get_peer_ni_locked(ptable, nid);
711
712         return lpni;
713 }
714
715 struct lnet_peer_ni *
716 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
717 {
718         struct lnet_peer_net *lpn;
719         struct lnet_peer_ni *lpni;
720
721         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
722         if (!lpn)
723                 return NULL;
724
725         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
726                 if (lpni->lpni_nid == nid)
727                         return lpni;
728         }
729
730         return NULL;
731 }
732
733 struct lnet_peer *
734 lnet_find_peer(lnet_nid_t nid)
735 {
736         struct lnet_peer_ni *lpni;
737         struct lnet_peer *lp = NULL;
738         int cpt;
739
740         cpt = lnet_net_lock_current();
741         lpni = lnet_find_peer_ni_locked(nid);
742         if (lpni) {
743                 lp = lpni->lpni_peer_net->lpn_peer;
744                 lnet_peer_addref_locked(lp);
745                 lnet_peer_ni_decref_locked(lpni);
746         }
747         lnet_net_unlock(cpt);
748
749         return lp;
750 }
751
752 struct lnet_peer_net *
753 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
754 {
755         struct lnet_peer_net *net;
756
757         if (!prev_lpn_id) {
758                 /* no net id provided; return the first net */
759                 net = list_first_entry_or_null(&lp->lp_peer_nets,
760                                                struct lnet_peer_net,
761                                                lpn_peer_nets);
762
763                 return net;
764         }
765
766         /* find the net after the one provided */
767         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
768                 if (net->lpn_net_id == prev_lpn_id) {
769                         /*
770                          * if we reached the end of the list, loop back to the
771                          * beginning.
772                          */
773                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
774                                 return list_first_entry_or_null(&lp->lp_peer_nets,
775                                                                 struct lnet_peer_net,
776                                                                 lpn_peer_nets);
777                         else
778                                 return list_next_entry(net, lpn_peer_nets);
779                 }
780         }
781
782         return NULL;
783 }
784
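/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * because the iterator above wraps back to the first net once the end of
 * lp_peer_nets is reached, a caller visiting each peer_net exactly once
 * under lnet_net_lock could look like this:
 *
 *	struct lnet_peer_net *first, *lpn;
 *
 *	first = lnet_get_next_peer_net_locked(lp, 0);
 *	lpn = first;
 *	while (lpn) {
 *		CDEBUG(D_NET, "net %s\n", libcfs_net2str(lpn->lpn_net_id));
 *		lpn = lnet_get_next_peer_net_locked(lp, lpn->lpn_net_id);
 *		if (lpn == first)
 *			break;
 *	}
 */
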
785 struct lnet_peer_ni *
786 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
787                              struct lnet_peer_net *peer_net,
788                              struct lnet_peer_ni *prev)
789 {
790         struct lnet_peer_ni *lpni;
791         struct lnet_peer_net *net = peer_net;
792
793         if (!prev) {
794                 if (!net) {
795                         if (list_empty(&peer->lp_peer_nets))
796                                 return NULL;
797
798                         net = list_entry(peer->lp_peer_nets.next,
799                                          struct lnet_peer_net,
800                                          lpn_peer_nets);
801                 }
802                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
803                                   lpni_peer_nis);
804
805                 return lpni;
806         }
807
808         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
809                 /*
810                  * if we reached the end of the peer NI list and the peer
811                  * net is specified, then there are no more peer NIs in that
812                  * net.
813                  */
814                 if (net)
815                         return NULL;
816
817                 /*
818                  * we reached the end of this net's NI list. Move to the
819                  * next net.
820                  */
821                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
822                     &peer->lp_peer_nets)
823                         /* no more nets and no more NIs. */
824                         return NULL;
825
826                 /* get the next net */
827                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
828                                  struct lnet_peer_net,
829                                  lpn_peer_nets);
830                 /* get the ni on it */
831                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
832                                   lpni_peer_nis);
833
834                 return lpni;
835         }
836
837         /* there are more nis left */
838         lpni = list_entry(prev->lpni_peer_nis.next,
839                           struct lnet_peer_ni, lpni_peer_nis);
840
841         return lpni;
842 }
843
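/*
 * Illustrative sketch (not part of the original file): the conventional way
 * to walk every peer_ni of a peer with this iterator, as done elsewhere in
 * this file, with lnet_net_lock held:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		CDEBUG(D_NET, "NID %s\n", libcfs_nid2str(lpni->lpni_nid));
 */
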
844 /* Call with the ln_api_mutex held */
845 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
846 {
847         struct lnet_process_id id;
848         struct lnet_peer_table *ptable;
849         struct lnet_peer *lp;
850         __u32 count = 0;
851         __u32 size = 0;
852         int lncpt;
853         int cpt;
854         __u32 i;
855         int rc;
856
857         rc = -ESHUTDOWN;
858         if (the_lnet.ln_state != LNET_STATE_RUNNING)
859                 goto done;
860
861         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
862
863         /*
864          * Count the number of peers, and return E2BIG if the buffer
865          * is too small. We'll also return the desired size.
866          */
867         rc = -E2BIG;
868         for (cpt = 0; cpt < lncpt; cpt++) {
869                 ptable = the_lnet.ln_peer_tables[cpt];
870                 count += ptable->pt_peers;
871         }
872         size = count * sizeof(*ids);
873         if (size > *sizep)
874                 goto done;
875
876         /*
877          * Walk the peer lists and copy out the primary nids.
878          * This is safe because the peer lists are only modified
879          * while the ln_api_mutex is held. So we don't need to
880          * hold the lnet_net_lock as well, and can therefore
881          * directly call copy_to_user().
882          */
883         rc = -EFAULT;
884         memset(&id, 0, sizeof(id));
885         id.pid = LNET_PID_LUSTRE;
886         i = 0;
887         for (cpt = 0; cpt < lncpt; cpt++) {
888                 ptable = the_lnet.ln_peer_tables[cpt];
889                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
890                         if (i >= count)
891                                 goto done;
892                         id.nid = lp->lp_primary_nid;
893                         if (copy_to_user(&ids[i], &id, sizeof(id)))
894                                 goto done;
895                         i++;
896                 }
897         }
898         rc = 0;
899 done:
900         *countp = count;
901         *sizep = size;
902         return rc;
903 }
904
905 /*
906  * Start pushes to peers that need to be updated for a configuration
907  * change on this node.
908  */
909 void
910 lnet_push_update_to_peers(int force)
911 {
912         struct lnet_peer_table *ptable;
913         struct lnet_peer *lp;
914         int lncpt;
915         int cpt;
916
917         lnet_net_lock(LNET_LOCK_EX);
918         if (lnet_peer_discovery_disabled)
919                 force = 0;
920         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
921         for (cpt = 0; cpt < lncpt; cpt++) {
922                 ptable = the_lnet.ln_peer_tables[cpt];
923                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
924                         if (force) {
925                                 spin_lock(&lp->lp_lock);
926                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
927                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
928                                 spin_unlock(&lp->lp_lock);
929                         }
930                         if (lnet_peer_needs_push(lp))
931                                 lnet_peer_queue_for_discovery(lp);
932                 }
933         }
934         lnet_net_unlock(LNET_LOCK_EX);
935         wake_up(&the_lnet.ln_dc_waitq);
936 }
937
938 /* find the NID in the preferred gateways for the remote peer
939  * return:
940  *      false: list is not empty and NID is not preferred
941  *      false: list is empty
942  *      true: nid is found in the list
943  */
944 bool
945 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
946                              lnet_nid_t gw_nid)
947 {
948         struct lnet_nid_list *ne;
949
950         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
951                libcfs_nid2str(lpni->lpni_nid),
952                list_empty(&lpni->lpni_rtr_pref_nids));
953
954         if (list_empty(&lpni->lpni_rtr_pref_nids))
955                 return false;
956
957         /* iterate through all the preferred NIDs and see if any of them
958          * matches the provided gw_nid
959          */
960         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
961                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
962                        libcfs_nid2str(ne->nl_nid),
963                        libcfs_nid2str(gw_nid));
964                 if (ne->nl_nid == gw_nid)
965                         return true;
966         }
967
968         return false;
969 }
970
971 void
972 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
973 {
974         struct list_head zombies;
975         struct lnet_nid_list *ne;
976         struct lnet_nid_list *tmp;
977         int cpt = lpni->lpni_cpt;
978
979         INIT_LIST_HEAD(&zombies);
980
981         lnet_net_lock(cpt);
982         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
983         lnet_net_unlock(cpt);
984
985         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
986                 list_del(&ne->nl_list);
987                 LIBCFS_FREE(ne, sizeof(*ne));
988         }
989 }
990
991 int
992 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
993                        lnet_nid_t gw_nid)
994 {
995         int cpt = lpni->lpni_cpt;
996         struct lnet_nid_list *ne = NULL;
997
998         /* This function is called with api_mutex held. When the api_mutex
999          * is held, the list cannot be modified, as it is only modified as
1000          * a result of applying a UDSP and that happens under api_mutex
1001          * lock.
1002          */
1003         __must_hold(&the_lnet.ln_api_mutex);
1004
1005         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1006                 if (ne->nl_nid == gw_nid)
1007                         return -EEXIST;
1008         }
1009
1010         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1011         if (!ne)
1012                 return -ENOMEM;
1013
1014         ne->nl_nid = gw_nid;
1015
1016         /* Lock the cpt to protect against addition and checks in the
1017          * selection algorithm
1018          */
1019         lnet_net_lock(cpt);
1020         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1021         lnet_net_unlock(cpt);
1022
1023         return 0;
1024 }
1025
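/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * a UDSP rule would record a preferred gateway under ln_api_mutex with the
 * function above, and the selection code would later consult it under the
 * net lock:
 *
 *	rc = lnet_peer_add_pref_rtr(lpni, gw_nid);  // -EEXIST if already listed
 *
 *	lnet_net_lock(cpt);
 *	preferred = lnet_peer_is_pref_rtr_locked(lpni, gw_nid);
 *	lnet_net_unlock(cpt);
 */
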
1026 /*
1027  * Test whether an NI is a preferred NI for this peer_ni, i.e. whether
1028  * this is a preferred point-to-point path. Call with lnet_net_lock in
1029  * shared mode.
1030  */
1031 bool
1032 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1033 {
1034         struct lnet_nid_list *ne;
1035
1036         if (lpni->lpni_pref_nnids == 0)
1037                 return false;
1038         if (lpni->lpni_pref_nnids == 1)
1039                 return lpni->lpni_pref.nid == nid;
1040         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1041                 if (ne->nl_nid == nid)
1042                         return true;
1043         }
1044         return false;
1045 }
1046
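/*
 * Illustrative note (not part of the original file): as the checks above
 * show, lpni_pref_nnids selects how the preferred NIDs are stored: exactly
 * one preferred NID lives inline in lpni_pref.nid, while two or more live
 * on the lpni_pref.nids list. A sketch of dumping them, assuming
 * lnet_net_lock is held:
 *
 *	if (lpni->lpni_pref_nnids == 1) {
 *		CDEBUG(D_NET, "pref %s\n",
 *		       libcfs_nid2str(lpni->lpni_pref.nid));
 *	} else if (lpni->lpni_pref_nnids > 1) {
 *		struct lnet_nid_list *ne;
 *
 *		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list)
 *			CDEBUG(D_NET, "pref %s\n", libcfs_nid2str(ne->nl_nid));
 *	}
 */
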
1047 /*
1048  * Set a single ni as preferred, provided no preferred ni is already
1049  * defined. Only to be used for non-multi-rail peer_ni.
1050  */
1051 int
1052 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1053 {
1054         int rc = 0;
1055
1056         spin_lock(&lpni->lpni_lock);
1057         if (nid == LNET_NID_ANY) {
1058                 rc = -EINVAL;
1059         } else if (lpni->lpni_pref_nnids > 0) {
1060                 rc = -EPERM;
1061         } else if (lpni->lpni_pref_nnids == 0) {
1062                 lpni->lpni_pref.nid = nid;
1063                 lpni->lpni_pref_nnids = 1;
1064                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1065         }
1066         spin_unlock(&lpni->lpni_lock);
1067
1068         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1069                libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
1070         return rc;
1071 }
1072
1073 /*
1074  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1075  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1076  */
1077 int
1078 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1079 {
1080         int rc = 0;
1081
1082         spin_lock(&lpni->lpni_lock);
1083         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1084                 lpni->lpni_pref_nnids = 0;
1085                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1086         } else if (lpni->lpni_pref_nnids == 0) {
1087                 rc = -ENOENT;
1088         } else {
1089                 rc = -EPERM;
1090         }
1091         spin_unlock(&lpni->lpni_lock);
1092
1093         CDEBUG(D_NET, "peer %s: %d\n",
1094                libcfs_nid2str(lpni->lpni_nid), rc);
1095         return rc;
1096 }
1097
1098 void
1099 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1100 {
1101         lpni->lpni_sel_priority = priority;
1102 }
1103
1104 /*
1105  * Clear the preferred NIDs from a non-multi-rail peer.
1106  */
1107 void
1108 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1109 {
1110         struct lnet_peer_ni *lpni = NULL;
1111
1112         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1113                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1114 }
1115
1116 int
1117 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1118 {
1119         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1120         struct lnet_nid_list *ne1 = NULL;
1121         struct lnet_nid_list *ne2 = NULL;
1122         lnet_nid_t tmp_nid = LNET_NID_ANY;
1123         int rc = 0;
1124
1125         if (nid == LNET_NID_ANY) {
1126                 rc = -EINVAL;
1127                 goto out;
1128         }
1129
1130         if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
1131                 rc = -EEXIST;
1132                 goto out;
1133         }
1134
1135         /* A non-MR node may have only one preferred NI per peer_ni */
1136         if (lpni->lpni_pref_nnids > 0 &&
1137             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1138                 rc = -EPERM;
1139                 goto out;
1140         }
1141
1142         /* add the new preferred nid to the list of preferred nids */
1143         if (lpni->lpni_pref_nnids != 0) {
1144                 size_t alloc_size = sizeof(*ne1);
1145
1146                 if (lpni->lpni_pref_nnids == 1) {
1147                         tmp_nid = lpni->lpni_pref.nid;
1148                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1149                 }
1150
1151                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1152                         if (ne1->nl_nid == nid) {
1153                                 rc = -EEXIST;
1154                                 goto out;
1155                         }
1156                 }
1157
1158                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1159                                  alloc_size);
1160                 if (!ne1) {
1161                         rc = -ENOMEM;
1162                         goto out;
1163                 }
1164
1165                 /* move the originally stored nid to the list */
1166                 if (lpni->lpni_pref_nnids == 1) {
1167                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1168                                 lpni->lpni_cpt, alloc_size);
1169                         if (!ne2) {
1170                                 rc = -ENOMEM;
1171                                 goto out;
1172                         }
1173                         INIT_LIST_HEAD(&ne2->nl_list);
1174                         ne2->nl_nid = tmp_nid;
1175                 }
1176                 ne1->nl_nid = nid;
1177         }
1178
1179         lnet_net_lock(LNET_LOCK_EX);
1180         spin_lock(&lpni->lpni_lock);
1181         if (lpni->lpni_pref_nnids == 0) {
1182                 lpni->lpni_pref.nid = nid;
1183         } else {
1184                 if (ne2)
1185                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1186                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1187         }
1188         lpni->lpni_pref_nnids++;
1189         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1190         spin_unlock(&lpni->lpni_lock);
1191         lnet_net_unlock(LNET_LOCK_EX);
1192
1193 out:
1194         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1195                 spin_lock(&lpni->lpni_lock);
1196                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1197                 spin_unlock(&lpni->lpni_lock);
1198         }
1199         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1200                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1201         return rc;
1202 }
1203
1204 int
1205 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1206 {
1207         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1208         struct lnet_nid_list *ne = NULL;
1209         int rc = 0;
1210
1211         if (lpni->lpni_pref_nnids == 0) {
1212                 rc = -ENOENT;
1213                 goto out;
1214         }
1215
1216         if (lpni->lpni_pref_nnids == 1) {
1217                 if (lpni->lpni_pref.nid != nid) {
1218                         rc = -ENOENT;
1219                         goto out;
1220                 }
1221         } else {
1222                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1223                         if (ne->nl_nid == nid)
1224                                 goto remove_nid_entry;
1225                 }
1226                 rc = -ENOENT;
1227                 ne = NULL;
1228                 goto out;
1229         }
1230
1231 remove_nid_entry:
1232         lnet_net_lock(LNET_LOCK_EX);
1233         spin_lock(&lpni->lpni_lock);
1234         if (lpni->lpni_pref_nnids == 1)
1235                 lpni->lpni_pref.nid = LNET_NID_ANY;
1236         else {
1237                 list_del_init(&ne->nl_list);
1238                 if (lpni->lpni_pref_nnids == 2) {
1239                         struct lnet_nid_list *ne, *tmp;
1240
1241                         list_for_each_entry_safe(ne, tmp,
1242                                                  &lpni->lpni_pref.nids,
1243                                                  nl_list) {
1244                                 lpni->lpni_pref.nid = ne->nl_nid;
1245                                 list_del_init(&ne->nl_list);
1246                                 LIBCFS_FREE(ne, sizeof(*ne));
1247                         }
1248                 }
1249         }
1250         lpni->lpni_pref_nnids--;
1251         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1252         spin_unlock(&lpni->lpni_lock);
1253         lnet_net_unlock(LNET_LOCK_EX);
1254
1255         if (ne)
1256                 LIBCFS_FREE(ne, sizeof(*ne));
1257 out:
1258         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1259                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1260         return rc;
1261 }
1262
1263 void
1264 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1265 {
1266         struct list_head zombies;
1267         struct lnet_nid_list *ne;
1268         struct lnet_nid_list *tmp;
1269
1270         INIT_LIST_HEAD(&zombies);
1271
1272         lnet_net_lock(LNET_LOCK_EX);
1273         if (lpni->lpni_pref_nnids == 1)
1274                 lpni->lpni_pref.nid = LNET_NID_ANY;
1275         else if (lpni->lpni_pref_nnids > 1)
1276                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1277         lpni->lpni_pref_nnids = 0;
1278         lnet_net_unlock(LNET_LOCK_EX);
1279
1280         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1281                 list_del_init(&ne->nl_list);
1282                 LIBCFS_FREE(ne, sizeof(*ne));
1283         }
1284 }
1285
1286 lnet_nid_t
1287 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1288 {
1289         struct lnet_peer_ni *lpni;
1290         lnet_nid_t primary_nid = nid;
1291
1292         lpni = lnet_find_peer_ni_locked(nid);
1293         if (lpni) {
1294                 primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1295                 lnet_peer_ni_decref_locked(lpni);
1296         }
1297
1298         return primary_nid;
1299 }
1300
1301 bool
1302 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1303 __must_hold(&lp->lp_lock)
1304 {
1305         if (lnet_peer_discovery_disabled)
1306                 return true;
1307
1308         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1309             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1310                 return true;
1311         }
1312
1313         return false;
1314 }
1315
1316 /*
1317  * Peer Discovery
1318  */
1319 bool
1320 lnet_is_discovery_disabled(struct lnet_peer *lp)
1321 {
1322         bool rc = false;
1323
1324         spin_lock(&lp->lp_lock);
1325         rc = lnet_is_discovery_disabled_locked(lp);
1326         spin_unlock(&lp->lp_lock);
1327
1328         return rc;
1329 }
1330
1331 lnet_nid_t
1332 LNetPrimaryNID(lnet_nid_t nid)
1333 {
1334         struct lnet_peer *lp;
1335         struct lnet_peer_ni *lpni;
1336         lnet_nid_t primary_nid = nid;
1337         int rc = 0;
1338         int cpt;
1339
1340         if (nid == LNET_NID_LO_0)
1341                 return LNET_NID_LO_0;
1342
1343         cpt = lnet_net_lock_current();
1344         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1345         if (IS_ERR(lpni)) {
1346                 rc = PTR_ERR(lpni);
1347                 goto out_unlock;
1348         }
1349         lp = lpni->lpni_peer_net->lpn_peer;
1350
1351         /* If discovery is disabled locally then we needn't bother running
1352          * discovery here because discovery will not modify whatever
1353          * primary NID is currently set for this peer. If the specified peer is
1354          * down then this discovery can introduce long delays into the mount
1355          * process, so skip it if it isn't necessary.
1356          */
1357         while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
1358                 spin_lock(&lp->lp_lock);
1359                 /* force a full discovery cycle */
1360                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1361                 spin_unlock(&lp->lp_lock);
1362
1363                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1364                 if (rc)
1365                         goto out_decref;
1366                 /* The lpni (or lp) for this NID may have changed and our ref is
1367                  * the only thing keeping the old one around. Release the ref
1368                  * and look up the lpni again.
1369                  */
1370                 lnet_peer_ni_decref_locked(lpni);
1371                 lpni = lnet_find_peer_ni_locked(nid);
1372                 if (!lpni) {
1373                         rc = -ENOENT;
1374                         goto out_unlock;
1375                 }
1376                 lp = lpni->lpni_peer_net->lpn_peer;
1377
1378                 /* If we find that the peer has discovery disabled then we will
1379                  * not modify whatever primary NID is currently set for this
1380                  * peer. Thus, we can break out of this loop even if the peer
1381                  * is not fully up to date.
1382                  */
1383                 if (lnet_is_discovery_disabled(lp))
1384                         break;
1385         }
1386         primary_nid = lp->lp_primary_nid;
1387 out_decref:
1388         lnet_peer_ni_decref_locked(lpni);
1389 out_unlock:
1390         lnet_net_unlock(cpt);
1391
1392         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1393                libcfs_nid2str(primary_nid), rc);
1394         return primary_nid;
1395 }
1396 EXPORT_SYMBOL(LNetPrimaryNID);
1397
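/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * upper layers use the exported LNetPrimaryNID() to collapse any NID of a
 * (possibly multi-rail) peer onto its primary NID, for example to key
 * per-peer state:
 *
 *	lnet_nid_t primary = LNetPrimaryNID(peer_nid);
 *
 *	if (primary != peer_nid)
 *		CDEBUG(D_NET, "%s is a NID of peer %s\n",
 *		       libcfs_nid2str(peer_nid), libcfs_nid2str(primary));
 */
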
1398 struct lnet_peer_net *
1399 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1400 {
1401         struct lnet_peer_net *peer_net;
1402         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1403                 if (peer_net->lpn_net_id == net_id)
1404                         return peer_net;
1405         }
1406         return NULL;
1407 }
1408
1409 /*
1410  * Attach a peer_ni to a peer_net and peer. This function assumes
1411  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1412  * may be attached to a different peer, in which case it will be
1413  * properly detached first. The whole operation is done atomically.
1414  *
1415  * This function consumes the reference on lpni and always returns 0.
1416  * This is the last function called from functions that do return an
1417  * int, so returning 0 here allows the compiler to do a tail call.
1418  */
1419 static int
1420 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1421                                 struct lnet_peer_net *lpn,
1422                                 struct lnet_peer_ni *lpni,
1423                                 unsigned flags)
1424 {
1425         struct lnet_peer_table *ptable;
1426         bool new_lpn = false;
1427         int rc;
1428
1429         /* Install the new peer_ni */
1430         lnet_net_lock(LNET_LOCK_EX);
1431         /* Add peer_ni to global peer table hash, if necessary. */
1432         if (list_empty(&lpni->lpni_hashlist)) {
1433                 int hash = lnet_nid2peerhash(lpni->lpni_nid);
1434
1435                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1436                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1437                 ptable->pt_version++;
1438                 lnet_peer_ni_addref_locked(lpni);
1439         }
1440
1441         /* Detach the peer_ni from an existing peer, if necessary. */
1442         if (lpni->lpni_peer_net) {
1443                 LASSERT(lpni->lpni_peer_net != lpn);
1444                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1445                 lnet_peer_detach_peer_ni_locked(lpni);
1446                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1447                 lpni->lpni_peer_net = NULL;
1448         }
1449
1450         /* Add peer_ni to peer_net */
1451         lpni->lpni_peer_net = lpn;
1452         if (lp->lp_primary_nid == lpni->lpni_nid)
1453                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1454         else
1455                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1456         lnet_update_peer_net_healthv(lpni);
1457         lnet_peer_net_addref_locked(lpn);
1458
1459         /* Add peer_net to peer */
1460         if (!lpn->lpn_peer) {
1461                 new_lpn = true;
1462                 lpn->lpn_peer = lp;
1463                 if (lp->lp_primary_nid == lpni->lpni_nid)
1464                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1465                 else
1466                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1467                 lnet_peer_addref_locked(lp);
1468         }
1469
1470         /* Add peer to global peer list, if necessary */
1471         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1472         if (list_empty(&lp->lp_peer_list)) {
1473                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1474                 ptable->pt_peers++;
1475         }
1476
1477
1478         /* Update peer state */
1479         spin_lock(&lp->lp_lock);
1480         if (flags & LNET_PEER_CONFIGURED) {
1481                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1482                         lp->lp_state |= LNET_PEER_CONFIGURED;
1483         }
1484         if (flags & LNET_PEER_MULTI_RAIL) {
1485                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1486                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1487                         lnet_peer_clr_non_mr_pref_nids(lp);
1488                 }
1489         }
1490         spin_unlock(&lp->lp_lock);
1491
1492         lp->lp_nnis++;
1493
1494         /* apply UDSPs */
1495         if (new_lpn) {
1496                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1497                 if (rc)
1498                         CERROR("Failed to apply UDSPs on lpn %s\n",
1499                                libcfs_net2str(lpn->lpn_net_id));
1500         }
1501         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1502         if (rc)
1503                 CERROR("Failed to apply UDSPs on lpni %s\n",
1504                        libcfs_nid2str(lpni->lpni_nid));
1505
1506         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1507                libcfs_nid2str(lp->lp_primary_nid),
1508                libcfs_nid2str(lpni->lpni_nid), flags);
1509         lnet_peer_ni_decref_locked(lpni);
1510         lnet_net_unlock(LNET_LOCK_EX);
1511
1512         return 0;
1513 }
1514
1515 /*
1516  * Create a new peer, with nid as its primary nid.
1517  *
1518  * Call with the lnet_api_mutex held.
1519  */
1520 static int
1521 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1522 {
1523         struct lnet_peer *lp;
1524         struct lnet_peer_net *lpn;
1525         struct lnet_peer_ni *lpni;
1526         int rc = 0;
1527
1528         LASSERT(nid != LNET_NID_ANY);
1529
1530         /*
1531          * No need for the lnet_net_lock here, because the
1532          * lnet_api_mutex is held.
1533          */
1534         lpni = lnet_find_peer_ni_locked(nid);
1535         if (lpni) {
1536                 /* A peer with this NID already exists. */
1537                 lp = lpni->lpni_peer_net->lpn_peer;
1538                 lnet_peer_ni_decref_locked(lpni);
1539                 /*
1540                  * This is an error if the peer was configured and the
1541                  * primary NID differs or an attempt is made to change
1542                  * the Multi-Rail flag. Otherwise the assumption is
1543                  * that an existing peer is being modified.
1544                  */
1545                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1546                         if (lp->lp_primary_nid != nid)
1547                                 rc = -EEXIST;
1548                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1549                                 rc = -EPERM;
1550                         goto out;
1551                 }
1552                 /* Delete and recreate as a configured peer. */
1553                 lnet_peer_del(lp);
1554         }
1555
1556         /* Create peer, peer_net, and peer_ni. */
1557         rc = -ENOMEM;
1558         lp = lnet_peer_alloc(nid);
1559         if (!lp)
1560                 goto out;
1561         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1562         if (!lpn)
1563                 goto out_free_lp;
1564         lpni = lnet_peer_ni_alloc(nid);
1565         if (!lpni)
1566                 goto out_free_lpn;
1567
1568         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1569
1570 out_free_lpn:
1571         LIBCFS_FREE(lpn, sizeof(*lpn));
1572 out_free_lp:
1573         LIBCFS_FREE(lp, sizeof(*lp));
1574 out:
1575         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1576                libcfs_nid2str(nid), flags, rc);
1577         return rc;
1578 }
1579
1580 /*
1581  * Add a NID to a peer. Call with ln_api_mutex held.
1582  *
1583  * Error codes:
1584  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1585  *  -EEXIST:   The NID was configured by DLC for a different peer.
1586  *  -ENOMEM:   Out of memory.
1587  *  -ENOTUNIQ: Adding a second peer NID on a single network to a
1588  *             non-multi-rail peer.
1589  */
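/*
 * Flag-handling sketch, summarizing the checks below:
 *  - A call without LNET_PEER_CONFIGURED cannot modify a peer that was
 *    configured via DLC (-EPERM).
 *  - LNET_PEER_MULTI_RAIL in flags sets the peer's MR state; the state
 *    is never cleared here, so omitting the flag for an MR peer is
 *    also -EPERM.
 */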
1590 static int
1591 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1592 {
1593         struct lnet_peer_net *lpn;
1594         struct lnet_peer_ni *lpni;
1595         int rc = 0;
1596
1597         LASSERT(lp);
1598         LASSERT(nid != LNET_NID_ANY);
1599
1600         /* A configured peer can only be updated through configuration. */
1601         if (!(flags & LNET_PEER_CONFIGURED)) {
1602                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1603                         rc = -EPERM;
1604                         goto out;
1605                 }
1606         }
1607
1608         /*
1609          * The MULTI_RAIL flag can be set but not cleared, because
1610          * that would leave the peer struct in an invalid state.
1611          */
1612         if (flags & LNET_PEER_MULTI_RAIL) {
1613                 spin_lock(&lp->lp_lock);
1614                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1615                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1616                         lnet_peer_clr_non_mr_pref_nids(lp);
1617                 }
1618                 spin_unlock(&lp->lp_lock);
1619         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1620                 rc = -EPERM;
1621                 goto out;
1622         }
1623
1624         lpni = lnet_find_peer_ni_locked(nid);
1625         if (lpni) {
1626                 /*
1627                  * A peer_ni already exists. This is only a problem if
1628                  * it is not connected to this peer and was configured
1629                  * by DLC.
1630                  */
1631                 if (lpni->lpni_peer_net->lpn_peer == lp)
1632                         goto out_free_lpni;
1633                 if (lnet_peer_ni_is_configured(lpni)) {
1634                         rc = -EEXIST;
1635                         goto out_free_lpni;
1636                 }
1637                 /* If this is the primary NID, destroy the peer. */
1638                 if (lnet_peer_ni_is_primary(lpni)) {
1639                         struct lnet_peer *rtr_lp =
1640                                 lpni->lpni_peer_net->lpn_peer;
1641                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1642                         /*
1643                          * If we're trying to delete a router, it means
1644                          * this peer NI is moving to a new peer, so we must
1645                          * transfer the router properties to the new peer.
1646                          */
1647                         if (rtr_refcount > 0) {
1648                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1649                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1650                         }
1651                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1652                         lnet_peer_ni_decref_locked(lpni);
1653                         lpni = lnet_peer_ni_alloc(nid);
1654                         if (!lpni) {
1655                                 rc = -ENOMEM;
1656                                 goto out_free_lpni;
1657                         }
1658                 }
1659         } else {
1660                 lpni = lnet_peer_ni_alloc(nid);
1661                 if (!lpni) {
1662                         rc = -ENOMEM;
1663                         goto out_free_lpni;
1664                 }
1665         }
1666
1667         /*
1668          * Get the peer_net. Check that we're not adding a second
1669          * peer_ni on a peer_net of a non-multi-rail peer.
1670          */
1671         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1672         if (!lpn) {
1673                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1674                 if (!lpn) {
1675                         rc = -ENOMEM;
1676                         goto out_free_lpni;
1677                 }
1678         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1679                 rc = -ENOTUNIQ;
1680                 goto out_free_lpni;
1681         }
1682
1683         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1684
1685 out_free_lpni:
1686         lnet_peer_ni_decref_locked(lpni);
1687 out:
1688         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1689                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1690                flags, rc);
1691         return rc;
1692 }
1693
1694 /*
1695  * Update the primary NID of a peer, if possible.
1696  *
1697  * Call with the lnet_api_mutex held.
1698  */
1699 static int
1700 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1701 {
1702         lnet_nid_t old = lp->lp_primary_nid;
1703         int rc = 0;
1704
1705         if (lp->lp_primary_nid == nid)
1706                 goto out;
1707
1708         lp->lp_primary_nid = nid;
1709
1710         rc = lnet_peer_add_nid(lp, nid, flags);
1711         if (rc) {
1712                 lp->lp_primary_nid = old;
1713                 goto out;
1714         }
1715 out:
1716         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1717                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1718         return rc;
1719 }
1720
1721 /*
1722  * lpni creation initiated due to traffic either sending or receiving.
1723  */
1724 static int
1725 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1726 {
1727         struct lnet_peer *lp;
1728         struct lnet_peer_net *lpn;
1729         struct lnet_peer_ni *lpni;
1730         unsigned flags = 0;
1731         int rc = 0;
1732
1733         if (nid == LNET_NID_ANY) {
1734                 rc = -EINVAL;
1735                 goto out;
1736         }
1737
1738         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1739         lpni = lnet_find_peer_ni_locked(nid);
1740         if (lpni) {
1741                 /*
1742                  * We must have raced with another thread. Since we
1743                  * know next to nothing about a peer_ni created by
1744                  * traffic, we just assume everything is ok and
1745                  * return.
1746                  */
1747                 lnet_peer_ni_decref_locked(lpni);
1748                 goto out;
1749         }
1750
1751         /* Create peer, peer_net, and peer_ni. */
1752         rc = -ENOMEM;
1753         lp = lnet_peer_alloc(nid);
1754         if (!lp)
1755                 goto out;
1756         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1757         if (!lpn)
1758                 goto out_free_lp;
1759         lpni = lnet_peer_ni_alloc(nid);
1760         if (!lpni)
1761                 goto out_free_lpn;
1762         if (pref != LNET_NID_ANY)
1763                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1764
1765         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1766
1767 out_free_lpn:
1768         LIBCFS_FREE(lpn, sizeof(*lpn));
1769 out_free_lp:
1770         LIBCFS_FREE(lp, sizeof(*lp));
1771 out:
1772         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1773         return rc;
1774 }
1775
1776 /*
1777  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1778  *
1779  * This API handles the following combinations:
1780  *   Create a peer with its primary NI if only the prim_nid is provided
1781  *   Add a NID to a peer identified by the prim_nid. The peer identified
1782  *   by the prim_nid must already exist.
1783  *   The peer being created may be non-MR.
1784  *
1785  * The caller must hold ln_api_mutex. This prevents the peer from
1786  * being created/modified/deleted by a different thread.
1787  */
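/*
 * Illustrative example (the NIDs are hypothetical): adding a second
 * NID to a Multi-Rail peer whose primary NID is 10.0.0.1@tcp would
 * reach this function roughly as
 *
 *	rc = lnet_add_peer_ni(libcfs_str2nid("10.0.0.1@tcp"),
 *			      libcfs_str2nid("10.0.0.2@tcp"), true);
 *
 * whereas creating the peer itself passes nid == LNET_NID_ANY.
 */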
1788 int
1789 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1790 {
1791         struct lnet_peer *lp = NULL;
1792         struct lnet_peer_ni *lpni;
1793         unsigned flags;
1794
1795         /* The prim_nid must always be specified */
1796         if (prim_nid == LNET_NID_ANY)
1797                 return -EINVAL;
1798
1799         flags = LNET_PEER_CONFIGURED;
1800         if (mr)
1801                 flags |= LNET_PEER_MULTI_RAIL;
1802
1803         /*
1804          * If nid isn't specified, we must create a new peer with
1805          * prim_nid as its primary nid.
1806          */
1807         if (nid == LNET_NID_ANY)
1808                 return lnet_peer_add(prim_nid, flags);
1809
1810         /* Look up the prim_nid, which must exist. */
1811         lpni = lnet_find_peer_ni_locked(prim_nid);
1812         if (!lpni)
1813                 return -ENOENT;
1814         lp = lpni->lpni_peer_net->lpn_peer;
1815         lnet_peer_ni_decref_locked(lpni);
1816
1817         /* Peer must have been configured. */
1818         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1819                 CDEBUG(D_NET, "peer %s was not configured\n",
1820                        libcfs_nid2str(prim_nid));
1821                 return -ENOENT;
1822         }
1823
1824         /* Primary NID must match */
1825         if (lp->lp_primary_nid != prim_nid) {
1826                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1827                        libcfs_nid2str(prim_nid),
1828                        libcfs_nid2str(lp->lp_primary_nid));
1829                 return -ENODEV;
1830         }
1831
1832         /* Multi-Rail flag must match. */
1833         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1834                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1835                        libcfs_nid2str(prim_nid));
1836                 return -EPERM;
1837         }
1838
1839         return lnet_peer_add_nid(lp, nid, flags);
1840 }
1841
1842 /*
1843  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1844  *
1845  * This API handles the following combinations:
1846  *   Delete a NI from a peer if both prim_nid and nid are provided.
1847  *   Delete a peer if only prim_nid is provided.
1848  *   Delete a peer if the nid provided is its primary nid.
1849  *
1850  * The caller must hold ln_api_mutex. This prevents the peer from
1851  * being modified/deleted by a different thread.
1852  */
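/*
 * Illustrative example (hypothetical NIDs): passing LNET_NID_ANY, or a
 * nid equal to the primary NID, deletes the whole peer; any other NID
 * deletes just that peer NI:
 *
 *	lnet_del_peer_ni(libcfs_str2nid("10.0.0.1@tcp"), LNET_NID_ANY);
 *	lnet_del_peer_ni(libcfs_str2nid("10.0.0.1@tcp"),
 *			 libcfs_str2nid("10.0.0.2@tcp"));
 */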
1853 int
1854 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1855 {
1856         struct lnet_peer *lp;
1857         struct lnet_peer_ni *lpni;
1858         unsigned flags;
1859
1860         if (prim_nid == LNET_NID_ANY)
1861                 return -EINVAL;
1862
1863         lpni = lnet_find_peer_ni_locked(prim_nid);
1864         if (!lpni)
1865                 return -ENOENT;
1866         lp = lpni->lpni_peer_net->lpn_peer;
1867         lnet_peer_ni_decref_locked(lpni);
1868
1869         if (prim_nid != lp->lp_primary_nid) {
1870                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1871                        libcfs_nid2str(prim_nid),
1872                        libcfs_nid2str(lp->lp_primary_nid));
1873                 return -ENODEV;
1874         }
1875
1876         lnet_net_lock(LNET_LOCK_EX);
1877         if (lp->lp_rtr_refcount > 0) {
1878                 lnet_net_unlock(LNET_LOCK_EX);
1879                 CERROR("%s is a router. Can not be deleted\n",
1880                        libcfs_nid2str(prim_nid));
1881                 return -EBUSY;
1882         }
1883         lnet_net_unlock(LNET_LOCK_EX);
1884
1885         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1886                 return lnet_peer_del(lp);
1887
1888         flags = LNET_PEER_CONFIGURED;
1889         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1890                 flags |= LNET_PEER_MULTI_RAIL;
1891
1892         return lnet_peer_del_nid(lp, nid, flags);
1893 }
1894
1895 void
1896 lnet_destroy_peer_ni_locked(struct kref *ref)
1897 {
1898         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1899                                                  lpni_kref);
1900         struct lnet_peer_table *ptable;
1901         struct lnet_peer_net *lpn;
1902
1903         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1904
1905         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1906         LASSERT(list_empty(&lpni->lpni_txq));
1907         LASSERT(lpni->lpni_txqnob == 0);
1908         LASSERT(list_empty(&lpni->lpni_peer_nis));
1909         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1910
1911         lpn = lpni->lpni_peer_net;
1912         lpni->lpni_peer_net = NULL;
1913         lpni->lpni_net = NULL;
1914
1915         if (!list_empty(&lpni->lpni_hashlist)) {
1916                 /* remove the peer ni from the zombie list */
1917                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1918                 spin_lock(&ptable->pt_zombie_lock);
1919                 list_del_init(&lpni->lpni_hashlist);
1920                 ptable->pt_zombies--;
1921                 spin_unlock(&ptable->pt_zombie_lock);
1922         }
1923
1924         if (lpni->lpni_pref_nnids > 1) {
1925                 struct lnet_nid_list *ne, *tmp;
1926
1927                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1928                                          nl_list) {
1929                         list_del_init(&ne->nl_list);
1930                         LIBCFS_FREE(ne, sizeof(*ne));
1931                 }
1932         }
1933         LIBCFS_FREE(lpni, sizeof(*lpni));
1934
1935         if (lpn)
1936                 lnet_peer_net_decref_locked(lpn);
1937 }
1938
1939 struct lnet_peer_ni *
1940 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1941 {
1942         struct lnet_peer_ni *lpni = NULL;
1943         int rc;
1944
1945         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1946                 return ERR_PTR(-ESHUTDOWN);
1947
1948         /*
1949          * find if a peer_ni already exists.
1950          * If so then just return that.
1951          */
1952         lpni = lnet_find_peer_ni_locked(nid);
1953         if (lpni)
1954                 return lpni;
1955
1956         lnet_net_unlock(cpt);
1957
1958         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1959         if (rc) {
1960                 lpni = ERR_PTR(rc);
1961                 goto out_net_relock;
1962         }
1963
1964         lpni = lnet_find_peer_ni_locked(nid);
1965         LASSERT(lpni);
1966
1967 out_net_relock:
1968         lnet_net_lock(cpt);
1969
1970         return lpni;
1971 }
1972
1973 /*
1974  * Get a peer_ni for the given nid, create it if necessary. Takes a
1975  * hold on the peer_ni.
1976  */
1977 struct lnet_peer_ni *
1978 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1979 {
1980         struct lnet_peer_ni *lpni = NULL;
1981         int rc;
1982
1983         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1984                 return ERR_PTR(-ESHUTDOWN);
1985
1986         /*
1987          * find if a peer_ni already exists.
1988          * If so then just return that.
1989          */
1990         lpni = lnet_find_peer_ni_locked(nid);
1991         if (lpni)
1992                 return lpni;
1993
1994         /*
1995          * Slow path:
1996          * use the lnet_api_mutex to serialize the creation of the peer_ni
1997          * and the creation/deletion of the local ni/net. When a local ni is
1998          * created, if there exists a set of peer_nis on that network,
1999          * they need to be traversed and updated. When a local NI is
2000          * deleted, which could result in a network being deleted, then
2001          * all peer nis on that network need to be removed as well.
2002          *
2003          * Creation through traffic should also be serialized with
2004          * creation through DLC.
2005          */
2006         lnet_net_unlock(cpt);
2007         mutex_lock(&the_lnet.ln_api_mutex);
2008         /*
2009          * Shutdown is only set while holding the ln_api_mutex, so a
2010          * single check here is sufficient.
2011          */
2012         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2013                 lpni = ERR_PTR(-ESHUTDOWN);
2014                 goto out_mutex_unlock;
2015         }
2016
2017         rc = lnet_peer_ni_traffic_add(nid, pref);
2018         if (rc) {
2019                 lpni = ERR_PTR(rc);
2020                 goto out_mutex_unlock;
2021         }
2022
2023         lpni = lnet_find_peer_ni_locked(nid);
2024         LASSERT(lpni);
2025
2026 out_mutex_unlock:
2027         mutex_unlock(&the_lnet.ln_api_mutex);
2028         lnet_net_lock(cpt);
2029
2030         /* Lock has been dropped, check again for shutdown. */
2031         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2032                 if (!IS_ERR(lpni))
2033                         lnet_peer_ni_decref_locked(lpni);
2034                 lpni = ERR_PTR(-ESHUTDOWN);
2035         }
2036
2037         return lpni;
2038 }
2039
2040 bool
2041 lnet_peer_gw_discovery(struct lnet_peer *lp)
2042 {
2043         bool rc = false;
2044
2045         spin_lock(&lp->lp_lock);
2046         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2047                 rc = true;
2048         spin_unlock(&lp->lp_lock);
2049
2050         return rc;
2051 }
2052
2053 bool
2054 lnet_peer_is_uptodate(struct lnet_peer *lp)
2055 {
2056         bool rc;
2057
2058         spin_lock(&lp->lp_lock);
2059         rc = lnet_peer_is_uptodate_locked(lp);
2060         spin_unlock(&lp->lp_lock);
2061         return rc;
2062 }
2063
2064 /*
2065  * Is a peer uptodate from the point of view of discovery?
2066  *
2067  * If it is currently being processed, obviously not.
2068  * A forced Ping or Push is also handled by the discovery thread.
2069  *
2070  * Otherwise look at whether the peer needs rediscovering.
2071  */
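/*
 * Roughly equivalent predicate, as a sketch of the logic below:
 *
 *	uptodate = !(state & (LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
 *			      LNET_PEER_FORCE_PUSH | LNET_PEER_REDISCOVER)) &&
 *		   !lnet_peer_needs_push(lp) &&
 *		   (state & LNET_PEER_DISCOVERED) &&
 *		   (state & LNET_PEER_NIDS_UPTODATE);
 */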
2072 bool
2073 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2074 __must_hold(&lp->lp_lock)
2075 {
2076         bool rc;
2077
2078         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2079                             LNET_PEER_FORCE_PING |
2080                             LNET_PEER_FORCE_PUSH)) {
2081                 rc = false;
2082         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2083                 rc = false;
2084         } else if (lnet_peer_needs_push(lp)) {
2085                 rc = false;
2086         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2087                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2088                         rc = true;
2089                 else
2090                         rc = false;
2091         } else {
2092                 rc = false;
2093         }
2094
2095         return rc;
2096 }
2097
2098 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2099 void
2100 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2101 {
2102         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2103          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2104          * when adding to the list and queuing the peer to ensure that we do not
2105          * strand any messages on the lp_dc_pendq. This scheme ensures the
2106          * message will be resent even if the peer is already being discovered.
2107          * Therefore we needn't check the return value of
2108          * lnet_peer_queue_for_discovery(lp).
2109          */
2110         lnet_net_lock(LNET_LOCK_EX);
2111         spin_lock(&lp->lp_lock);
2112         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2113         spin_unlock(&lp->lp_lock);
2114         lnet_peer_queue_for_discovery(lp);
2115         lnet_net_unlock(LNET_LOCK_EX);
2116 }
2117
2118 /*
2119  * Queue a peer for the attention of the discovery thread.  Call with
2120  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2121  * -EALREADY if the peer was already queued.
2122  */
2123 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2124 {
2125         int rc;
2126
2127         spin_lock(&lp->lp_lock);
2128         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2129                 lp->lp_state |= LNET_PEER_DISCOVERING;
2130         spin_unlock(&lp->lp_lock);
2131         if (list_empty(&lp->lp_dc_list)) {
2132                 lnet_peer_addref_locked(lp);
2133                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2134                 wake_up(&the_lnet.ln_dc_waitq);
2135                 rc = 0;
2136         } else {
2137                 rc = -EALREADY;
2138         }
2139
2140         CDEBUG(D_NET, "Queue peer %s: %d\n",
2141                libcfs_nid2str(lp->lp_primary_nid), rc);
2142
2143         return rc;
2144 }
2145
2146 /*
2147  * Discovery of a peer is complete. Wake all waiters on the peer.
2148  * Call with lnet_net_lock/EX held.
2149  */
2150 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2151 {
2152         struct lnet_msg *msg, *tmp;
2153         int rc = 0;
2154         LIST_HEAD(pending_msgs);
2155
2156         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2157                libcfs_nid2str(lp->lp_primary_nid));
2158
2159         list_del_init(&lp->lp_dc_list);
2160         spin_lock(&lp->lp_lock);
2161         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2162         spin_unlock(&lp->lp_lock);
2163         wake_up(&lp->lp_dc_waitq);
2164
2165         if (lp->lp_rtr_refcount > 0)
2166                 lnet_router_discovery_complete(lp);
2167
2168         lnet_net_unlock(LNET_LOCK_EX);
2169
2170         /* iterate through all pending messages and send them again */
2171         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2172                 list_del_init(&msg->msg_list);
2173                 if (lp->lp_dc_error) {
2174                         lnet_finalize(msg, lp->lp_dc_error);
2175                         continue;
2176                 }
2177
2178                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2179                        lnet_msgtyp2str(msg->msg_type),
2180                        libcfs_id2str(msg->msg_target));
2181                 rc = lnet_send(msg->msg_src_nid_param, msg,
2182                                msg->msg_rtr_nid_param);
2183                 if (rc < 0) {
2184                         CNETERR("Error sending %s to %s: %d\n",
2185                                lnet_msgtyp2str(msg->msg_type),
2186                                libcfs_id2str(msg->msg_target), rc);
2187                         lnet_finalize(msg, rc);
2188                 }
2189         }
2190         lnet_net_lock(LNET_LOCK_EX);
2191         lnet_peer_decref_locked(lp);
2192 }
2193
2194 /*
2195  * Handle inbound push.
2196  * Like any event handler, called with lnet_res_lock/CPT held.
2197  */
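/*
 * Outline of the handling below: look up the peer by source NID, then
 * under lp_lock (1) fall back to a forced ping if the event carries an
 * error or the ping info fails validation, (2) drop the push if the
 * peer is not Multi-Rail, (3) record whether the peer has discovery
 * disabled, (4) update the MULTI_RAIL state, (5) force a ping if the
 * Put was truncated, and (6) otherwise stash the pushed data for the
 * discovery thread; finally queue the peer for discovery.
 */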
2198 void lnet_peer_push_event(struct lnet_event *ev)
2199 {
2200         struct lnet_ping_buffer *pbuf;
2201         struct lnet_peer *lp;
2202
2203         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2204
2205         /* lnet_find_peer() adds a refcount */
2206         lp = lnet_find_peer(ev->source.nid);
2207         if (!lp) {
2208                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2209                        libcfs_nid2str(ev->initiator.nid),
2210                        libcfs_nid2str(ev->source.nid));
2211                 pbuf->pb_needs_post = true;
2212                 return;
2213         }
2214
2215         /* Ensure peer state remains consistent while we modify it. */
2216         spin_lock(&lp->lp_lock);
2217
2218         /*
2219          * If some kind of error happened the contents of the message
2220          * cannot be used. Clear the NIDS_UPTODATE and set the
2221          * FORCE_PING flag to trigger a ping.
2222          */
2223         if (ev->status) {
2224                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2225                 lp->lp_state |= LNET_PEER_FORCE_PING;
2226                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2227                        ev->status,
2228                        libcfs_nid2str(lp->lp_primary_nid),
2229                        libcfs_nid2str(ev->source.nid));
2230                 goto out;
2231         }
2232
2233         /*
2234          * A push with invalid or corrupted info. Clear the UPTODATE
2235          * flag to trigger a ping.
2236          */
2237         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2238                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2239                 lp->lp_state |= LNET_PEER_FORCE_PING;
2240                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2241                        libcfs_nid2str(lp->lp_primary_nid));
2242                 goto out;
2243         }
2244
2245         /*
2246          * Make sure we'll allocate the correct size ping buffer when
2247          * pinging the peer.
2248          */
2249         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2250                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2251
2252         /*
2253          * A non-Multi-Rail peer is not supposed to be capable of
2254          * sending a push.
2255          */
2256         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2257                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2258                        libcfs_nid2str(lp->lp_primary_nid));
2259                 goto out;
2260         }
2261
2262         /*
2263          * The peer may have discovery disabled at its end. Set
2264          * NO_DISCOVERY as appropriate.
2265          */
2266         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2267                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2268                        libcfs_nid2str(lp->lp_primary_nid));
2269                 /*
2270                  * Mark the peer for deletion if we already know about it
2271                  * and it has gone from discovery enabled to disabled.
2272                  */
2273                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2274                                       LNET_PEER_DISCOVERING)) &&
2275                      lp->lp_state & LNET_PEER_DISCOVERED) {
2276                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2277                                libcfs_nid2str(lp->lp_primary_nid),
2278                                lp->lp_state);
2279                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2280                 }
2281                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2282         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2283                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2284                        libcfs_nid2str(lp->lp_primary_nid));
2285                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2286         }
2287
2288         /*
2289          * Update the MULTI_RAIL flag based on the push. If the peer
2290          * was configured with DLC then the setting should match what
2291          * DLC put in.
2292          * NB: We verified above that the MR feature bit is set in pi_features
2293          */
2294         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2295                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2296                        libcfs_nid2str(lp->lp_primary_nid), lp);
2297         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2298                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2299                       libcfs_nid2str(lp->lp_primary_nid));
2300         } else if (lnet_peer_discovery_disabled) {
2301                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2302                        libcfs_nid2str(lp->lp_primary_nid), lp);
2303         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2304                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2305                        libcfs_nid2str(lp->lp_primary_nid), lp);
2306         } else {
2307                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2308                        libcfs_nid2str(lp->lp_primary_nid), lp);
2309                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2310                 lnet_peer_clr_non_mr_pref_nids(lp);
2311         }
2312
2313         /*
2314          * Check for truncation of the Put message. Clear the
2315          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2316          * and tell discovery to allocate a bigger buffer.
2317          */
2318         if (ev->mlength < ev->rlength) {
2319                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2320                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2321                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2322                 lp->lp_state |= LNET_PEER_FORCE_PING;
2323                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2324                        libcfs_nid2str(lp->lp_primary_nid),
2325                        pbuf->pb_info.pi_nnis);
2326                 goto out;
2327         }
2328
2329         /* always assume new data */
2330         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2331         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2332
2333         /*
2334          * If there is data present that hasn't been processed yet,
2335          * we'll replace it if the Put contained newer data and it
2336          * fits. We're racing with a Ping or earlier Push in this
2337          * case.
2338          */
2339         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2340                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2341                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2342                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2343                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2344                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2345                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2346                               libcfs_nid2str(lp->lp_primary_nid),
2347                               LNET_PING_BUFFER_SEQNO(pbuf),
2348                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2349                 }
2350                 goto out;
2351         }
2352
2353         /*
2354          * Allocate a buffer to copy the data. On a failure we drop
2355          * the Push and set FORCE_PING to force the discovery
2356          * thread to fix the problem by pinging the peer.
2357          */
2358         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2359         if (!lp->lp_data) {
2360                 lp->lp_state |= LNET_PEER_FORCE_PING;
2361                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2362                        libcfs_nid2str(lp->lp_primary_nid),
2363                        LNET_PING_BUFFER_SEQNO(pbuf));
2364                 goto out;
2365         }
2366
2367         /* Success */
2368         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2369                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2370         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2371         CDEBUG(D_NET, "Received Push %s %u\n",
2372                libcfs_nid2str(lp->lp_primary_nid),
2373                LNET_PING_BUFFER_SEQNO(pbuf));
2374
2375 out:
2376         /* We've processed this buffer. It can be reposted */
2377         pbuf->pb_needs_post = true;
2378
2379         /*
2380          * Queue the peer for discovery if it is not already queued. If it
2381          * was already queued, move it back onto the request queue and wake
2382          * the discovery thread, because its status has changed.
2383          */
2384         spin_unlock(&lp->lp_lock);
2385         lnet_net_lock(LNET_LOCK_EX);
2386         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2387                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2388                 wake_up(&the_lnet.ln_dc_waitq);
2389         }
2390         /* Drop refcount from lookup */
2391         lnet_peer_decref_locked(lp);
2392         lnet_net_unlock(LNET_LOCK_EX);
2393 }
2394
2395 /*
2396  * Clear the discovery error state, unless we're already discovering
2397  * this peer, in which case the error is current.
2398  */
2399 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2400 {
2401         spin_lock(&lp->lp_lock);
2402         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2403                 lp->lp_dc_error = 0;
2404         spin_unlock(&lp->lp_lock);
2405 }
2406
2407 /*
2408  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2409  * dropped/retaken within this function. An lnet_peer_ni is passed in
2410  * because discovery could tear down an lnet_peer.
2411  */
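/*
 * Return value sketch, mirroring the checks at the end of the function:
 * 0 when discovery completed (or was merely queued because block is
 * false), -EINTR if a signal was received, -ESHUTDOWN if the discovery
 * thread is stopping, or lp_dc_error if discovery itself failed.
 */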
2412 int
2413 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2414 {
2415         DEFINE_WAIT(wait);
2416         struct lnet_peer *lp;
2417         int rc = 0;
2418         int count = 0;
2419
2420 again:
2421         lnet_net_unlock(cpt);
2422         lnet_net_lock(LNET_LOCK_EX);
2423         lp = lpni->lpni_peer_net->lpn_peer;
2424         lnet_peer_clear_discovery_error(lp);
2425
2426         /*
2427          * We're willing to be interrupted. The lpni can become a
2428          * zombie if we race with DLC, so we must check for that.
2429          */
2430         for (;;) {
2431                 /* Keep lp alive when the lnet_net_lock is unlocked */
2432                 lnet_peer_addref_locked(lp);
2433                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2434                 if (signal_pending(current))
2435                         break;
2436                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2437                         break;
2438                 /*
2439                  * Don't repeat discovery if discovery is disabled. This
2440                  * ensures we can still use discovery as a standard ping,
2441                  * for backwards compatibility with routers that lack
2442                  * discovery or have it disabled.
2443                  */
2444                 if (lnet_is_discovery_disabled(lp) && count > 0)
2445                         break;
2446                 if (lp->lp_dc_error)
2447                         break;
2448                 if (lnet_peer_is_uptodate(lp))
2449                         break;
2450                 lnet_peer_queue_for_discovery(lp);
2451                 count++;
2452                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2453
2454                 /*
2455                  * If caller requested a non-blocking operation then
2456                  * return immediately. Once discovery is complete any
2457                  * pending messages that were stopped due to discovery
2458                  * will be transmitted.
2459                  */
2460                 if (!block)
2461                         break;
2462
2463                 lnet_net_unlock(LNET_LOCK_EX);
2464                 schedule();
2465                 finish_wait(&lp->lp_dc_waitq, &wait);
2466                 lnet_net_lock(LNET_LOCK_EX);
2467                 lnet_peer_decref_locked(lp);
2468                 /* Peer may have changed */
2469                 lp = lpni->lpni_peer_net->lpn_peer;
2470         }
2471         finish_wait(&lp->lp_dc_waitq, &wait);
2472
2473         lnet_net_unlock(LNET_LOCK_EX);
2474         lnet_net_lock(cpt);
2475         lnet_peer_decref_locked(lp);
2476         /*
2477          * The peer may have changed, so re-check and rediscover if that turns
2478          * out to have been the case. The reference count on lp ensured that
2479          * even if it was unlinked from lpni the memory could not be recycled.
2480          * Thus the check below is sufficient to determine whether the peer
2481          * changed. If the peer changed, then lp must not be dereferenced.
2482          */
2483         if (lp != lpni->lpni_peer_net->lpn_peer)
2484                 goto again;
2485
2486         if (signal_pending(current))
2487                 rc = -EINTR;
2488         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2489                 rc = -ESHUTDOWN;
2490         else if (lp->lp_dc_error)
2491                 rc = lp->lp_dc_error;
2492         else if (!block)
2493                 CDEBUG(D_NET, "non-blocking discovery\n");
2494         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2495                 goto again;
2496
2497         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2498                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2499                libcfs_nid2str(lpni->lpni_nid), rc,
2500                (!block) ? "pending discovery" : "discovery complete");
2501
2502         return rc;
2503 }
2504
2505 /* Handle an incoming ack for a push. */
2506 static void
2507 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2508 {
2509         struct lnet_ping_buffer *pbuf;
2510
2511         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2512         spin_lock(&lp->lp_lock);
2513         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2514         lp->lp_push_error = ev->status;
2515         if (ev->status)
2516                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2517         else
2518                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2519         spin_unlock(&lp->lp_lock);
2520
2521         CDEBUG(D_NET, "peer %s ev->status %d\n",
2522                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2523 }
2524
2525 /* Handle a Reply message. This is the reply to a Ping message. */
2526 static void
2527 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2528 {
2529         struct lnet_ping_buffer *pbuf;
2530         int rc;
2531
2532         spin_lock(&lp->lp_lock);
2533
2534         lp->lp_disc_src_nid = ev->target.nid;
2535         lp->lp_disc_dst_nid = ev->source.nid;
2536
2537         /*
2538          * If some kind of error happened the contents of message
2539          * cannot be used. Set PING_FAILED to trigger a retry.
2540          */
2541         if (ev->status) {
2542                 lp->lp_state |= LNET_PEER_PING_FAILED;
2543                 lp->lp_ping_error = ev->status;
2544                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2545                        ev->status,
2546                        libcfs_nid2str(lp->lp_primary_nid),
2547                        libcfs_nid2str(ev->source.nid));
2548                 goto out;
2549         }
2550
2551         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2552         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2553                 lnet_swap_pinginfo(pbuf);
2554
2555         /*
2556          * A reply with invalid or corrupted info. Set PING_FAILED to
2557          * trigger a retry.
2558          */
2559         rc = lnet_ping_info_validate(&pbuf->pb_info);
2560         if (rc) {
2561                 lp->lp_state |= LNET_PEER_PING_FAILED;
2562                 lp->lp_ping_error = 0;
2563                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2564                        libcfs_nid2str(lp->lp_primary_nid), rc);
2565                 goto out;
2566         }
2567
2568         /*
2569          * The peer may have discovery disabled at its end. Set
2570          * NO_DISCOVERY as appropriate.
2571          */
2572         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2573             lnet_peer_discovery_disabled) {
2574                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2575                        libcfs_nid2str(lp->lp_primary_nid));
2576
2577                 /* Detect whether this peer has toggled discovery from on to
2578                  * off and whether we can delete and re-create the peer. Peers
2579                  * that were manually configured cannot be deleted by discovery.
2580                  * We need to delete this peer and re-create it if the peer was
2581                  * not configured manually, is currently considered DD capable,
2582                  * and either:
2583                  * 1. We've already discovered the peer (the peer has toggled
2584                  *    the discovery feature from on to off), or
2585                  * 2. The peer is considered MR, but it was not user configured
2586                  *    (this was a "temporary" peer created via the kernel APIs
2587                  *     that we're discovering for the first time)
2588                  */
2589                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2590                                       LNET_PEER_NO_DISCOVERY)) &&
2591                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2592                                      LNET_PEER_MULTI_RAIL))) {
2593                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2594                                libcfs_nid2str(lp->lp_primary_nid),
2595                                lp->lp_state);
2596                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2597                 }
2598                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2599         } else {
2600                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2601                        libcfs_nid2str(lp->lp_primary_nid));
2602                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2603         }
2604
2605         /*
2606          * Update the MULTI_RAIL flag based on the reply. If the peer
2607          * was configured with DLC then the setting should match what
2608          * DLC put in.
2609          */
2610         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2611                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2612                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2613                                libcfs_nid2str(lp->lp_primary_nid), lp);
2614                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2615                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2616                               libcfs_nid2str(lp->lp_primary_nid));
2617                 } else if (lnet_peer_discovery_disabled) {
2618                         CDEBUG(D_NET,
2619                                "peer %s(%p) not MR: DD disabled locally\n",
2620                                libcfs_nid2str(lp->lp_primary_nid), lp);
2621                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2622                         CDEBUG(D_NET,
2623                                "peer %s(%p) not MR: DD disabled remotely\n",
2624                                libcfs_nid2str(lp->lp_primary_nid), lp);
2625                 } else {
2626                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2627                                libcfs_nid2str(lp->lp_primary_nid), lp);
2628                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2629                         lnet_peer_clr_non_mr_pref_nids(lp);
2630                 }
2631         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2632                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2633                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2634                               libcfs_nid2str(lp->lp_primary_nid));
2635                 } else {
2636                         CERROR("Multi-Rail state vanished from %s\n",
2637                                libcfs_nid2str(lp->lp_primary_nid));
2638                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2639                 }
2640         }
2641
2642         /*
2643          * Make sure we'll allocate the correct size ping buffer when
2644          * pinging the peer.
2645          */
2646         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2647                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2648
2649         /*
2650          * Check for truncation of the Reply. Clear PING_SENT and set
2651          * PING_FAILED to trigger a retry.
2652          */
2653         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2654                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2655                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2656                 lp->lp_state |= LNET_PEER_PING_FAILED;
2657                 lp->lp_ping_error = 0;
2658                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2659                        libcfs_nid2str(lp->lp_primary_nid),
2660                        pbuf->pb_info.pi_nnis);
2661                 goto out;
2662         }
2663
2664         /*
2665          * Check the sequence numbers in the reply. These are only
2666          * available if the reply came from a Multi-Rail peer.
2667          */
2668         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2669             pbuf->pb_info.pi_nnis > 1 &&
2670             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2671                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2672                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2673                                 libcfs_nid2str(lp->lp_primary_nid),
2674                                 LNET_PING_BUFFER_SEQNO(pbuf),
2675                                 lp->lp_peer_seqno);
2676
2677                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2678         }
2679
2680         /* We're happy with the state of the data in the buffer. */
2681         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2682                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2683         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2684                 lnet_ping_buffer_decref(lp->lp_data);
2685         else
2686                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2687         lnet_ping_buffer_addref(pbuf);
2688         lp->lp_data = pbuf;
2689 out:
2690         lp->lp_state &= ~LNET_PEER_PING_SENT;
2691         spin_unlock(&lp->lp_lock);
2692
2693         lnet_net_lock(LNET_LOCK_EX);
2694         /*
2695          * If this peer is a gateway, call the routing callback to
2696          * handle the ping reply
2697          */
2698         if (lp->lp_rtr_refcount > 0)
2699                 lnet_router_discovery_ping_reply(lp);
2700         lnet_net_unlock(LNET_LOCK_EX);
2701 }
2702
2703 /*
2704  * Send event handling. Only matters for error cases, where we clean
2705  * up state on the peer and peer_ni that would otherwise be updated in
2706  * the REPLY event handler for a successful Ping, and the ACK event
2707  * handler for a successful Push.
2708  */
2709 static int
2710 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2711 {
2712         int rc = 0;
2713
2714         if (!ev->status)
2715                 goto out;
2716
2717         spin_lock(&lp->lp_lock);
2718         if (ev->msg_type == LNET_MSG_GET) {
2719                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2720                 lp->lp_state |= LNET_PEER_PING_FAILED;
2721                 lp->lp_ping_error = ev->status;
2722         } else { /* ev->msg_type == LNET_MSG_PUT */
2723                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2724                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2725                 lp->lp_push_error = ev->status;
2726         }
2727         spin_unlock(&lp->lp_lock);
2728         rc = LNET_REDISCOVER_PEER;
2729 out:
2730         CDEBUG(D_NET, "%s Send to %s: %d\n",
2731                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2732                 libcfs_nid2str(ev->target.nid), rc);
2733         return rc;
2734 }
2735
2736 /*
2737  * Unlink event handling. This event is only seen if a call to
2738  * LNetMDUnlink() caused the event to be unlinked. If this call was
2739  * made after the event was set up in LNetGet() or LNetPut() then we
2740  * assume the Ping or Push timed out.
2741  */
2742 static void
2743 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2744 {
2745         spin_lock(&lp->lp_lock);
2746         /* We've passed through LNetGet() */
2747         if (lp->lp_state & LNET_PEER_PING_SENT) {
2748                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2749                 lp->lp_state |= LNET_PEER_PING_FAILED;
2750                 lp->lp_ping_error = -ETIMEDOUT;
2751                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2752                         libcfs_nid2str(lp->lp_primary_nid));
2753         }
2754         /* We've passed through LNetPut() */
2755         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2756                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2757                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2758                 lp->lp_push_error = -ETIMEDOUT;
2759                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2760                         libcfs_nid2str(lp->lp_primary_nid));
2761         }
2762         spin_unlock(&lp->lp_lock);
2763 }
2764
2765 /*
2766  * Event handler for the discovery EQ.
2767  *
2768  * Called with lnet_res_lock(cpt) held. The cpt is the
2769  * lnet_cpt_of_cookie() of the md handle cookie.
2770  */
2771 static void lnet_discovery_event_handler(struct lnet_event *event)
2772 {
2773         struct lnet_peer *lp = event->md_user_ptr;
2774         struct lnet_ping_buffer *pbuf;
2775         int rc;
2776
2777         /* discovery needs to take another look */
2778         rc = LNET_REDISCOVER_PEER;
2779
2780         CDEBUG(D_NET, "Received event: %d\n", event->type);
2781
2782         switch (event->type) {
2783         case LNET_EVENT_ACK:
2784                 lnet_discovery_event_ack(lp, event);
2785                 break;
2786         case LNET_EVENT_REPLY:
2787                 lnet_discovery_event_reply(lp, event);
2788                 break;
2789         case LNET_EVENT_SEND:
2790                 /* Only send failure triggers a retry. */
2791                 rc = lnet_discovery_event_send(lp, event);
2792                 break;
2793         case LNET_EVENT_UNLINK:
2794                 /* LNetMDUnlink() was called */
2795                 lnet_discovery_event_unlink(lp, event);
2796                 break;
2797         default:
2798                 /* Invalid events. */
2799                 LBUG();
2800         }
2801         lnet_net_lock(LNET_LOCK_EX);
2802         if (event->unlinked) {
2803                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2804                 lnet_ping_buffer_decref(pbuf);
2805                 lnet_peer_decref_locked(lp);
2806         }
2807
2808         /* put peer back at end of request queue, if discovery not already
2809          * done */
2810         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2811             lnet_peer_queue_for_discovery(lp)) {
2812                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2813                 wake_up(&the_lnet.ln_dc_waitq);
2814         }
2815         lnet_net_unlock(LNET_LOCK_EX);
2816 }
2817
2818 /*
2819  * Build a peer from incoming data.
2820  *
2821  * The NIDs in the incoming data are supposed to be structured as follows:
2822  *  - loopback
2823  *  - primary NID
2824  *  - other NIDs in same net
2825  *  - NIDs in second net
2826  *  - NIDs in third net
2827  *  - ...
2828  * This is due to the way the list of NIDs in the data is created.
2829  *
2830  * Note that this function will mark the peer uptodate unless an
2831  * ENOMEM is encountered. All other errors are due to a conflict
2832  * between the DLC configuration and what discovery sees. We treat DLC
2833  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2834  * peer from becoming stuck in discovery.
2835  */
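/*
 * For example (illustrative NIDs only), a Multi-Rail peer with two NIs
 * on tcp and one on o2ib would typically report:
 *
 *	pi_ni[0].ns_nid = 0@lo
 *	pi_ni[1].ns_nid = 10.0.0.1@tcp      (primary NID)
 *	pi_ni[2].ns_nid = 10.0.0.2@tcp
 *	pi_ni[3].ns_nid = 192.168.0.1@o2ib
 */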
2836 static int lnet_peer_merge_data(struct lnet_peer *lp,
2837                                 struct lnet_ping_buffer *pbuf)
2838 {
2839         struct lnet_peer_net *lpn;
2840         struct lnet_peer_ni *lpni;
2841         lnet_nid_t *curnis = NULL;
2842         struct lnet_ni_status *addnis = NULL;
2843         lnet_nid_t *delnis = NULL;
2844         unsigned flags;
2845         int ncurnis;
2846         int naddnis;
2847         int ndelnis;
2848         int nnis = 0;
2849         int i;
2850         int j;
2851         int rc;
2852
2853         flags = LNET_PEER_DISCOVERED;
2854         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2855                 flags |= LNET_PEER_MULTI_RAIL;
2856
2857         /*
2858          * Cache the routing feature for the peer; whether it is enabled
2859          * or disabled, as reported by the remote peer.
2860          */
2861         spin_lock(&lp->lp_lock);
2862         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2863                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2864         else
2865                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2866         spin_unlock(&lp->lp_lock);
2867
2868         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2869         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2870         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2871         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2872         if (!curnis || !addnis || !delnis) {
2873                 rc = -ENOMEM;
2874                 goto out;
2875         }
2876         ncurnis = 0;
2877         naddnis = 0;
2878         ndelnis = 0;
2879
2880         /* Construct the list of NIDs present in peer. */
2881         lpni = NULL;
2882         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2883                 curnis[ncurnis++] = lpni->lpni_nid;
2884
2885         /*
2886          * Check for NIDs in pbuf not present in curnis[].
2887          * The loop starts at 1 to skip the loopback NID.
2888          */
2889         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2890                 for (j = 0; j < ncurnis; j++)
2891                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2892                                 break;
2893                 if (j == ncurnis)
2894                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2895         }
2896         /*
2897          * Check for NIDs in curnis[] not present in pbuf.
2898          * The nested loop starts at 1 to skip the loopback NID.
2899          *
2900          * But never add the loopback NID to delnis[]: if it is
2901          * present in curnis[] then this peer is for this node.
2902          */
2903         for (i = 0; i < ncurnis; i++) {
2904                 if (curnis[i] == LNET_NID_LO_0)
2905                         continue;
2906                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2907                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2908                                 /*
2909                                  * update the information we cache for the
2910                                  * peer with the latest information we
2911                                  * received
2912                                  */
2913                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2914                                 if (lpni) {
2915                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2916                                         lnet_peer_ni_decref_locked(lpni);
2917                                 }
2918                                 break;
2919                         }
2920                 }
2921                 if (j == pbuf->pb_info.pi_nnis)
2922                         delnis[ndelnis++] = curnis[i];
2923         }
2924
2925         /*
2926          * If we get here and discovery is disabled then we don't want
2927          * to add or delete any NIs. We have already updated the status of
2928          * the NIDs we do have, so call it a day.
2929          */
2930         rc = 0;
2931         if (lnet_is_discovery_disabled(lp))
2932                 goto out;
2933
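             /* Add any NIDs reported by the peer that we do not have yet and
              * record their status. Only -ENOMEM aborts the merge; other
              * errors are ignored because DLC configuration overrides
              * Discovery.
              */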
2934         for (i = 0; i < naddnis; i++) {
2935                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2936                 if (rc) {
2937                         CERROR("Error adding NID %s to peer %s: %d\n",
2938                                libcfs_nid2str(addnis[i].ns_nid),
2939                                libcfs_nid2str(lp->lp_primary_nid), rc);
2940                         if (rc == -ENOMEM)
2941                                 goto out;
2942                 }
2943                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2944                 if (lpni) {
2945                         lpni->lpni_ns_status = addnis[i].ns_status;
2946                         lnet_peer_ni_decref_locked(lpni);
2947                 }
2948         }
2949
2950         for (i = 0; i < ndelnis; i++) {
2951                 /*
2952                  * for routers it's okay to delete the primary_nid because
2953                  * the upper layers don't really rely on it. So if we're
2954                  * being told that the router changed its primary_nid
2955                  * then it's okay to delete it.
2956                  */
2957                 if (lp->lp_rtr_refcount > 0)
2958                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2959                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2960                 if (rc) {
2961                         CERROR("Error deleting NID %s from peer %s: %d\n",
2962                                libcfs_nid2str(delnis[i]),
2963                                libcfs_nid2str(lp->lp_primary_nid), rc);
2964                         if (rc == -ENOMEM)
2965                                 goto out;
2966                 }
2967         }
2968
2969         /* The peer net for the primary NID should be the first entry in the
2970          * peer's lp_peer_nets list, and the peer NI for the primary NID should
2971          * be the first entry in its peer net's lpn_peer_nis list.
2972          */
2973         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
2974         if (!lpni) {
2975                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
2976                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
2977                 goto out;
2978         }
2979
2980         lnet_peer_ni_decref_locked(lpni);
2981
2982         lpn = lpni->lpni_peer_net;
2983         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
2984                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
2985
2986         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
2987                 list_move(&lpni->lpni_peer_nis,
2988                           &lpni->lpni_peer_net->lpn_peer_nis);
2989
2990         /*
2991          * Errors other than -ENOMEM are due to peers having been
2992          * configured with DLC. Ignore these because DLC overrides
2993          * Discovery.
2994          */
2995         rc = 0;
2996 out:
2997         CFS_FREE_PTR_ARRAY(curnis, nnis);
2998         CFS_FREE_PTR_ARRAY(addnis, nnis);
2999         CFS_FREE_PTR_ARRAY(delnis, nnis);
3000         lnet_ping_buffer_decref(pbuf);
3001         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3002
3003         if (rc) {
3004                 spin_lock(&lp->lp_lock);
3005                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3006                 lp->lp_state |= LNET_PEER_FORCE_PING;
3007                 spin_unlock(&lp->lp_lock);
3008         }
3009         return rc;
3010 }
3011
3012 /*
3013  * The data in pbuf says lp is its primary peer, but the data was
3014  * received by a different peer. Try to update lp with the data.
3015  */
3016 static int
3017 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3018 {
3019         struct lnet_handle_md mdh;
3020
3021         /* Queue lp for discovery, and force it on the request queue. */
3022         lnet_net_lock(LNET_LOCK_EX);
3023         if (lnet_peer_queue_for_discovery(lp))
3024                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3025         lnet_net_unlock(LNET_LOCK_EX);
3026
3027         LNetInvalidateMDHandle(&mdh);
3028
3029         /*
3030          * Decide whether we can move the peer to the DATA_PRESENT state.
3031          *
3032          * We replace stale data for a multi-rail peer, repair PING_FAILED
3033          * status, and preempt FORCE_PING.
3034          *
3035          * If after that we have DATA_PRESENT, we merge it into this peer.
3036          */
3037         spin_lock(&lp->lp_lock);
3038         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3039                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3040                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3041                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3042                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3043                         lnet_ping_buffer_decref(pbuf);
3044                         pbuf = lp->lp_data;
3045                         lp->lp_data = NULL;
3046                 }
3047         }
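             /* Drop any ping data still cached on the peer; pbuf is merged
              * below instead.
              */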
3048         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3049                 lnet_ping_buffer_decref(lp->lp_data);
3050                 lp->lp_data = NULL;
3051                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3052         }
3053         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3054                 mdh = lp->lp_ping_mdh;
3055                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3056                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3057                 lp->lp_ping_error = 0;
3058         }
3059         if (lp->lp_state & LNET_PEER_FORCE_PING)
3060                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3061         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3062         spin_unlock(&lp->lp_lock);
3063
3064         if (!LNetMDHandleIsInvalid(mdh))
3065                 LNetMDUnlink(mdh);
3066
3067         if (pbuf)
3068                 return lnet_peer_merge_data(lp, pbuf);
3069
3070         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3071         return 0;
3072 }
3073
3074 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3075 {
3076         int i;
3077
3078         for (i = 0; i < pinfo->pi_nnis; i++) {
3079                 if (pinfo->pi_ni[i].ns_nid == nid)
3080                         return true;
3081         }
3082
3083         return false;
3084 }
3085
3086 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3087  * to the discovery queue a reference was taken that will prevent the peer from
3088  * actually being freed by this function. After this function exits the
3089  * discovery thread should call lnet_peer_discovery_complete() which will
3090  * drop that reference as well as wake any waiters that may also be holding a
3091  * ref on the peer.
3092  */
3093 static int lnet_peer_deletion(struct lnet_peer *lp)
3094 __must_hold(&lp->lp_lock)
3095 {
3096         struct list_head rlist;
3097         struct lnet_route *route, *tmp;
3098         int sensitivity = lp->lp_health_sensitivity;
3099
3100         INIT_LIST_HEAD(&rlist);
3101
3102         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3103                           LNET_PEER_FORCE_PUSH);
3104         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3105                libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3106
3107         /* no-op if lnet_peer_del() has already been called on this peer */
3108         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3109                 return 0;
3110
3111         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3112                 return -ESHUTDOWN;
3113
3114         spin_unlock(&lp->lp_lock);
3115
3116         mutex_lock(&the_lnet.ln_api_mutex);
3117
3118         lnet_net_lock(LNET_LOCK_EX);
3119         /* remove the peer from the discovery work
3120          * queue, if it is on it, in preparation
3121          * for deleting it.
3122          */
3123         if (!list_empty(&lp->lp_dc_list))
3124                 list_del_init(&lp->lp_dc_list);
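             /* Detach all routes that use this peer as their gateway; they are
              * re-added below, after the peer has been deleted, so that the
              * routing configuration is preserved.
              */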
3125         list_for_each_entry_safe(route, tmp,
3126                                  &lp->lp_routes,
3127                                  lr_gwlist)
3128                 lnet_move_route(route, NULL, &rlist);
3129         lnet_net_unlock(LNET_LOCK_EX);
3130
3131         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3132         lnet_peer_del(lp);
3133
3134         list_for_each_entry_safe(route, tmp,
3135                                  &rlist, lr_list) {
3136                 /* re-add these routes */
3137                 lnet_add_route(route->lr_net,
3138                                route->lr_hops,
3139                                route->lr_nid,
3140                                route->lr_priority,
3141                                sensitivity);
3142                 LIBCFS_FREE(route, sizeof(*route));
3143         }
3144
3145         mutex_unlock(&the_lnet.ln_api_mutex);
3146
3147         spin_lock(&lp->lp_lock);
3148
3149         return 0;
3150 }
3151
3152 /*
3153  * Update a peer using the data received.
3154  */
3155 static int lnet_peer_data_present(struct lnet_peer *lp)
3156 __must_hold(&lp->lp_lock)
3157 {
3158         struct lnet_ping_buffer *pbuf;
3159         struct lnet_peer_ni *lpni;
3160         lnet_nid_t nid = LNET_NID_ANY;
3161         unsigned flags;
3162         int rc = 0;
3163
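             /* Take ownership of the cached ping data and mark the peer's NID
              * list up to date; the data is processed below after dropping
              * lp_lock.
              */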
3164         pbuf = lp->lp_data;
3165         lp->lp_data = NULL;
3166         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3167         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3168         spin_unlock(&lp->lp_lock);
3169
3170         /*
3171          * Modifications of peer structures are done while holding the
3172          * ln_api_mutex. A global lock is required because we may be
3173          * modifying multiple peer structures, and a mutex greatly
3174          * simplifies memory management.
3175          *
3176          * The actual changes to the data structures must also protect
3177          * against concurrent lookups, for which the lnet_net_lock in
3178          * LNET_LOCK_EX mode is used.
3179          */
3180         mutex_lock(&the_lnet.ln_api_mutex);
3181         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3182                 rc = -ESHUTDOWN;
3183                 goto out;
3184         }
3185
3186         /*
3187          * If this peer is not on the peer list then it is being torn
3188          * down, and our reference count may be all that is keeping it
3189          * alive. Don't do any work on it.
3190          */
3191         if (list_empty(&lp->lp_peer_list))
3192                 goto out;
3193
3194         flags = LNET_PEER_DISCOVERED;
3195         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3196                 flags |= LNET_PEER_MULTI_RAIL;
3197
3198         /*
3199          * Check whether the primary NID in the message matches the
3200          * primary NID of the peer. If it does, update the peer; if
3201          * it does not, check whether there is already a peer with
3202          * that primary NID. If no such peer exists, try to update
3203          * the primary NID of the current peer (allowed if it was
3204          * created due to message traffic) and complete the update.
3205          * If the peer did exist, hand off the data to it.
3206          *
3207          * The peer for the loopback interface is a special case: this
3208          * is the peer for the local node, and we want to set its
3209          * primary NID to the correct value here. Moreover, this peer
3210          * can show up with only the loopback NID in the ping buffer.
3211          */
3212         if (pbuf->pb_info.pi_nnis <= 1)
3213                 goto out;
3214         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3215         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3216                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3217                 if (!rc)
3218                         rc = lnet_peer_merge_data(lp, pbuf);
3219         /*
3220          * If the primary NID we have cached for the peer is present in the
3221          * ping info returned from the peer, but it is not the primary NID
3222          * reported there, and discovery is disabled, then we don't want to
3223          * update our local peer info by adding or removing NIDs. We just
3224          * want to update the status of the NIDs that we currently have
3225          * recorded in that peer.
3226          */
3227         } else if (lp->lp_primary_nid == nid ||
3228                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3229                     lnet_is_discovery_disabled(lp))) {
3230                 rc = lnet_peer_merge_data(lp, pbuf);
3231         } else {
3232                 lpni = lnet_find_peer_ni_locked(nid);
3233                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3234                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3235                         if (rc) {
3236                                 CERROR("Primary NID error %s versus %s: %d\n",
3237                                        libcfs_nid2str(lp->lp_primary_nid),
3238                                        libcfs_nid2str(nid), rc);
3239                         } else {
3240                                 rc = lnet_peer_merge_data(lp, pbuf);
3241                         }
3242                         if (lpni)
3243                                 lnet_peer_ni_decref_locked(lpni);
3244                 } else {
3245                         struct lnet_peer *new_lp;
3246                         new_lp = lpni->lpni_peer_net->lpn_peer;
3247                         /*
3248                          * if lp has discovery/MR enabled that means new_lp
3249                          * should have discovery/MR enabled as well, since
3250                          * it's the same peer, which we're about to merge
3251                          */
3252                         spin_lock(&lp->lp_lock);
3253                         spin_lock(&new_lp->lp_lock);
3254                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3255                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3256                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3257                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3258                         /* If we're processing a ping reply then we may be
3259                          * about to send a push to the peer that we ping'd.
3260                          * Since the ping reply that we're processing was
3261                          * received by lp, we need to set the discovery source
3262                          * NID for new_lp to the NID stored in lp.
3263                          */
3264                         if (lp->lp_disc_src_nid != LNET_NID_ANY) {
3265                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3266                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3267                         }
3268                         spin_unlock(&new_lp->lp_lock);
3269                         spin_unlock(&lp->lp_lock);
3270
3271                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3272                         lnet_consolidate_routes_locked(lp, new_lp);
3273                         lnet_peer_ni_decref_locked(lpni);
3274                 }
3275         }
3276 out:
3277         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3278                lp->lp_state);
3279         mutex_unlock(&the_lnet.ln_api_mutex);
3280
3281         spin_lock(&lp->lp_lock);
3282         /* Tell discovery to re-check the peer immediately. */
3283         if (!rc)
3284                 rc = LNET_REDISCOVER_PEER;
3285         return rc;
3286 }
3287
3288 /*
3289  * A ping failed. Clear the PING_FAILED state and set the
3290  * FORCE_PING state, to ensure a retry even if discovery is
3291  * disabled. This avoids being left with incorrect state.
3292  */
3293 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3294 __must_hold(&lp->lp_lock)
3295 {
3296         struct lnet_handle_md mdh;
3297         int rc;
3298
3299         mdh = lp->lp_ping_mdh;
3300         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3301         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3302         lp->lp_state |= LNET_PEER_FORCE_PING;
3303         rc = lp->lp_ping_error;
3304         lp->lp_ping_error = 0;
3305         spin_unlock(&lp->lp_lock);
3306
3307         if (!LNetMDHandleIsInvalid(mdh))
3308                 LNetMDUnlink(mdh);
3309
3310         CDEBUG(D_NET, "peer %s:%d\n",
3311                libcfs_nid2str(lp->lp_primary_nid), rc);
3312
3313         spin_lock(&lp->lp_lock);
3314         return rc ? rc : LNET_REDISCOVER_PEER;
3315 }
3316
3317 /* Active side of ping. */
3318 static int lnet_peer_send_ping(struct lnet_peer *lp)
3319 __must_hold(&lp->lp_lock)
3320 {
3321         int nnis;
3322         int rc;
3323         int cpt;
3324
3325         lp->lp_state |= LNET_PEER_PING_SENT;
3326         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3327         spin_unlock(&lp->lp_lock);
3328
3329         cpt = lnet_net_lock_current();
3330         /* Refcount for MD. */
3331         lnet_peer_addref_locked(lp);
3332         lnet_net_unlock(cpt);
3333
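             /* Size the ping request for at least LNET_INTERFACES_MIN NIDs,
              * or as many as the peer reported previously.
              */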
3334         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3335
3336         rc = lnet_send_ping(lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3337                             the_lnet.ln_dc_handler, false);
3338
3339         /*
3340          * If LNetMDBind() in lnet_send_ping() fails we need to drop the
3341          * refcount on the peer here; otherwise LNetMDUnlink() will be
3342          * called, which will eventually do that for us.
3343          */
3344         if (rc > 0) {
3345                 lnet_net_lock(cpt);
3346                 lnet_peer_decref_locked(lp);
3347                 lnet_net_unlock(cpt);
3348                 rc = -rc; /* change the rc to negative value */
3349                 goto fail_error;
3350         } else if (rc < 0) {
3351                 goto fail_error;
3352         }
3353
3354         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3355
3356         spin_lock(&lp->lp_lock);
3357         return 0;
3358
3359 fail_error:
3360         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3361         /*
3362          * The errors that get us here are considered hard errors and
3363          * cause Discovery to terminate. So we clear PING_SENT, but do
3364          * not set either PING_FAILED or FORCE_PING. In fact we need
3365          * to clear PING_FAILED, because the unlink event handler will
3366          * have set it if we called LNetMDUnlink() above.
3367          */
3368         spin_lock(&lp->lp_lock);
3369         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3370         return rc;
3371 }
3372
3373 /*
3374  * This function exists because you cannot call LNetMDUnlink() from an
3375  * event handler.
3376  */
3377 static int lnet_peer_push_failed(struct lnet_peer *lp)
3378 __must_hold(&lp->lp_lock)
3379 {
3380         struct lnet_handle_md mdh;
3381         int rc;
3382
3383         mdh = lp->lp_push_mdh;
3384         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3385         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3386         rc = lp->lp_push_error;
3387         lp->lp_push_error = 0;
3388         spin_unlock(&lp->lp_lock);
3389
3390         if (!LNetMDHandleIsInvalid(mdh))
3391                 LNetMDUnlink(mdh);
3392
3393         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3394         spin_lock(&lp->lp_lock);
3395         return rc ? rc : LNET_REDISCOVER_PEER;
3396 }
3397
3398 /*
3399  * Mark the peer as discovered.
3400  */
3401 static int lnet_peer_discovered(struct lnet_peer *lp)
3402 __must_hold(&lp->lp_lock)
3403 {
3404         lp->lp_state |= LNET_PEER_DISCOVERED;
3405         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3406                           LNET_PEER_REDISCOVER);
3407
3408         lp->lp_dc_error = 0;
3409
3410         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3411
3412         return 0;
3413 }
3414
3415 /* Active side of push. */
3416 static int lnet_peer_send_push(struct lnet_peer *lp)
3417 __must_hold(&lp->lp_lock)
3418 {
3419         struct lnet_ping_buffer *pbuf;
3420         struct lnet_process_id id;
3421         struct lnet_md md;
3422         int cpt;
3423         int rc;
3424
3425         /* Don't push to a non-multi-rail peer. */
3426         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3427                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3428                 /* if peer's NIDs are uptodate then peer is discovered */
3429                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3430                         rc = lnet_peer_discovered(lp);
3431                         return rc;
3432                 }
3433
3434                 return 0;
3435         }
3436
3437         lp->lp_state |= LNET_PEER_PUSH_SENT;
3438         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3439         spin_unlock(&lp->lp_lock);
3440
3441         cpt = lnet_net_lock_current();
3442         pbuf = the_lnet.ln_ping_target;
3443         lnet_ping_buffer_addref(pbuf);
3444         lnet_net_unlock(cpt);
3445
3446         /* Push source MD */
3447         md.start     = &pbuf->pb_info;
3448         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3449         md.threshold = 2; /* Put/Ack */
3450         md.max_size  = 0;
3451         md.options   = LNET_MD_TRACK_RESPONSE;
3452         md.handler   = the_lnet.ln_dc_handler;
3453         md.user_ptr  = lp;
3454
3455         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3456         if (rc) {
3457                 lnet_ping_buffer_decref(pbuf);
3458                 CERROR("Can't bind push source MD: %d\n", rc);
3459                 goto fail_error;
3460         }
3461
3462         cpt = lnet_net_lock_current();
3463         /* Refcount for MD. */
3464         lnet_peer_addref_locked(lp);
3465         id.pid = LNET_PID_LUSTRE;
3466         if (lp->lp_disc_dst_nid != LNET_NID_ANY)
3467                 id.nid = lp->lp_disc_dst_nid;
3468         else
3469                 id.nid = lp->lp_primary_nid;
3470         lnet_net_unlock(cpt);
3471
3472         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3473                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3474                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3475
3476         /*
3477          * Reset the discovery NIDs. There is no need to restrict sending
3478          * from that source if we call lnet_push_update_to_peers(). They
3479          * will be set to specific NIDs if we initiate discovery from
3480          * scratch.
3481          */
3482         lp->lp_disc_src_nid = LNET_NID_ANY;
3483         lp->lp_disc_dst_nid = LNET_NID_ANY;
3484
3485         if (rc)
3486                 goto fail_unlink;
3487
3488         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3489
3490         spin_lock(&lp->lp_lock);
3491         return 0;
3492
3493 fail_unlink:
3494         LNetMDUnlink(lp->lp_push_mdh);
3495         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3496 fail_error:
3497         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3498         /*
3499          * The errors that get us here are considered hard errors and
3500          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3501          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3502          * because the unlink event handler will have set it if we
3503          * called LNetMDUnlink() above.
3504          */
3505         spin_lock(&lp->lp_lock);
3506         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3507         return rc;
3508 }
3509
3510 /*
3511  * An unrecoverable error was encountered during discovery.
3512  * Set error status in peer and abort discovery.
3513  */
3514 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3515 {
3516         CDEBUG(D_NET, "Discovery error %s: %d\n",
3517                libcfs_nid2str(lp->lp_primary_nid), error);
3518
3519         spin_lock(&lp->lp_lock);
3520         lp->lp_dc_error = error;
3521         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3522         lp->lp_state |= LNET_PEER_REDISCOVER;
3523         spin_unlock(&lp->lp_lock);
3524 }
3525
3526 /*
3527  * Wait for work to be queued or some other change that must be
3528  * attended to. Returns non-zero if the discovery thread should shut
3529  * down.
3530  */
3531 static int lnet_peer_discovery_wait_for_work(void)
3532 {
3533         int cpt;
3534         int rc = 0;
3535
3536         DEFINE_WAIT(wait);
3537
3538         cpt = lnet_net_lock_current();
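             /* Sleep until there is something to do: shutdown, a push target
              * that needs resizing or re-posting, a queued discovery request,
              * or messages waiting to be resent.
              */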
3539         for (;;) {
3540                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3541                                 TASK_INTERRUPTIBLE);
3542                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3543                         break;
3544                 if (lnet_push_target_resize_needed() ||
3545                     the_lnet.ln_push_target->pb_needs_post)
3546                         break;
3547                 if (!list_empty(&the_lnet.ln_dc_request))
3548                         break;
3549                 if (!list_empty(&the_lnet.ln_msg_resend))
3550                         break;
3551                 lnet_net_unlock(cpt);
3552
3553                 /*
3554                  * Wake up at most once per second to check for peers that
3555                  * have been stuck on the working queue for longer than
3556                  * the peer timeout.
3557                  */
3558                 schedule_timeout(cfs_time_seconds(1));
3559                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3560                 cpt = lnet_net_lock_current();
3561         }
3562         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3563
3564         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3565                 rc = -ESHUTDOWN;
3566
3567         lnet_net_unlock(cpt);
3568
3569         CDEBUG(D_NET, "woken: %d\n", rc);
3570
3571         return rc;
3572 }
3573
3574 /*
3575  * Messages that were pending on a destroyed peer will be put on a global
3576  * resend list. The message resend list will be checked by
3577  * the discovery thread when it wakes up, and will resend messages. These
3578  * messages can still be sendable if the lpni that was the initial
3579  * cause of the message re-queue was transferred to another peer.
3580  *
3581  * It is possible that LNet could be shut down while we're iterating
3582  * through the list. lnet_shutdown_lndnets() will attempt to access the
3583  * resend list, but will have to wait until the spinlock is released, by
3584  * which time there shouldn't be any more messages on the resend list.
3585  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3586  * for the messages so they can be released. The other case is that
3587  * lnet_shutdown_lndnets() can finalize all the messages before this
3588  * function can visit the resend list, in which case this function will be
3589  * a no-op.
3590  */
3591 static void lnet_resend_msgs(void)
3592 {
3593         struct lnet_msg *msg, *tmp;
3594         LIST_HEAD(resend);
3595         int rc;
3596
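             /* Detach the global resend list under its spinlock, then resend
              * the messages without holding the lock.
              */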
3597         spin_lock(&the_lnet.ln_msg_resend_lock);
3598         list_splice(&the_lnet.ln_msg_resend, &resend);
3599         spin_unlock(&the_lnet.ln_msg_resend_lock);
3600
3601         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3602                 list_del_init(&msg->msg_list);
3603                 rc = lnet_send(msg->msg_src_nid_param, msg,
3604                                msg->msg_rtr_nid_param);
3605                 if (rc < 0) {
3606                         CNETERR("Error sending %s to %s: %d\n",
3607                                lnet_msgtyp2str(msg->msg_type),
3608                                libcfs_id2str(msg->msg_target), rc);
3609                         lnet_finalize(msg, rc);
3610                 }
3611         }
3612 }
3613
3614 /* The discovery thread. */
3615 static int lnet_peer_discovery(void *arg)
3616 {
3617         struct lnet_peer *lp;
3618         int rc;
3619
3620         wait_for_completion(&the_lnet.ln_started);
3621
3622         CDEBUG(D_NET, "started\n");
3623
3624         for (;;) {
3625                 if (lnet_peer_discovery_wait_for_work())
3626                         break;
3627
3628                 if (lnet_push_target_resize_needed())
3629                         lnet_push_target_resize();
3630                 else if (the_lnet.ln_push_target->pb_needs_post)
3631                         lnet_push_target_post(the_lnet.ln_push_target,
3632                                               &the_lnet.ln_push_target_md);
3633
3634                 lnet_resend_msgs();
3635
3636                 lnet_net_lock(LNET_LOCK_EX);
3637                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3638                         lnet_net_unlock(LNET_LOCK_EX);
3639                         break;
3640                 }
3641
3642                 /*
3643                  * Process all incoming discovery work requests.  When
3644                  * discovery must wait on a peer to change state, it
3645                  * is added to the tail of the ln_dc_working queue. A
3646                  * timestamp keeps track of when the peer was added,
3647                  * so we can time out discovery requests that take too
3648                  * long.
3649                  */
3650                 while (!list_empty(&the_lnet.ln_dc_request)) {
3651                         lp = list_first_entry(&the_lnet.ln_dc_request,
3652                                               struct lnet_peer, lp_dc_list);
3653                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3654                         /*
3655                          * set the time the peer was put on the dc_working
3656                          * queue. It shouldn't remain on the queue
3657                          * forever, in case the GET message (for ping)
3658                          * doesn't get a REPLY or the PUT message (for
3659                          * push) doesn't get an ACK.
3660                          */
3661                         lp->lp_last_queued = ktime_get_real_seconds();
3662                         lnet_net_unlock(LNET_LOCK_EX);
3663
3664                         if (lnet_push_target_resize_needed())
3665                                 lnet_push_target_resize();
3666                         else if (the_lnet.ln_push_target->pb_needs_post)
3667                                 lnet_push_target_post(the_lnet.ln_push_target,
3668                                                       &the_lnet.ln_push_target_md);
3669
3670                         /*
3671                          * Select an action depending on the state of
3672                          * the peer and whether discovery is disabled.
3673                          * The check whether discovery is disabled is
3674                          * done after the code that handles processing
3675                          * for arrived data, cleanup for failures, and
3676                          * forcing a Ping or Push.
3677                          */
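                             /* Precedence: deletion, then new ping data, then
                              * ping/push failure cleanup, then a forced ping
                              * or push, then a ping for stale NIDs, then a
                              * push if one is needed, and finally mark the
                              * peer discovered.
                              */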
3678                         spin_lock(&lp->lp_lock);
3679                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3680                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3681                                 lp->lp_state);
3682                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3683                                             LNET_PEER_MARK_DELETED))
3684                                 rc = lnet_peer_deletion(lp);
3685                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3686                                 rc = lnet_peer_data_present(lp);
3687                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3688                                 rc = lnet_peer_ping_failed(lp);
3689                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3690                                 rc = lnet_peer_push_failed(lp);
3691                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3692                                 rc = lnet_peer_send_ping(lp);
3693                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3694                                 rc = lnet_peer_send_push(lp);
3695                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3696                                 rc = lnet_peer_send_ping(lp);
3697                         else if (lnet_peer_needs_push(lp))
3698                                 rc = lnet_peer_send_push(lp);
3699                         else
3700                                 rc = lnet_peer_discovered(lp);
3701                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3702                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3703                                 lp->lp_state, rc);
3704                         spin_unlock(&lp->lp_lock);
3705
3706                         lnet_net_lock(LNET_LOCK_EX);
3707                         if (rc == LNET_REDISCOVER_PEER) {
3708                                 list_move(&lp->lp_dc_list,
3709                                           &the_lnet.ln_dc_request);
3710                         } else if (rc) {
3711                                 lnet_peer_discovery_error(lp, rc);
3712                         }
3713                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3714                                 lnet_peer_discovery_complete(lp);
3715                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3716                                 break;
3717
3718                 }
3719
3720                 lnet_net_unlock(LNET_LOCK_EX);
3721         }
3722
3723         CDEBUG(D_NET, "stopping\n");
3724         /*
3725          * Clean up before telling lnet_peer_discovery_stop() that
3726          * we're done. Use wake_up() below to somewhat reduce the
3727          * size of the thundering herd if there are multiple threads
3728          * waiting on discovery of a single peer.
3729          */
3730
3731         /* Queue cleanup 1: stop all pending pings and pushes. */
3732         lnet_net_lock(LNET_LOCK_EX);
3733         while (!list_empty(&the_lnet.ln_dc_working)) {
3734                 lp = list_first_entry(&the_lnet.ln_dc_working,
3735                                       struct lnet_peer, lp_dc_list);
3736                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3737                 lnet_net_unlock(LNET_LOCK_EX);
3738                 lnet_peer_cancel_discovery(lp);
3739                 lnet_net_lock(LNET_LOCK_EX);
3740         }
3741         lnet_net_unlock(LNET_LOCK_EX);
3742
3743         /* Queue cleanup 2: wait for the expired queue to clear. */
3744         while (!list_empty(&the_lnet.ln_dc_expired))
3745                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3746
3747         /* Queue cleanup 3: clear the request queue. */
3748         lnet_net_lock(LNET_LOCK_EX);
3749         while (!list_empty(&the_lnet.ln_dc_request)) {
3750                 lp = list_first_entry(&the_lnet.ln_dc_request,
3751                                       struct lnet_peer, lp_dc_list);
3752                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3753                 lnet_peer_discovery_complete(lp);
3754         }
3755         lnet_net_unlock(LNET_LOCK_EX);
3756
3757         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3758         the_lnet.ln_dc_handler = NULL;
3759
3760         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3761         wake_up(&the_lnet.ln_dc_waitq);
3762
3763         CDEBUG(D_NET, "stopped\n");
3764
3765         return 0;
3766 }
3767
3768 /* ln_api_mutex is held on entry. */
3769 int lnet_peer_discovery_start(void)
3770 {
3771         struct task_struct *task;
3772         int rc = 0;
3773
3774         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3775                 return -EALREADY;
3776
3777         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3778         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3779         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3780         if (IS_ERR(task)) {
3781                 rc = PTR_ERR(task);
3782                 CERROR("Can't start peer discovery thread: %d\n", rc);
3783
3784                 the_lnet.ln_dc_handler = NULL;
3785
3786                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3787         }
3788
3789         CDEBUG(D_NET, "discovery start: %d\n", rc);
3790
3791         return rc;
3792 }
3793
3794 /* ln_api_mutex is held on entry. */
3795 void lnet_peer_discovery_stop(void)
3796 {
3797         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3798                 return;
3799
3800         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3801         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3802
3803         /* In the LNetNIInit() path we may be stopping discovery before it
3804          * entered its work loop
3805          */
3806         if (!completion_done(&the_lnet.ln_started))
3807                 complete(&the_lnet.ln_started);
3808         else
3809                 wake_up(&the_lnet.ln_dc_waitq);
3810
3811         wait_event(the_lnet.ln_dc_waitq,
3812                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3813
3814         LASSERT(list_empty(&the_lnet.ln_dc_request));
3815         LASSERT(list_empty(&the_lnet.ln_dc_working));
3816         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3817
3818         CDEBUG(D_NET, "discovery stopped\n");
3819 }
3820
3821 /* Debugging */
3822
3823 void
3824 lnet_debug_peer(lnet_nid_t nid)
3825 {
3826         char                    *aliveness = "NA";
3827         struct lnet_peer_ni     *lp;
3828         int                     cpt;
3829
3830         cpt = lnet_cpt_of_nid(nid, NULL);
3831         lnet_net_lock(cpt);
3832
3833         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3834         if (IS_ERR(lp)) {
3835                 lnet_net_unlock(cpt);
3836                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3837                 return;
3838         }
3839
3840         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3841                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3842
3843         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3844                libcfs_nid2str(lp->lpni_nid), kref_read(&lp->lpni_kref),
3845                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3846                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3847                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3848
3849         lnet_peer_ni_decref_locked(lp);
3850
3851         lnet_net_unlock(cpt);
3852 }
3853
3854 /* Gathering information for userspace. */
3855
3856 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3857                           char aliveness[LNET_MAX_STR_LEN],
3858                           __u32 *cpt_iter, __u32 *refcount,
3859                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3860                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3861                           __u32 *peer_tx_qnob)
3862 {
3863         struct lnet_peer_table          *peer_table;
3864         struct lnet_peer_ni             *lp;
3865         int                             j;
3866         int                             lncpt;
3867         bool                            found = false;
3868
3869         /* get the number of CPTs */
3870         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3871
3872         /* if the cpt number to be examined is >= the number of cpts in
3873          * the system then indicate that there are no more cpts to examine
3874          */
3875         if (*cpt_iter >= lncpt)
3876                 return -ENOENT;
3877
3878         /* get the current table */
3879         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3880         /* if the ptable is NULL then there are no more cpts to examine */
3881         if (peer_table == NULL)
3882                 return -ENOENT;
3883
3884         lnet_net_lock(*cpt_iter);
3885
3886         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3887                 struct list_head *peers = &peer_table->pt_hash[j];
3888
3889                 list_for_each_entry(lp, peers, lpni_hashlist) {
3890                         if (peer_index-- > 0)
3891                                 continue;
3892
3893                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3894                         if (lnet_isrouter(lp) ||
3895                                 lnet_peer_aliveness_enabled(lp))
3896                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3897                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3898
3899                         *nid = lp->lpni_nid;
3900                         *refcount = kref_read(&lp->lpni_kref);
3901                         *ni_peer_tx_credits =
3902                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3903                         *peer_tx_credits = lp->lpni_txcredits;
3904                         *peer_rtr_credits = lp->lpni_rtrcredits;
3905                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
3906                         *peer_tx_qnob = lp->lpni_txqnob;
3907
3908                         found = true;
3909                 }
3910
3911         }
3912         lnet_net_unlock(*cpt_iter);
3913
3914         *cpt_iter = lncpt;
3915
3916         return found ? 0 : -ENOENT;
3917 }
3918
3919 /* ln_api_mutex is held, which keeps the peer list stable */
3920 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3921 {
3922         struct lnet_ioctl_element_stats *lpni_stats;
3923         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3924         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3925         struct lnet_peer_ni_credit_info *lpni_info;
3926         struct lnet_peer_ni *lpni;
3927         struct lnet_peer *lp;
3928         lnet_nid_t nid;
3929         __u32 size;
3930         int rc;
3931
3932         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3933
3934         if (!lp) {
3935                 rc = -ENOENT;
3936                 goto out;
3937         }
3938
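             /* Each peer NI contributes one NID plus one credit-info, stats,
              * message-stats and health-stats record to the bulk buffer.
              */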
3939         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3940                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3941         size *= lp->lp_nnis;
3942         if (size > cfg->prcfg_size) {
3943                 cfg->prcfg_size = size;
3944                 rc = -E2BIG;
3945                 goto out_lp_decref;
3946         }
3947
3948         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3949         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3950         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3951         cfg->prcfg_count = lp->lp_nnis;
3952         cfg->prcfg_size = size;
3953         cfg->prcfg_state = lp->lp_state;
3954
3955         /* Allocate helper buffers. */
3956         rc = -ENOMEM;
3957         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3958         if (!lpni_info)
3959                 goto out_lp_decref;
3960         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3961         if (!lpni_stats)
3962                 goto out_free_info;
3963         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3964         if (!lpni_msg_stats)
3965                 goto out_free_stats;
3966         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3967         if (!lpni_hstats)
3968                 goto out_free_msg_stats;
3969
3970
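             /* Walk all peer NIs, copying each one's NID, credit info, stats,
              * message stats and health stats into the userspace bulk buffer
              * in that order.
              */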
3971         lpni = NULL;
3972         rc = -EFAULT;
3973         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3974                 nid = lpni->lpni_nid;
3975                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3976                         goto out_free_hstats;
3977                 bulk += sizeof(nid);
3978
3979                 memset(lpni_info, 0, sizeof(*lpni_info));
3980                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3981                 if (lnet_isrouter(lpni) ||
3982                         lnet_peer_aliveness_enabled(lpni))
3983                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3984                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3985
3986                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
3987                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3988                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3989                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3990                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3991                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3992                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3993                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3994                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3995                         goto out_free_hstats;
3996                 bulk += sizeof(*lpni_info);
3997
3998                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3999                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4000                                                             LNET_STATS_TYPE_SEND);
4001                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4002                                                             LNET_STATS_TYPE_RECV);
4003                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4004                                                             LNET_STATS_TYPE_DROP);
4005                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4006                         goto out_free_hstats;
4007                 bulk += sizeof(*lpni_stats);
4008                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4009                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4010                         goto out_free_hstats;
4011                 bulk += sizeof(*lpni_msg_stats);
4012                 lpni_hstats->hlpni_network_timeout =
4013                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4014                 lpni_hstats->hlpni_remote_dropped =
4015                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4016                 lpni_hstats->hlpni_remote_timeout =
4017                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4018                 lpni_hstats->hlpni_remote_error =
4019                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4020                 lpni_hstats->hlpni_health_value =
4021                   atomic_read(&lpni->lpni_healthv);
4022                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4023                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4024                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4025                         goto out_free_hstats;
4026                 bulk += sizeof(*lpni_hstats);
4027         }
4028         rc = 0;
4029
4030 out_free_hstats:
4031         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4032 out_free_msg_stats:
4033         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4034 out_free_stats:
4035         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4036 out_free_info:
4037         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4038 out_lp_decref:
4039         lnet_peer_decref_locked(lp);
4040 out:
4041         return rc;
4042 }
4043
4044 /* must hold net_lock/0 */
4045 void
4046 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4047                                      struct list_head *recovery_queue,
4048                                      time64_t now)
4049 {
4050         /* the monitor thread could have shut down and cleaned up the queues */
4051         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4052                 return;
4053
4054         if (!list_empty(&lpni->lpni_recovery))
4055                 return;
4056
4057         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4058                 return;
4059
4060         if (!lpni->lpni_last_alive) {
4061                 CDEBUG(D_NET,
4062                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4063                        libcfs_nid2str(lpni->lpni_nid), lpni,
4064                        lpni->lpni_last_alive);
4065                 return;
4066         }
4067
4068         if (lnet_recovery_limit &&
4069             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4070                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4071                        libcfs_nid2str(lpni->lpni_nid),
4072                        lpni->lpni_last_alive);
4073                 /* Reset the ping count so that if this peer NI is added back to
4074                  * the recovery queue we will send the first ping right away.
4075                  */
4076                 lpni->lpni_ping_count = 0;
4077                 return;
4078         }
4079
4080         /* This peer NI is going on the recovery queue, so take a ref on it */
4081         lnet_peer_ni_addref_locked(lpni);
4082
4083         lnet_peer_ni_set_next_ping(lpni, now);
4084
4085         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4086                libcfs_nid2str(lpni->lpni_nid),
4087                lpni->lpni_ping_count,
4088                lpni->lpni_next_ping,
4089                lpni->lpni_last_alive,
4090                atomic_read(&lpni->lpni_healthv));
4091
4092         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4093 }
4094
4095 /* Call with the ln_api_mutex held */
4096 void
4097 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4098 {
4099         struct lnet_peer_table *ptable;
4100         struct lnet_peer *lp;
4101         struct lnet_peer_net *lpn;
4102         struct lnet_peer_ni *lpni;
4103         int lncpt;
4104         int cpt;
4105         time64_t now;
4106
4107         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4108                 return;
4109
4110         now = ktime_get_seconds();
4111
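             /* A single NID was specified: update just that peer NI and queue
              * it for recovery if it is eligible.
              */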
4112         if (!all) {
4113                 lnet_net_lock(LNET_LOCK_EX);
4114                 lpni = lnet_find_peer_ni_locked(nid);
4115                 if (!lpni) {
4116                         lnet_net_unlock(LNET_LOCK_EX);
4117                         return;
4118                 }
4119                 lnet_set_lpni_healthv_locked(lpni, value);
4120                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4121                                              &the_lnet.ln_mt_peerNIRecovq, now);
4122                 lnet_peer_ni_decref_locked(lpni);
4123                 lnet_net_unlock(LNET_LOCK_EX);
4124                 return;
4125         }
4126
4127         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4128
4129         /*
4130          * Walk all the peers and reset the health value for each one to the
4131          * specified value.
4132          */
4133         lnet_net_lock(LNET_LOCK_EX);
4134         for (cpt = 0; cpt < lncpt; cpt++) {
4135                 ptable = the_lnet.ln_peer_tables[cpt];
4136                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4137                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4138                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4139                                                     lpni_peer_nis) {
4140                                         lnet_set_lpni_healthv_locked(lpni,
4141                                                                      value);
4142                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4143                                              &the_lnet.ln_mt_peerNIRecovq, now);
4144                                 }
4145                         }
4146                 }
4147         }
4148         lnet_net_unlock(LNET_LOCK_EX);
4149 }
4150