LU-15509 lnet: Ping buffer ref leak in lnet_peer_data_present
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}

static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = *nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NID_NET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                lnet_peer_ni_addref_locked(lpni);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

        return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = *nid;
        lp->lp_disc_src_nid = LNET_ANY_NID;
        lp->lp_disc_dst_nid = LNET_ANY_NID;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * all peers created on a router should have health on
         * if it's not already on.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for loopback peer. If you're creating a peer
         * for the loopback interface then that was initiated when we
         * attempted to send a message over the loopback. There is no need
         * to ever use a different interface when sending messages to
         * myself.
         */
        if (nid_is_lo0(nid))
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

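        /* lp_data, if set, still holds a reference on the ping buffer
         * attached to this peer during discovery; dropping it here keeps
         * the buffer from being leaked when the peer is destroyed.
         */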
        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * if there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected the lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on the list.
         * We cannot resend them now because we're holding the cpt lock.
         * Releasing the lock can cause an inconsistent state.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nidstr(&lp->lp_primary_nid),
                libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Can not delete it\n",
                       libcfs_nidstr(&lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* decrement the ref count on the peer table */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni on the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

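/*
 * Delete all NIDs of a peer by walking its list of peer_nis and
 * deleting each one in turn. Returns the last non-zero error from
 * lnet_peer_ni_del_locked(), e.g. -EBUSY if a peer_ni is a gateway.
 *
 * Call with lnet_net_lock LNET_LOCK_EX held.
 */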
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

        spin_lock(&peer->lp_lock);
        peer->lp_state |= LNET_PEER_MARK_DELETED;
        spin_unlock(&peer->lp_lock);

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
        struct lnet_handle_md ping_mdh;
        struct lnet_handle_md push_mdh;

        LNetInvalidateMDHandle(&ping_mdh);
        LNetInvalidateMDHandle(&push_mdh);

        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_PING_SENT) {
                ping_mdh = lp->lp_ping_mdh;
                LNetInvalidateMDHandle(&lp->lp_ping_mdh);
        }
        if (lp->lp_state & LNET_PEER_PUSH_SENT) {
                push_mdh = lp->lp_push_mdh;
                LNetInvalidateMDHandle(&lp->lp_push_mdh);
        }
        spin_unlock(&lp->lp_lock);

        if (!LNetMDHandleIsInvalid(ping_mdh))
                LNetMDUnlink(ping_mdh);
        if (!LNetMDHandleIsInvalid(push_mdh))
                LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        int rc;

        lnet_peer_cancel_discovery(peer);
        lnet_net_lock(LNET_LOCK_EX);
        rc = lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
{
        struct lnet_peer_ni *lpni;
        struct lnet_nid primary_nid = lp->lp_primary_nid;
        struct lnet_nid nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        lnet_nid4_to_nid(nid4, &nid);
        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        lpni = lnet_peer_ni_find_locked(&nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        lnet_peer_ni_decref_locked(lpni);
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lpni2->lpni_nid;
        }
        rc = lnet_peer_ni_del_locked(lpni, force);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid),
               flags, rc);

        return rc;
}

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (!nid_same(&peer->lp_primary_nid,
                                       &lpni->lpni_nid)) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        struct lnet_nid         gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, &gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (nid_same(&lp->lpni_nid, nid)) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid4)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;
        struct lnet_nid nid;

        lnet_nid4_to_nid(nid4, &nid);

        cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, &nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
                        return lpni;
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (nid_same(&lpni->lpni_nid, nid))
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer4(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}

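/*
 * Return the peer_net that follows prev_lpn_id in lp's list of peer
 * nets, wrapping around to the first net at the end of the list. A
 * prev_lpn_id of 0 returns the first peer_net. Returns NULL if the
 * peer has no nets, or if prev_lpn_id is not found.
 */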
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list, loop back
                         * to the beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}

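/*
 * Iterate over the peer_nis of a peer: pass prev == NULL to get the
 * first peer_ni, then pass the previous return value to advance. If
 * peer_net is non-NULL, the iteration is confined to that net;
 * otherwise it continues across all of the peer's nets. Returns NULL
 * when the iteration is exhausted. Call with lnet_net_lock held.
 */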
struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_entry(peer->lp_peer_nets.next,
                                         struct lnet_peer_net,
                                         lpn_peer_nets);
                }
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if we reached the end of the peer ni list and a peer
                 * net was specified, then there are no more peer nis in
                 * that net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net's ni list. Move to the
                 * next net.
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
                                 struct lnet_peer_net,
                                 lpn_peer_nets);
                /* get the ni on it */
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_entry(prev->lpni_peer_nis.next,
                          struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (!nid_is_nid4(&lp->lp_primary_nid))
                                continue;
                        if (i >= count)
                                goto done;
                        id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}
/* find the NID in the preferred gateways for the remote peer
 * return:
 *      false: the list is empty, or the NID is not in the list
 *      true: the NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             struct lnet_nid *gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nidstr(&lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nidstr(&ne->nl_nid),
                       libcfs_nidstr(gw_nid));
                if (nid_same(&ne->nl_nid, gw_nid))
                        return true;
        }

        return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       struct lnet_nid *gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with api_mutex held. When the api_mutex
         * is held the list can not be modified, as it is only modified as
         * a result of applying a UDSP and that happens under api_mutex
         * lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (nid_same(&ne->nl_nid, gw_nid))
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = *gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return nid_same(&lpni->lpni_pref.nid, nid);
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (nid_same(&ne->nl_nid, nid))
                        return true;
        }
        return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
                                  struct lnet_nid *nid)
{
        int rc = 0;

        if (!nid)
                return -EINVAL;
        spin_lock(&lpni->lpni_lock);
        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        struct lnet_nid *tmp_nid = NULL;
        int rc = 0;

        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 &&
            nid_same(&lpni->lpni_pref.nid, nid)) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = &lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne1->nl_nid, nid)) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = *tmp_nid;
                }
                ne1->nl_nid = *nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (!nid_same(&lpni->lpni_pref.nid, nid)) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne->nl_nid, nid))
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (ne)
                LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
        struct lnet_peer_ni *lpni;

        *result = *nid;
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

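/*
 * Add a peer described by an array of NIDs. The first non-loopback
 * NID becomes the primary NID; subsequent NIDs are added to that
 * peer when discovery is enabled (multi-rail), or as separate peers
 * when discovery is disabled. -EEXIST from already-known NIDs is
 * treated as success.
 */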
int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
        lnet_nid_t pnid = 0;
        bool mr;
        int i, rc;

        if (!nids || num_nids < 1)
                return -EINVAL;

        rc = LNetNIInit(LNET_PID_ANY);
        if (rc < 0)
                return rc;

        mutex_lock(&the_lnet.ln_api_mutex);

        mr = lnet_peer_discovery_disabled == 0;

        rc = 0;
        for (i = 0; i < num_nids; i++) {
                if (nids[i] == LNET_NID_LO_0)
                        continue;

                if (!pnid) {
                        pnid = nids[i];
                        rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
                } else if (lnet_peer_discovery_disabled) {
                        rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
                } else {
                        rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
                }

                if (rc && rc != -EEXIST)
                        goto unlock;
        }

unlock:
        mutex_unlock(&the_lnet.ln_api_mutex);

        LNetNIFini();

        return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

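/*
 * Replace *nid with the primary NID of the peer it belongs to,
 * running discovery if needed and permitted. On error, or if the
 * peer's primary NID cannot be determined, *nid is left unchanged.
 */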
void LNetPrimaryNID(struct lnet_nid *nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        struct lnet_nid orig;
        int rc = 0;
        int cpt;

        if (!nid || nid_is_lo0(nid))
                return;
        orig = *nid;

        cpt = lnet_net_lock_current();
        lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        /* If discovery is disabled locally then we needn't bother running
         * discovery here because discovery will not modify whatever
         * primary NID is currently set for this peer. If the specified peer is
         * down then this discovery can introduce long delays into the mount
         * process, so skip it if it isn't necessary.
         */
        while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
                spin_lock(&lp->lp_lock);
                /* force a full discovery cycle */
                lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
                spin_unlock(&lp->lp_lock);

                rc = lnet_discover_peer_locked(lpni, cpt, true);
                if (rc)
                        goto out_decref;
                /* The lpni (or lp) for this NID may have changed and our ref is
                 * the only thing keeping the old one around. Release the ref
                 * and lookup the lpni again
                 */
                lnet_peer_ni_decref_locked(lpni);
                lpni = lnet_peer_ni_find_locked(nid);
                if (!lpni) {
                        rc = -ENOENT;
                        goto out_unlock;
                }
                lp = lpni->lpni_peer_net->lpn_peer;

                /* If we find that the peer has discovery disabled then we will
                 * not modify whatever primary NID is currently set for this
                 * peer. Thus, we can break out of this loop even if the peer
                 * is not fully up to date.
                 */
                if (lnet_is_discovery_disabled(lp))
                        break;
        }
        *nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
               libcfs_nidstr(nid), rc);
}
EXPORT_SYMBOL(LNetPrimaryNID);

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;
        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (peer_net->lpn_net_id == net_id)
                        return peer_net;
        }
        return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
1529 static int
1530 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1531                                 struct lnet_peer_net *lpn,
1532                                 struct lnet_peer_ni *lpni,
1533                                 unsigned flags)
1534 {
1535         struct lnet_peer_table *ptable;
1536         bool new_lpn = false;
1537         int rc;
1538
1539         /* Install the new peer_ni */
1540         lnet_net_lock(LNET_LOCK_EX);
1541         /* Add peer_ni to global peer table hash, if necessary. */
1542         if (list_empty(&lpni->lpni_hashlist)) {
1543                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1544
1545                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1546                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1547                 ptable->pt_version++;
1548                 lnet_peer_ni_addref_locked(lpni);
1549         }
1550
1551         /* Detach the peer_ni from an existing peer, if necessary. */
1552         if (lpni->lpni_peer_net) {
1553                 LASSERT(lpni->lpni_peer_net != lpn);
1554                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1555                 lnet_peer_detach_peer_ni_locked(lpni);
1556                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1557                 lpni->lpni_peer_net = NULL;
1558         }
1559
1560         /* Add peer_ni to peer_net */
1561         lpni->lpni_peer_net = lpn;
1562         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1563                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1564         else
1565                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1566         lnet_update_peer_net_healthv(lpni);
1567         lnet_peer_net_addref_locked(lpn);
1568
1569         /* Add peer_net to peer */
1570         if (!lpn->lpn_peer) {
1571                 new_lpn = true;
1572                 lpn->lpn_peer = lp;
1573                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1574                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1575                 else
1576                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1577                 lnet_peer_addref_locked(lp);
1578         }
1579
1580         /* Add peer to global peer list, if necessary */
1581         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1582         if (list_empty(&lp->lp_peer_list)) {
1583                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1584                 ptable->pt_peers++;
1585         }
1586
1588         /* Update peer state */
1589         spin_lock(&lp->lp_lock);
1590         if (flags & LNET_PEER_CONFIGURED) {
1591                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1592                         lp->lp_state |= LNET_PEER_CONFIGURED;
1593         }
1594         if (flags & LNET_PEER_MULTI_RAIL) {
1595                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1596                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1597                         lnet_peer_clr_non_mr_pref_nids(lp);
1598                 }
1599         }
1600         spin_unlock(&lp->lp_lock);
1601
1602         lp->lp_nnis++;
1603
1604         /* apply UDSPs */
1605         if (new_lpn) {
1606                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1607                 if (rc)
1608                         CERROR("Failed to apply UDSPs on lpn %s\n",
1609                                libcfs_net2str(lpn->lpn_net_id));
1610         }
1611         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1612         if (rc)
1613                 CERROR("Failed to apply UDSPs on lpni %s\n",
1614                        libcfs_nidstr(&lpni->lpni_nid));
1615
1616         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1617                libcfs_nidstr(&lp->lp_primary_nid),
1618                libcfs_nidstr(&lpni->lpni_nid), flags);
1619         lnet_peer_ni_decref_locked(lpni);
1620         lnet_net_unlock(LNET_LOCK_EX);
1621
1622         return 0;
1623 }
1624
1625 /*
1626  * Create a new peer, with nid as its primary nid.
1627  *
1628  * Call with the lnet_api_mutex held.
1629  */
1630 static int
1631 lnet_peer_add(lnet_nid_t nid4, unsigned int flags)
1632 {
1633         struct lnet_nid nid;
1634         struct lnet_peer *lp;
1635         struct lnet_peer_net *lpn;
1636         struct lnet_peer_ni *lpni;
1637         int rc = 0;
1638
1639         LASSERT(nid4 != LNET_NID_ANY);
1640
1641         /*
1642          * No need for the lnet_net_lock here, because the
1643          * lnet_api_mutex is held.
1644          */
1645         lpni = lnet_find_peer_ni_locked(nid4);
1646         if (lpni) {
1647                 /* A peer with this NID already exists. */
1648                 lp = lpni->lpni_peer_net->lpn_peer;
1649                 lnet_peer_ni_decref_locked(lpni);
1650                 /*
1651                  * This is an error if the peer was configured and the
1652                  * primary NID differs or an attempt is made to change
1653                  * the Multi-Rail flag. Otherwise the assumption is
1654                  * that an existing peer is being modified.
1655                  */
1656                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1657                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid4)
1658                                 rc = -EEXIST;
1659                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1660                                 rc = -EPERM;
1661                         goto out;
1662                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1663                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid4) {
1664                                 rc = -EEXIST;
1665                                 goto out;
1666                         }
1667                 }
1668                 /* Delete and recreate as a configured peer. */
1669                 rc = lnet_peer_del(lp);
1670                 if (rc)
1671                         goto out;
1672         }
1673
1674         /* Create peer, peer_net, and peer_ni. */
1675         rc = -ENOMEM;
1676         lnet_nid4_to_nid(nid4, &nid);
1677         lp = lnet_peer_alloc(&nid);
1678         if (!lp)
1679                 goto out;
1680         lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
1681         if (!lpn)
1682                 goto out_free_lp;
1683         lpni = lnet_peer_ni_alloc(&nid);
1684         if (!lpni)
1685                 goto out_free_lpn;
1686
1687         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1688
1689 out_free_lpn:
1690         LIBCFS_FREE(lpn, sizeof(*lpn));
1691 out_free_lp:
1692         LIBCFS_FREE(lp, sizeof(*lp));
1693 out:
1694         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1695                libcfs_nid2str(nid4), flags, rc);
1696         return rc;
1697 }
1698
1699 /*
1700  * Add a NID to a peer. Call with ln_api_mutex held.
1701  *
1702  * Error codes:
1703  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1704  *  -EEXIST:   The NID was configured by DLC for a different peer.
1705  *  -ENOMEM:   Out of memory.
1706  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1707  *             non-multi-rail peer.
1708  */
1709 static int
1710 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
1711 {
1712         struct lnet_peer_net *lpn;
1713         struct lnet_peer_ni *lpni;
1714         struct lnet_nid nid;
1715         int rc = 0;
1716
1717         LASSERT(lp);
1718         LASSERT(nid4 != LNET_NID_ANY);
1719
1720         lnet_nid4_to_nid(nid4, &nid);
1721
1722         /* A configured peer can only be updated through configuration. */
1723         if (!(flags & LNET_PEER_CONFIGURED)) {
1724                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1725                         rc = -EPERM;
1726                         goto out;
1727                 }
1728         }
1729
1730         /*
1731          * The MULTI_RAIL flag can be set but not cleared, because
1732          * that would leave the peer struct in an invalid state.
1733          */
1734         if (flags & LNET_PEER_MULTI_RAIL) {
1735                 spin_lock(&lp->lp_lock);
1736                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1737                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1738                         lnet_peer_clr_non_mr_pref_nids(lp);
1739                 }
1740                 spin_unlock(&lp->lp_lock);
1741         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1742                 rc = -EPERM;
1743                 goto out;
1744         }
1745
1746         lpni = lnet_find_peer_ni_locked(nid4);
1747         if (lpni) {
1748                 /*
1749                  * A peer_ni already exists. This is only a problem if
1750                  * it is not connected to this peer and was configured
1751                  * by DLC.
1752                  */
1753                 if (lpni->lpni_peer_net->lpn_peer == lp)
1754                         goto out_free_lpni;
1755                 if (lnet_peer_ni_is_configured(lpni)) {
1756                         rc = -EEXIST;
1757                         goto out_free_lpni;
1758                 }
1759                 /* If this is the primary NID, destroy the peer. */
1760                 if (lnet_peer_ni_is_primary(lpni)) {
1761                         struct lnet_peer *rtr_lp =
1762                                 lpni->lpni_peer_net->lpn_peer;
1763                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1764                         /*
1765                          * if we're trying to delete a router it means
1766                          * we're moving this peer NI to a new peer so must
1767                          * transfer router properties to the new peer
1768                          */
1769                         if (rtr_refcount > 0) {
1770                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1771                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1772                         }
1773                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1774                         lnet_peer_ni_decref_locked(lpni);
1775                         lpni = lnet_peer_ni_alloc(&nid);
1776                         if (!lpni) {
1777                                 rc = -ENOMEM;
1778                                 goto out; /* lpni is NULL here */
1779                         }
1780                 }
1781         } else {
1782                 lpni = lnet_peer_ni_alloc(&nid);
1783                 if (!lpni) {
1784                         rc = -ENOMEM;
1785                         goto out; /* lpni is NULL here */
1786                 }
1787         }
1788
1789         /*
1790          * Get the peer_net. Check that we're not adding a second
1791          * peer_ni on a peer_net of a non-multi-rail peer.
1792          */
1793         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid4));
1794         if (!lpn) {
1795                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid4));
1796                 if (!lpn) {
1797                         rc = -ENOMEM;
1798                         goto out_free_lpni;
1799                 }
1800         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1801                 rc = -ENOTUNIQ;
1802                 goto out_free_lpni;
1803         }
1804
1805         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1806
1807 out_free_lpni:
1808         lnet_peer_ni_decref_locked(lpni);
1809 out:
1810         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1811                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid4),
1812                flags, rc);
1813         return rc;
1814 }
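
/*
 * Editor's sketch (not part of the original source): one way a
 * configuration front end might map the lnet_peer_add_nid() error codes
 * documented above to user-visible text. The helper name and strings are
 * hypothetical.
 */
#if 0	/* illustrative only */
static const char *
lnet_peer_add_nid_strerror_example(int rc)
{
	switch (rc) {
	case 0:
		return "success";
	case -EPERM:
		return "peer is DLC-configured; modify it through DLC";
	case -EEXIST:
		return "NID was configured by DLC for a different peer";
	case -ENOTUNIQ:
		return "second NID on one net requires a Multi-Rail peer";
	case -ENOMEM:
		return "out of memory";
	default:
		return "unknown error";
	}
}
#endif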
1815
1816 /*
1817  * Update the primary NID of a peer, if possible.
1818  *
1819  * Call with the lnet_api_mutex held.
1820  */
1821 static int
1822 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
1823                           unsigned int flags)
1824 {
1825         struct lnet_nid old = lp->lp_primary_nid;
1826         int rc = 0;
1827
1828         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
1829                 goto out;
1830
1831         lnet_nid4_to_nid(nid, &lp->lp_primary_nid);
1832
1833         rc = lnet_peer_add_nid(lp, nid, flags);
1834         if (rc)
1835                 lp->lp_primary_nid = old;
1838 out:
1839         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1840                libcfs_nidstr(&old), libcfs_nid2str(nid), rc);
1841
1842         return rc;
1843 }
1844
1845 /*
1846  * lpni creation initiated due to traffic either sending or receiving.
1847  * Callers must hold ln_api_mutex
1848  * Ref taken on lnet_peer_ni returned by this function
1849  */
1850 static struct lnet_peer_ni *
1851 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1852 __must_hold(&the_lnet.ln_api_mutex)
1853 {
1854         struct lnet_peer *lp = NULL;
1855         struct lnet_peer_net *lpn = NULL;
1856         struct lnet_peer_ni *lpni;
1857         unsigned flags = 0;
1858         int rc = 0;
1859
1860         if (LNET_NID_IS_ANY(nid)) {
1861                 rc = -EINVAL;
1862                 goto out_err;
1863         }
1864
1865         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1866         lpni = lnet_peer_ni_find_locked(nid);
1867         if (lpni) {
1868                 /*
1869                  * We must have raced with another thread. Since we
1870                  * know next to nothing about a peer_ni created by
1871                  * traffic, we just assume everything is ok and
1872                  * return.
1873                  */
1874                 goto out;
1875         }
1876
1877         /* Create peer, peer_net, and peer_ni. */
1878         rc = -ENOMEM;
1879         lp = lnet_peer_alloc(nid);
1880         if (!lp)
1881                 goto out_err;
1882         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1883         if (!lpn)
1884                 goto out_err;
1885         lpni = lnet_peer_ni_alloc(nid);
1886         if (!lpni)
1887                 goto out_err;
1888         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1889
1890         /* lnet_peer_attach_peer_ni() always returns 0 */
1891         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1892
1893         lnet_peer_ni_addref_locked(lpni);
1894
1895 out_err:
1896         if (rc) {
1897                 if (lpn)
1898                         LIBCFS_FREE(lpn, sizeof(*lpn));
1899                 if (lp)
1900                         LIBCFS_FREE(lp, sizeof(*lp));
1901                 lpni = ERR_PTR(rc);
1902         }
1903 out:
1904         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1905         return lpni;
1906 }
1907
1908 /*
1909  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1910  *
1911  * This API handles the following combinations:
1912  *   Create a peer with its primary NI if only the prim_nid is provided
1913  *   Add a NID to a peer identified by the prim_nid. The peer identified
1914  *   by the prim_nid must already exist.
1915  *   The peer being created may be non-MR.
1916  *
1917  * The caller must hold ln_api_mutex. This prevents the peer from
1918  * being created/modified/deleted by a different thread.
1919  */
1920 int
1921 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
1922 {
1923         struct lnet_peer *lp = NULL;
1924         struct lnet_peer_ni *lpni;
1925         unsigned int flags = 0;
1926
1927         /* The prim_nid must always be specified */
1928         if (prim_nid == LNET_NID_ANY)
1929                 return -EINVAL;
1930
1931         if (!temp)
1932                 flags = LNET_PEER_CONFIGURED;
1933
1934         if (mr)
1935                 flags |= LNET_PEER_MULTI_RAIL;
1936
1937         /*
1938          * If nid isn't specified, we must create a new peer with
1939          * prim_nid as its primary nid.
1940          */
1941         if (nid == LNET_NID_ANY)
1942                 return lnet_peer_add(prim_nid, flags);
1943
1944         /* Look up the prim_nid, which must exist. */
1945         lpni = lnet_find_peer_ni_locked(prim_nid);
1946         if (!lpni)
1947                 return -ENOENT;
1948         lnet_peer_ni_decref_locked(lpni);
1949         lp = lpni->lpni_peer_net->lpn_peer;
1950
1951         /* Peer must have been configured. */
1952         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1953                 CDEBUG(D_NET, "peer %s was not configured\n",
1954                        libcfs_nid2str(prim_nid));
1955                 return -ENOENT;
1956         }
1957
1958         /* Primary NID must match */
1959         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
1960                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1961                        libcfs_nid2str(prim_nid),
1962                        libcfs_nidstr(&lp->lp_primary_nid));
1963                 return -ENODEV;
1964         }
1965
1966         /* Multi-Rail flag must match. */
1967         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1968                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1969                        libcfs_nid2str(prim_nid));
1970                 return -EPERM;
1971         }
1972
1973         return lnet_peer_add_nid(lp, nid, flags);
1974 }
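
/*
 * Editor's sketch (not part of the original source): the shape of a
 * caller of lnet_add_peer_ni() that first creates a Multi-Rail peer and
 * then attaches a second NID to it. The wrapper name is hypothetical;
 * the ln_api_mutex is taken as the comment above requires.
 */
#if 0	/* illustrative only */
static int
lnet_add_mr_peer_example(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	int rc;

	mutex_lock(&the_lnet.ln_api_mutex);
	/* Create the peer with prim_nid as its primary NID (permanent) */
	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true, false);
	if (!rc)
		/* Add the second NID to the peer just created */
		rc = lnet_add_peer_ni(prim_nid, nid, true, false);
	mutex_unlock(&the_lnet.ln_api_mutex);

	return rc;
}
#endif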
1975
1976 /*
1977  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1978  *
1979  * This API handles the following combinations:
1980  *   Delete a NI from a peer if both prim_nid and nid are provided.
1981  *   Delete a peer if only prim_nid is provided.
1982  *   Delete a peer if the provided nid is its primary nid.
1983  *
1984  * The caller must hold ln_api_mutex. This prevents the peer from
1985  * being modified/deleted by a different thread.
1986  */
1987 int
1988 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1989 {
1990         struct lnet_peer *lp;
1991         struct lnet_peer_ni *lpni;
1992         unsigned flags;
1993
1994         if (prim_nid == LNET_NID_ANY)
1995                 return -EINVAL;
1996
1997         lpni = lnet_find_peer_ni_locked(prim_nid);
1998         if (!lpni)
1999                 return -ENOENT;
2000         lnet_peer_ni_decref_locked(lpni);
2001         lp = lpni->lpni_peer_net->lpn_peer;
2002
2003         if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
2004                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2005                        libcfs_nid2str(prim_nid),
2006                        libcfs_nidstr(&lp->lp_primary_nid));
2007                 return -ENODEV;
2008         }
2009
2010         lnet_net_lock(LNET_LOCK_EX);
2011         if (lp->lp_rtr_refcount > 0) {
2012                 lnet_net_unlock(LNET_LOCK_EX);
2013                 CERROR("%s is a router. Cannot be deleted\n",
2014                        libcfs_nid2str(prim_nid));
2015                 return -EBUSY;
2016         }
2017         lnet_net_unlock(LNET_LOCK_EX);
2018
2019         if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
2020                 return lnet_peer_del(lp);
2021
2022         flags = LNET_PEER_CONFIGURED;
2023         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2024                 flags |= LNET_PEER_MULTI_RAIL;
2025
2026         return lnet_peer_del_nid(lp, nid, flags);
2027 }
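
/*
 * Editor's sketch (not part of the original source): the two deletion
 * combinations listed above, driven from a hypothetical caller that
 * already holds the ln_api_mutex.
 */
#if 0	/* illustrative only */
static int
lnet_del_peer_example(lnet_nid_t prim_nid, lnet_nid_t nid)
{
	int rc;

	/* Remove a single NI from the peer identified by prim_nid */
	rc = lnet_del_peer_ni(prim_nid, nid);
	if (rc)
		return rc;

	/* Remove the whole peer; fails with -EBUSY if it is a router */
	return lnet_del_peer_ni(prim_nid, LNET_NID_ANY);
}
#endif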
2028
2029 void
2030 lnet_destroy_peer_ni_locked(struct kref *ref)
2031 {
2032         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2033                                                  lpni_kref);
2034         struct lnet_peer_table *ptable;
2035         struct lnet_peer_net *lpn;
2036
2037         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2038
2039         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2040         LASSERT(list_empty(&lpni->lpni_txq));
2041         LASSERT(lpni->lpni_txqnob == 0);
2042         LASSERT(list_empty(&lpni->lpni_peer_nis));
2043         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2044
2045         lpn = lpni->lpni_peer_net;
2046         lpni->lpni_peer_net = NULL;
2047         lpni->lpni_net = NULL;
2048
2049         if (!list_empty(&lpni->lpni_hashlist)) {
2050                 /* remove the peer ni from the zombie list */
2051                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2052                 spin_lock(&ptable->pt_zombie_lock);
2053                 list_del_init(&lpni->lpni_hashlist);
2054                 ptable->pt_zombies--;
2055                 spin_unlock(&ptable->pt_zombie_lock);
2056         }
2057
2058         if (lpni->lpni_pref_nnids > 1) {
2059                 struct lnet_nid_list *ne, *tmp;
2060
2061                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2062                                          nl_list) {
2063                         list_del_init(&ne->nl_list);
2064                         LIBCFS_FREE(ne, sizeof(*ne));
2065                 }
2066         }
2067         LIBCFS_FREE(lpni, sizeof(*lpni));
2068
2069         if (lpn)
2070                 lnet_peer_net_decref_locked(lpn);
2071 }
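
/*
 * Editor's note (assumption, based on the kref signature above): this
 * function is the release callback for lpni_kref, so it is only reached
 * through the final decref, roughly:
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 */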
2072
2073 struct lnet_peer_ni *
2074 lnet_nid2peerni_ex(struct lnet_nid *nid)
2075 __must_hold(&the_lnet.ln_api_mutex)
2076 {
2077         struct lnet_peer_ni *lpni = NULL;
2078
2079         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2080                 return ERR_PTR(-ESHUTDOWN);
2081
2082         /*
2083          * find if a peer_ni already exists.
2084          * If so then just return that.
2085          */
2086         lpni = lnet_peer_ni_find_locked(nid);
2087         if (lpni)
2088                 return lpni;
2089
2090         lnet_net_unlock(LNET_LOCK_EX);
2091
2092         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2093
2094         lnet_net_lock(LNET_LOCK_EX);
2095
2096         return lpni;
2097 }
2098
2099 /*
2100  * Get a peer_ni for the given nid, create it if necessary. Takes a
2101  * hold on the peer_ni.
2102  */
2103 struct lnet_peer_ni *
2104 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2105                         struct lnet_nid *pref, int cpt)
2106 {
2107         struct lnet_peer_ni *lpni = NULL;
2108
2109         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2110                 return ERR_PTR(-ESHUTDOWN);
2111
2112         /*
2113          * find if a peer_ni already exists.
2114          * If so then just return that.
2115          */
2116         lpni = lnet_peer_ni_find_locked(nid);
2117         if (lpni)
2118                 return lpni;
2119
2120         /*
2121          * Slow path:
2122          * use the lnet_api_mutex to serialize the creation of the peer_ni
2123          * and the creation/deletion of the local ni/net. When a local ni is
2124          * created, if there exists a set of peer_nis on that network,
2125          * they need to be traversed and updated. When a local NI is
2126          * deleted, which could result in a network being deleted, then
2127          * all peer nis on that network need to be removed as well.
2128          *
2129          * Creation through traffic should also be serialized with
2130          * creation through DLC.
2131          */
2132         lnet_net_unlock(cpt);
2133         mutex_lock(&the_lnet.ln_api_mutex);
2134         /*
2135          * the_lnet.ln_state is only modified while the ln_api_mutex is held,
2136          * so a single check here is sufficient.
2137          */
2138         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2139                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2140
2141         mutex_unlock(&the_lnet.ln_api_mutex);
2142         lnet_net_lock(cpt);
2143
2144         /* Lock has been dropped, check again for shutdown. */
2145         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2146                 if (!IS_ERR_OR_NULL(lpni))
2147                         lnet_peer_ni_decref_locked(lpni);
2148                 lpni = ERR_PTR(-ESHUTDOWN);
2149         }
2150
2151         return lpni;
2152 }
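
/*
 * Editor's sketch (not part of the original source): the expected caller
 * pattern for the function above. The caller enters and leaves with
 * lnet_net_lock(cpt) held and must eventually drop the ref it is handed.
 * The wrapper name and dst_nid are hypothetical.
 */
#if 0	/* illustrative only */
static int
lnet_use_peerni_example(struct lnet_nid *dst_nid, int cpt)
{
	struct lnet_peer_ni *lpni;

	lnet_net_lock(cpt);
	lpni = lnet_peerni_by_nid_locked(dst_nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		lnet_net_unlock(cpt);
		return PTR_ERR(lpni);
	}
	/* ... use lpni for sending ... */
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(cpt);

	return 0;
}
#endif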
2153
2154 struct lnet_peer_ni *
2155 lnet_nid2peerni_locked(lnet_nid_t nid4, lnet_nid_t pref4, int cpt)
2156 {
2157         struct lnet_nid nid, pref;
2158
2159         lnet_nid4_to_nid(nid4, &nid);
2160         lnet_nid4_to_nid(pref4, &pref);
2161         if (pref4 == LNET_NID_ANY)
2162                 return lnet_peerni_by_nid_locked(&nid, NULL, cpt);
2163         else
2164                 return lnet_peerni_by_nid_locked(&nid, &pref, cpt);
2165 }
2166
2167 bool
2168 lnet_peer_gw_discovery(struct lnet_peer *lp)
2169 {
2170         bool rc = false;
2171
2172         spin_lock(&lp->lp_lock);
2173         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2174                 rc = true;
2175         spin_unlock(&lp->lp_lock);
2176
2177         return rc;
2178 }
2179
2180 bool
2181 lnet_peer_is_uptodate(struct lnet_peer *lp)
2182 {
2183         bool rc;
2184
2185         spin_lock(&lp->lp_lock);
2186         rc = lnet_peer_is_uptodate_locked(lp);
2187         spin_unlock(&lp->lp_lock);
2188         return rc;
2189 }
2190
2191 /*
2192  * Is a peer uptodate from the point of view of discovery?
2193  *
2194  * If it is currently being processed, obviously not.
2195  * A forced Ping or Push is also handled by the discovery thread.
2196  *
2197  * Otherwise look at whether the peer needs rediscovering.
2198  */
2199 bool
2200 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2201 __must_hold(&lp->lp_lock)
2202 {
2203         bool rc;
2204
2205         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2206                             LNET_PEER_FORCE_PING |
2207                             LNET_PEER_FORCE_PUSH)) {
2208                 rc = false;
2209         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2210                 rc = false;
2211         } else if (lnet_peer_needs_push(lp)) {
2212                 rc = false;
2213         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2214                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2215                         rc = true;
2216                 else
2217                         rc = false;
2218         } else {
2219                 rc = false;
2220         }
2221
2222         return rc;
2223 }
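
/*
 * Editor's note (equivalent form, for clarity): the if/else chain above
 * reduces to a single predicate; a peer is up to date iff it is
 * DISCOVERED with NIDS_UPTODATE and nothing is pending or forced.
 */
#if 0	/* illustrative only */
static bool
lnet_peer_is_uptodate_equiv(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	return !(lp->lp_state & (LNET_PEER_DISCOVERING |
				 LNET_PEER_FORCE_PING |
				 LNET_PEER_FORCE_PUSH |
				 LNET_PEER_REDISCOVER)) &&
	       !lnet_peer_needs_push(lp) &&
	       (lp->lp_state & LNET_PEER_DISCOVERED) &&
	       (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
}
#endif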
2224
2225 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2226 void
2227 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2228 {
2229         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2230          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2231          * when adding to the list and queuing the peer to ensure that we do not
2232          * strand any messages on the lp_dc_pendq. This scheme ensures the
2233          * message will be resent even if the peer is already being discovered.
2234          * Therefore we needn't check the return value of
2235          * lnet_peer_queue_for_discovery(lp).
2236          */
2237         lnet_net_lock(LNET_LOCK_EX);
2238         spin_lock(&lp->lp_lock);
2239         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2240         spin_unlock(&lp->lp_lock);
2241         lnet_peer_queue_for_discovery(lp);
2242         lnet_net_unlock(LNET_LOCK_EX);
2243 }
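
/*
 * Editor's sketch (not part of the original source): the matching
 * consumer side of the scheme described above. The discovery thread
 * splices lp_dc_pendq under the same two locks, so no message can be
 * stranded between the list_add_tail() and the queueing. The helper
 * name is hypothetical.
 */
#if 0	/* illustrative only */
static void
lnet_splice_pendq_example(struct lnet_peer *lp, struct list_head *resend)
{
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, resend);
	spin_unlock(&lp->lp_lock);
	lnet_net_unlock(LNET_LOCK_EX);
}
#endif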
2244
2245 /*
2246  * Queue a peer for the attention of the discovery thread.  Call with
2247  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2248  * -EALREADY if the peer was already queued.
2249  */
2250 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2251 {
2252         int rc;
2253
2254         spin_lock(&lp->lp_lock);
2255         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2256                 lp->lp_state |= LNET_PEER_DISCOVERING;
2257         spin_unlock(&lp->lp_lock);
2258         if (list_empty(&lp->lp_dc_list)) {
2259                 lnet_peer_addref_locked(lp);
2260                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2261                 wake_up(&the_lnet.ln_dc_waitq);
2262                 rc = 0;
2263         } else {
2264                 rc = -EALREADY;
2265         }
2266
2267         CDEBUG(D_NET, "Queue peer %s: %d\n",
2268                libcfs_nidstr(&lp->lp_primary_nid), rc);
2269
2270         return rc;
2271 }
2272
2273 /*
2274  * Discovery of a peer is complete. Wake all waiters on the peer.
2275  * Call with lnet_net_lock/EX held.
2276  */
2277 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2278 {
2279         struct lnet_msg *msg, *tmp;
2280         int rc = 0;
2281         LIST_HEAD(pending_msgs);
2282
2283         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2284                libcfs_nidstr(&lp->lp_primary_nid));
2285
2286         list_del_init(&lp->lp_dc_list);
2287         spin_lock(&lp->lp_lock);
2288         if (dc_error) {
2289                 lp->lp_dc_error = dc_error;
2290                 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2291                 lp->lp_state |= LNET_PEER_REDISCOVER;
2292         }
2293         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2294         spin_unlock(&lp->lp_lock);
2295         wake_up(&lp->lp_dc_waitq);
2296
2297         if (lp->lp_rtr_refcount > 0)
2298                 lnet_router_discovery_complete(lp);
2299
2300         lnet_net_unlock(LNET_LOCK_EX);
2301
2302         /* iterate through all pending messages and send them again */
2303         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2304                 list_del_init(&msg->msg_list);
2305                 if (dc_error) {
2306                         lnet_finalize(msg, dc_error);
2307                         continue;
2308                 }
2309
2310                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2311                        lnet_msgtyp2str(msg->msg_type),
2312                        libcfs_idstr(&msg->msg_target));
2313                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2314                                &msg->msg_rtr_nid_param);
2315                 if (rc < 0) {
2316                         CNETERR("Error sending %s to %s: %d\n",
2317                                lnet_msgtyp2str(msg->msg_type),
2318                                libcfs_idstr(&msg->msg_target), rc);
2319                         lnet_finalize(msg, rc);
2320                 }
2321         }
2322         lnet_net_lock(LNET_LOCK_EX);
2323         lnet_peer_decref_locked(lp);
2324 }
2325
2326 /*
2327  * Handle inbound push.
2328  * Like any event handler, called with lnet_res_lock/CPT held.
2329  */
2330 void lnet_peer_push_event(struct lnet_event *ev)
2331 {
2332         struct lnet_ping_buffer *pbuf;
2333         struct lnet_peer *lp;
2334
2335         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2336
2337         /* lnet_find_peer() adds a refcount */
2338         lp = lnet_find_peer(&ev->source.nid);
2339         if (!lp) {
2340                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2341                        libcfs_nidstr(&ev->initiator.nid),
2342                        libcfs_nidstr(&ev->source.nid));
2343                 pbuf->pb_needs_post = true;
2344                 return;
2345         }
2346
2347         /* Ensure peer state remains consistent while we modify it. */
2348         spin_lock(&lp->lp_lock);
2349
2350         /*
2351          * If some kind of error happened the contents of the message
2352          * cannot be used. Clear the NIDS_UPTODATE and set the
2353          * FORCE_PING flag to trigger a ping.
2354          */
2355         if (ev->status) {
2356                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2357                 lp->lp_state |= LNET_PEER_FORCE_PING;
2358                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2359                        ev->status,
2360                        libcfs_nidstr(&lp->lp_primary_nid),
2361                        libcfs_nidstr(&ev->source.nid));
2362                 goto out;
2363         }
2364
2365         /*
2366          * A push with invalid or corrupted info. Clear the UPTODATE
2367          * flag to trigger a ping.
2368          */
2369         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2370                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2371                 lp->lp_state |= LNET_PEER_FORCE_PING;
2372                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2373                        libcfs_nidstr(&lp->lp_primary_nid));
2374                 goto out;
2375         }
2376
2377         /*
2378          * Make sure we'll allocate the correct size ping buffer when
2379          * pinging the peer.
2380          */
2381         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2382                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2383
2384         /*
2385          * A non-Multi-Rail peer is not supposed to be capable of
2386          * sending a push.
2387          */
2388         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2389                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2390                        libcfs_nidstr(&lp->lp_primary_nid));
2391                 goto out;
2392         }
2393
2394         /*
2395          * The peer may have discovery disabled at its end. Set
2396          * NO_DISCOVERY as appropriate.
2397          */
2398         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2399                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2400                        libcfs_nidstr(&lp->lp_primary_nid));
2401                 /*
2402                  * Mark the peer for deletion if we already know about it
2403                  * and it is going from discovery enabled to discovery disabled
2404                  */
2405                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2406                                       LNET_PEER_DISCOVERING)) &&
2407                      lp->lp_state & LNET_PEER_DISCOVERED) {
2408                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2409                                libcfs_nidstr(&lp->lp_primary_nid),
2410                                lp->lp_state);
2411                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2412                 }
2413                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2414         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2415                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2416                        libcfs_nidstr(&lp->lp_primary_nid));
2417                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2418         }
2419
2420         /*
2421          * Update the MULTI_RAIL flag based on the push. If the peer
2422          * was configured with DLC then the setting should match what
2423          * DLC put in.
2424          * NB: We verified above that the MR feature bit is set in pi_features
2425          */
2426         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2427                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2428                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2429         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2430                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2431                       libcfs_nidstr(&lp->lp_primary_nid));
2432         } else if (lnet_peer_discovery_disabled) {
2433                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2434                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2435         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2436                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2437                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2438         } else {
2439                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2440                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2441                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2442                 lnet_peer_clr_non_mr_pref_nids(lp);
2443         }
2444
2445         /*
2446          * Check for truncation of the Put message. Clear the
2447          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2448          * and tell discovery to allocate a bigger buffer.
2449          */
2450         if (ev->mlength < ev->rlength) {
2451                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2452                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2453                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2454                 lp->lp_state |= LNET_PEER_FORCE_PING;
2455                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2456                        libcfs_nidstr(&lp->lp_primary_nid),
2457                        pbuf->pb_info.pi_nnis);
2458                 goto out;
2459         }
2460
2461         /* always assume new data */
2462         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2463         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2464
2465         /*
2466          * If there is data present that hasn't been processed yet,
2467          * we'll replace it if the Put contained newer data and it
2468          * fits. We're racing with a Ping or earlier Push in this
2469          * case.
2470          */
2471         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2472                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2473                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2474                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2475                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2476                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2477                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2478                               libcfs_nidstr(&lp->lp_primary_nid),
2479                               LNET_PING_BUFFER_SEQNO(pbuf),
2480                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2481                 }
2482                 goto out;
2483         }
2484
2485         /*
2486          * Allocate a buffer to copy the data. On a failure we drop
2487          * the Push and set FORCE_PING to force the discovery
2488          * thread to fix the problem by pinging the peer.
2489          */
2490         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2491         if (!lp->lp_data) {
2492                 lp->lp_state |= LNET_PEER_FORCE_PING;
2493                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2494                        libcfs_nidstr(&lp->lp_primary_nid),
2495                        LNET_PING_BUFFER_SEQNO(pbuf));
2496                 goto out;
2497         }
2498
2499         /* Success */
2500         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2501                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2502         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2503         CDEBUG(D_NET, "Received Push %s %u\n",
2504                libcfs_nidstr(&lp->lp_primary_nid),
2505                LNET_PING_BUFFER_SEQNO(pbuf));
2506
2507 out:
2508         /* We've processed this buffer. It can be reposted */
2509         pbuf->pb_needs_post = true;
2510
2511         /*
2512          * Queue the peer for discovery if it is not queued already. If it
2513          * was already queued, force it onto the request queue and wake the
2514          * discovery thread, because its status changed.
2515          */
2516         spin_unlock(&lp->lp_lock);
2517         lnet_net_lock(LNET_LOCK_EX);
2518         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2519                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2520                 wake_up(&the_lnet.ln_dc_waitq);
2521         }
2522         /* Drop refcount from lookup */
2523         lnet_peer_decref_locked(lp);
2524         lnet_net_unlock(LNET_LOCK_EX);
2525 }
2526
2527 /*
2528  * Clear the discovery error state, unless we're already discovering
2529  * this peer, in which case the error is current.
2530  */
2531 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2532 {
2533         spin_lock(&lp->lp_lock);
2534         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2535                 lp->lp_dc_error = 0;
2536         spin_unlock(&lp->lp_lock);
2537 }
2538
2539 /*
2540  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2541  * dropped/retaken within this function. An lnet_peer_ni is passed in
2542  * because discovery could tear down an lnet_peer.
2543  */
2544 int
2545 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2546 {
2547         DEFINE_WAIT(wait);
2548         struct lnet_peer *lp;
2549         int rc = 0;
2550         int count = 0;
2551
2552 again:
2553         lnet_net_unlock(cpt);
2554         lnet_net_lock(LNET_LOCK_EX);
2555         lp = lpni->lpni_peer_net->lpn_peer;
2556         lnet_peer_clear_discovery_error(lp);
2557
2558         /*
2559          * We're willing to be interrupted. The lpni can become a
2560          * zombie if we race with DLC, so we must check for that.
2561          */
2562         for (;;) {
2563                 /* Keep lp alive when the lnet_net_lock is unlocked */
2564                 lnet_peer_addref_locked(lp);
2565                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2566                 if (signal_pending(current))
2567                         break;
2568                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2569                         break;
2570                 /*
2571                  * Don't repeat discovery if discovery is disabled. This is
2572                  * done to ensure we can use discovery as a standard ping as
2573                  * well for backwards compatibility with routers which do not
2574                  * have discovery or have discovery disabled
2575                  */
2576                 if (lnet_is_discovery_disabled(lp) && count > 0)
2577                         break;
2578                 if (lp->lp_dc_error)
2579                         break;
2580                 if (lnet_peer_is_uptodate(lp))
2581                         break;
2582                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2583                         break;
2584                 lnet_peer_queue_for_discovery(lp);
2585                 count++;
2586                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2587
2588                 /*
2589                  * If caller requested a non-blocking operation then
2590                  * return immediately. Once discovery is complete any
2591                  * pending messages that were stopped due to discovery
2592                  * will be transmitted.
2593                  */
2594                 if (!block)
2595                         break;
2596
2597                 lnet_net_unlock(LNET_LOCK_EX);
2598                 schedule();
2599                 finish_wait(&lp->lp_dc_waitq, &wait);
2600                 lnet_net_lock(LNET_LOCK_EX);
2601                 lnet_peer_decref_locked(lp);
2602                 /* Peer may have changed */
2603                 lp = lpni->lpni_peer_net->lpn_peer;
2604         }
2605         finish_wait(&lp->lp_dc_waitq, &wait);
2606
2607         lnet_net_unlock(LNET_LOCK_EX);
2608         lnet_net_lock(cpt);
2609         lnet_peer_decref_locked(lp);
2610         /*
2611          * The peer may have changed, so re-check and rediscover if that turns
2612          * out to have been the case. The reference count on lp ensured that
2613          * even if it was unlinked from lpni the memory could not be recycled.
2614          * Thus the check below is sufficient to determine whether the peer
2615          * changed. If the peer changed, then lp must not be dereferenced.
2616          */
2617         if (lp != lpni->lpni_peer_net->lpn_peer)
2618                 goto again;
2619
2620         if (signal_pending(current))
2621                 rc = -EINTR;
2622         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2623                 rc = -ESHUTDOWN;
2624         else if (lp->lp_dc_error)
2625                 rc = lp->lp_dc_error;
2626         else if (!block)
2627                 CDEBUG(D_NET, "non-blocking discovery\n");
2628         else if (!lnet_peer_is_uptodate(lp) &&
2629                  !(lnet_is_discovery_disabled(lp) ||
2630                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2631                 goto again;
2632
2633         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2634                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2635                libcfs_nidstr(&lpni->lpni_nid), rc,
2636                (!block) ? "pending discovery" : "discovery complete");
2637
2638         return rc;
2639 }
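
/*
 * Editor's sketch (not part of the original source): a blocking caller,
 * modeled on the way LNetPrimaryNID() drives discovery. Per the comment
 * above, the ln_api_mutex is held on entry; the wrapper name is
 * hypothetical.
 */
#if 0	/* illustrative only */
static int
lnet_discover_blocking_example(struct lnet_nid *nid, int cpt)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer_ni *lpni;
	int rc;

	lnet_net_lock(cpt);
	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		lnet_net_unlock(cpt);
		return -ENOENT;
	}
	rc = lnet_discover_peer_locked(lpni, cpt, true /* block */);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(cpt);

	return rc;
}
#endif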
2640
2641 /* Handle an incoming ack for a push. */
2642 static void
2643 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2644 {
2645         struct lnet_ping_buffer *pbuf;
2646
2647         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2648         spin_lock(&lp->lp_lock);
2649         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2650         lp->lp_push_error = ev->status;
2651         if (ev->status)
2652                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2653         else
2654                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2655         spin_unlock(&lp->lp_lock);
2656
2657         CDEBUG(D_NET, "peer %s ev->status %d\n",
2658                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2659 }
2660
2661 /* Handle a Reply message. This is the reply to a Ping message. */
2662 static void
2663 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2664 {
2665         struct lnet_ping_buffer *pbuf;
2666         int rc;
2667
2668         spin_lock(&lp->lp_lock);
2669
2670         lp->lp_disc_src_nid = ev->target.nid;
2671         lp->lp_disc_dst_nid = ev->source.nid;
2672
2673         /*
2674          * If some kind of error happened the contents of message
2675          * cannot be used. Set PING_FAILED to trigger a retry.
2676          */
2677         if (ev->status) {
2678                 lp->lp_state |= LNET_PEER_PING_FAILED;
2679                 lp->lp_ping_error = ev->status;
2680                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2681                        ev->status,
2682                        libcfs_nidstr(&lp->lp_primary_nid),
2683                        libcfs_nidstr(&ev->source.nid));
2684                 goto out;
2685         }
2686
2687         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2688         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2689                 lnet_swap_pinginfo(pbuf);
2690
2691         /*
2692          * A reply with invalid or corrupted info. Set PING_FAILED to
2693          * trigger a retry.
2694          */
2695         rc = lnet_ping_info_validate(&pbuf->pb_info);
2696         if (rc) {
2697                 lp->lp_state |= LNET_PEER_PING_FAILED;
2698                 lp->lp_ping_error = 0;
2699                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2700                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2701                 goto out;
2702         }
2703
2704         /*
2705          * The peer may have discovery disabled at its end. Set
2706          * NO_DISCOVERY as appropriate.
2707          */
2708         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2709             lnet_peer_discovery_disabled) {
2710                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2711                        libcfs_nidstr(&lp->lp_primary_nid));
2712
2713                 /* Detect whether this peer has toggled discovery from on to
2714                  * off and whether we can delete and re-create the peer. Peers
2715                  * that were manually configured cannot be deleted by discovery.
2716                  * We need to delete this peer and re-create it if the peer was
2717                  * not configured manually, is currently considered DD capable,
2718                  * and either:
2719                  * 1. We've already discovered the peer (the peer has toggled
2720                  *    the discovery feature from on to off), or
2721                  * 2. The peer is considered MR, but it was not user configured
2722                  *    (this was a "temporary" peer created via the kernel APIs
2723                  *     that we're discovering for the first time)
2724                  */
2725                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2726                                       LNET_PEER_NO_DISCOVERY)) &&
2727                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2728                                      LNET_PEER_MULTI_RAIL))) {
2729                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2730                                libcfs_nidstr(&lp->lp_primary_nid),
2731                                lp->lp_state);
2732                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2733                 }
2734                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2735         } else {
2736                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2737                        libcfs_nidstr(&lp->lp_primary_nid));
2738                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2739         }
2740
2741         /*
2742          * Update the MULTI_RAIL flag based on the reply. If the peer
2743          * was configured with DLC then the setting should match what
2744          * DLC put in.
2745          */
2746         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2747                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2748                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2749                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2750                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2751                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2752                               libcfs_nidstr(&lp->lp_primary_nid));
2753                 } else if (lnet_peer_discovery_disabled) {
2754                         CDEBUG(D_NET,
2755                                "peer %s(%p) not MR: DD disabled locally\n",
2756                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2757                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2758                         CDEBUG(D_NET,
2759                                "peer %s(%p) not MR: DD disabled remotely\n",
2760                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2761                 } else {
2762                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2763                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2764                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2765                         lnet_peer_clr_non_mr_pref_nids(lp);
2766                 }
2767         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2768                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2769                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2770                               libcfs_nidstr(&lp->lp_primary_nid));
2771                 } else {
2772                         CERROR("Multi-Rail state vanished from %s\n",
2773                                libcfs_nidstr(&lp->lp_primary_nid));
2774                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2775                 }
2776         }
2777
2778         /*
2779          * Make sure we'll allocate the correct size ping buffer when
2780          * pinging the peer.
2781          */
2782         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2783                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2784
2785         /*
2786          * Check for truncation of the Reply. Clear PING_SENT and set
2787          * PING_FAILED to trigger a retry.
2788          */
2789         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2790                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2791                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2792                 lp->lp_state |= LNET_PEER_PING_FAILED;
2793                 lp->lp_ping_error = 0;
2794                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2795                        libcfs_nidstr(&lp->lp_primary_nid),
2796                        pbuf->pb_info.pi_nnis);
2797                 goto out;
2798         }
2799
2800         /*
2801          * Check the sequence numbers in the reply. These are only
2802          * available if the reply came from a Multi-Rail peer.
2803          */
2804         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2805             pbuf->pb_info.pi_nnis > 1 &&
2806             lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2807             pbuf->pb_info.pi_ni[1].ns_nid) {
2808                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2809                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2810                                 libcfs_nidstr(&lp->lp_primary_nid),
2811                                 LNET_PING_BUFFER_SEQNO(pbuf),
2812                                 lp->lp_peer_seqno);
2813
2814                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2815         }
2816
2817         /* We're happy with the state of the data in the buffer. */
2818         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2819                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2820                lp->lp_state);
2821         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2822                 lnet_ping_buffer_decref(lp->lp_data);
2823         else
2824                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2825         lnet_ping_buffer_addref(pbuf);
2826         lp->lp_data = pbuf;
2827 out:
2828         lp->lp_state &= ~LNET_PEER_PING_SENT;
2829         spin_unlock(&lp->lp_lock);
2830
2831         lnet_net_lock(LNET_LOCK_EX);
2832         /*
2833          * If this peer is a gateway, call the routing callback to
2834          * handle the ping reply
2835          */
2836         if (lp->lp_rtr_refcount > 0)
2837                 lnet_router_discovery_ping_reply(lp);
2838         lnet_net_unlock(LNET_LOCK_EX);
2839 }
2840
2841 /*
2842  * Send event handling. Only matters for error cases, where we clean
2843  * up state on the peer and peer_ni that would otherwise be updated in
2844  * the REPLY event handler for a successful Ping, and the ACK event
2845  * handler for a successful Push.
2846  */
2847 static int
2848 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2849 {
2850         int rc = 0;
2851
2852         if (!ev->status)
2853                 goto out;
2854
2855         spin_lock(&lp->lp_lock);
2856         if (ev->msg_type == LNET_MSG_GET) {
2857                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2858                 lp->lp_state |= LNET_PEER_PING_FAILED;
2859                 lp->lp_ping_error = ev->status;
2860         } else { /* ev->msg_type == LNET_MSG_PUT */
2861                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2862                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2863                 lp->lp_push_error = ev->status;
2864         }
2865         spin_unlock(&lp->lp_lock);
2866         rc = LNET_REDISCOVER_PEER;
2867 out:
2868         CDEBUG(D_NET, "%s Send to %s: %d\n",
2869                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2870                 libcfs_nidstr(&ev->target.nid), rc);
2871         return rc;
2872 }
2873
2874 /*
2875  * Unlink event handling. This event is only seen if a call to
2876  * LNetMDUnlink() caused the event to be unlinked. If this call was
2877  * made after the event was set up in LNetGet() or LNetPut() then we
2878  * assume the Ping or Push timed out.
2879  */
2880 static void
2881 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2882 {
2883         spin_lock(&lp->lp_lock);
2884         /* We've passed through LNetGet() */
2885         if (lp->lp_state & LNET_PEER_PING_SENT) {
2886                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2887                 lp->lp_state |= LNET_PEER_PING_FAILED;
2888                 lp->lp_ping_error = -ETIMEDOUT;
2889                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2890                         libcfs_nidstr(&lp->lp_primary_nid));
2891         }
2892         /* We've passed through LNetPut() */
2893         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2894                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2895                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2896                 lp->lp_push_error = -ETIMEDOUT;
2897                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2898                         libcfs_nidstr(&lp->lp_primary_nid));
2899         }
2900         spin_unlock(&lp->lp_lock);
2901 }
2902
2903 /*
2904  * Event handler for the discovery EQ.
2905  *
2906  * Called with lnet_res_lock(cpt) held. The cpt is the
2907  * lnet_cpt_of_cookie() of the md handle cookie.
2908  */
2909 static void lnet_discovery_event_handler(struct lnet_event *event)
2910 {
2911         struct lnet_peer *lp = event->md_user_ptr;
2912         struct lnet_ping_buffer *pbuf;
2913         int rc;
2914
2915         /* discovery needs to take another look */
2916         rc = LNET_REDISCOVER_PEER;
2917
2918         CDEBUG(D_NET, "Received event: %d\n", event->type);
2919
2920         switch (event->type) {
2921         case LNET_EVENT_ACK:
2922                 lnet_discovery_event_ack(lp, event);
2923                 break;
2924         case LNET_EVENT_REPLY:
2925                 lnet_discovery_event_reply(lp, event);
2926                 break;
2927         case LNET_EVENT_SEND:
2928                 /* Only send failure triggers a retry. */
2929                 rc = lnet_discovery_event_send(lp, event);
2930                 break;
2931         case LNET_EVENT_UNLINK:
2932                 /* LNetMDUnlink() was called */
2933                 lnet_discovery_event_unlink(lp, event);
2934                 break;
2935         default:
2936                 /* Invalid events. */
2937                 LBUG();
2938         }
2939         lnet_net_lock(LNET_LOCK_EX);
2940         if (event->unlinked) {
2941                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2942                 lnet_ping_buffer_decref(pbuf);
2943                 lnet_peer_decref_locked(lp);
2944         }
2945
2946         /* Put the peer back at the end of the request queue if
2947          * discovery is not already done. */
2948         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2949             lnet_peer_queue_for_discovery(lp)) {
2950                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2951                 wake_up(&the_lnet.ln_dc_waitq);
2952         }
2953         lnet_net_unlock(LNET_LOCK_EX);
2954 }
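
/*
 * Reference pairing sketch for the handler above (illustrative, not
 * part of the build): the peer ref and the ping buffer ref taken when
 * the MD was created are owned by the MD and are dropped only once
 * event->unlinked is set, whichever event type delivers the news:
 *
 *	lnet_peer_addref_locked(lp);		// at MD creation time
 *	...
 *	if (event->unlinked) {			// final event for this MD
 *		pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
 *		lnet_ping_buffer_decref(pbuf);	// drop the MD's buffer ref
 *		lnet_peer_decref_locked(lp);	// drop the MD's peer ref
 *	}
 */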
2955
2956 /*
2957  * Build a peer from incoming data.
2958  *
2959  * The NIDs in the incoming data are supposed to be structured as follows:
2960  *  - loopback
2961  *  - primary NID
2962  *  - other NIDs in same net
2963  *  - NIDs in second net
2964  *  - NIDs in third net
2965  *  - ...
2966  * This is due to the way the list of NIDs in the data is created.
2967  *
2968  * Note that this function will mark the peer uptodate unless
2969  * -ENOMEM is encountered. All other errors are due to a conflict
2970  * between the DLC configuration and what discovery sees. We treat DLC
2971  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2972  * peer from becoming stuck in discovery.
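 *
 * Worked example (illustrative NIDs): if the peer currently has
 * curnis[] = { 10.0.0.1@tcp, 10.0.0.2@tcp } and the ping buffer
 * carries { lo0, 10.0.0.1@tcp, 10.0.0.3@tcp }, the loops below compute
 *	addnis[] = { 10.0.0.3@tcp }	(in pbuf, not in curnis[])
 *	delnis[] = { 10.0.0.2@tcp }	(in curnis[], not in pbuf)
 * while 10.0.0.1@tcp only has its cached ns_status refreshed. With
 * discovery disabled, the add/delete steps are skipped entirely.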
2973  */
2974 static int lnet_peer_merge_data(struct lnet_peer *lp,
2975                                 struct lnet_ping_buffer *pbuf)
2976 {
2977         struct lnet_peer_net *lpn;
2978         struct lnet_peer_ni *lpni;
2979         lnet_nid_t *curnis = NULL;
2980         struct lnet_ni_status *addnis = NULL;
2981         lnet_nid_t *delnis = NULL;
2982         unsigned flags;
2983         int ncurnis;
2984         int naddnis;
2985         int ndelnis;
2986         int nnis = 0;
2987         int i;
2988         int j;
2989         int rc;
2990
2991         flags = LNET_PEER_DISCOVERED;
2992         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2993                 flags |= LNET_PEER_MULTI_RAIL;
2994
2995         /*
2996          * Cache the routing feature for the peer; whether it is enabled
2997          * or disabled, as reported by the remote peer.
2998          */
2999         spin_lock(&lp->lp_lock);
3000         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3001                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3002         else
3003                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3004         spin_unlock(&lp->lp_lock);
3005
3006         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
3007         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3008         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3009         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3010         if (!curnis || !addnis || !delnis) {
3011                 rc = -ENOMEM;
3012                 goto out;
3013         }
3014         ncurnis = 0;
3015         naddnis = 0;
3016         ndelnis = 0;
3017
3018         /* Construct the list of NIDs present in peer. */
3019         lpni = NULL;
3020         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3021                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
3022
3023         /*
3024          * Check for NIDs in pbuf not present in curnis[].
3025          * The loop starts at 1 to skip the loopback NID.
3026          */
3027         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
3028                 for (j = 0; j < ncurnis; j++)
3029                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
3030                                 break;
3031                 if (j == ncurnis)
3032                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
3033         }
3034         /*
3035          * Check for NIDs in curnis[] not present in pbuf.
3036          * The nested loop starts at 1 to skip the loopback NID.
3037          *
3038          * But never add the loopback NID to delnis[]: if it is
3039          * present in curnis[] then this peer is for this node.
3040          */
3041         for (i = 0; i < ncurnis; i++) {
3042                 if (curnis[i] == LNET_NID_LO_0)
3043                         continue;
3044                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3045                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3046                                 /*
3047                                  * update the information we cache for the
3048                                  * peer with the latest information we
3049                                  * received
3050                                  */
3051                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
3052                                 if (lpni) {
3053                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3054                                         lnet_peer_ni_decref_locked(lpni);
3055                                 }
3056                                 break;
3057                         }
3058                 }
3059                 if (j == pbuf->pb_info.pi_nnis)
3060                         delnis[ndelnis++] = curnis[i];
3061         }
3062
3063         /*
3064          * If we get here and discovery is disabled then we don't want
3065          * to add or delete any NIs. We just update the ones we have
3066          * information on and call it a day.
3067          */
3068         rc = 0;
3069         if (lnet_is_discovery_disabled(lp))
3070                 goto out;
3071
3072         for (i = 0; i < naddnis; i++) {
3073                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3074                 if (rc) {
3075                         CERROR("Error adding NID %s to peer %s: %d\n",
3076                                libcfs_nid2str(addnis[i].ns_nid),
3077                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3078                         if (rc == -ENOMEM)
3079                                 goto out;
3080                 }
3081                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3082                 if (lpni) {
3083                         lpni->lpni_ns_status = addnis[i].ns_status;
3084                         lnet_peer_ni_decref_locked(lpni);
3085                 }
3086         }
3087
3088         for (i = 0; i < ndelnis; i++) {
3089                 /*
3090                  * for routers it's okay to delete the primary_nid because
3091                  * the upper layers don't really rely on it. So if we're
3092                  * being told that the router changed its primary_nid
3093                  * then it's okay to delete it.
3094                  */
3095                 if (lp->lp_rtr_refcount > 0)
3096                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3097                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3098                 if (rc) {
3099                         CERROR("Error deleting NID %s from peer %s: %d\n",
3100                                libcfs_nid2str(delnis[i]),
3101                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3102                         if (rc == -ENOMEM)
3103                                 goto out;
3104                 }
3105         }
3106
3107         /* The peer net for the primary NID should be the first entry in the
3108          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3109          * be the first entry in its peer net's lpn_peer_nis list.
3110          */
3111         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3112         if (!lpni) {
3113                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3114                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3115                 goto out;
3116         }
3117
3118         lnet_peer_ni_decref_locked(lpni); /* lpni stays on lp's lists, which hold a ref */
3119
3120         lpn = lpni->lpni_peer_net;
3121         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3122                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3123
3124         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3125                 list_move(&lpni->lpni_peer_nis,
3126                           &lpni->lpni_peer_net->lpn_peer_nis);
3127
3128         /*
3129          * Errors other than -ENOMEM are due to peers having been
3130          * configured with DLC. Ignore these because DLC overrides
3131          * Discovery.
3132          */
3133         rc = 0;
3134 out:
3135         CFS_FREE_PTR_ARRAY(curnis, nnis);
3136         CFS_FREE_PTR_ARRAY(addnis, nnis);
3137         CFS_FREE_PTR_ARRAY(delnis, nnis);
3138         lnet_ping_buffer_decref(pbuf);
3139         CDEBUG(D_NET, "peer %s (%p): %d\n",
3140                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3141
3142         if (rc) {
3143                 spin_lock(&lp->lp_lock);
3144                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3145                 lp->lp_state |= LNET_PEER_FORCE_PING;
3146                 spin_unlock(&lp->lp_lock);
3147         }
3148         return rc;
3149 }
3150
3151 /*
3152  * The data in pbuf says lp is its primary peer, but the data was
3153  * received by a different peer. Try to update lp with the data.
3154  */
3155 static int
3156 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3157 {
3158         struct lnet_handle_md mdh;
3159
3160         /* Queue lp for discovery, and force it on the request queue. */
3161         lnet_net_lock(LNET_LOCK_EX);
3162         if (lnet_peer_queue_for_discovery(lp))
3163                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3164         lnet_net_unlock(LNET_LOCK_EX);
3165
3166         LNetInvalidateMDHandle(&mdh);
3167
3168         /*
3169          * Decide whether we can move the peer to the DATA_PRESENT state.
3170          *
3171          * We replace stale data for a multi-rail peer, repair PING_FAILED
3172          * status, and preempt FORCE_PING.
3173          *
3174          * If after that we have DATA_PRESENT, we merge it into this peer.
3175          */
3176         spin_lock(&lp->lp_lock);
3177         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3178                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3179                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3180                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3181                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3182                         lnet_ping_buffer_decref(pbuf);
3183                         pbuf = lp->lp_data;
3184                         lp->lp_data = NULL;
3185                 }
3186         }
3187         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3188                 lnet_ping_buffer_decref(lp->lp_data);
3189                 lp->lp_data = NULL;
3190                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3191         }
3192         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3193                 mdh = lp->lp_ping_mdh;
3194                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3195                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3196                 lp->lp_ping_error = 0;
3197         }
3198         if (lp->lp_state & LNET_PEER_FORCE_PING)
3199                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3200         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3201         spin_unlock(&lp->lp_lock);
3202
3203         if (!LNetMDHandleIsInvalid(mdh))
3204                 LNetMDUnlink(mdh);
3205
3206         if (pbuf)
3207                 return lnet_peer_merge_data(lp, pbuf);
3208
3209         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3210         return 0;
3211 }
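
/*
 * Sequence-number example for the staleness check above (illustrative
 * values): with lp->lp_peer_seqno == 41,
 *
 *	LNET_PING_BUFFER_SEQNO(pbuf) == 42	// newer: accept pbuf and
 *						// advance lp_peer_seqno
 *	LNET_PING_BUFFER_SEQNO(pbuf) == 40	// stale: drop pbuf and use
 *						// the queued lp->lp_data
 *						// instead, if any
 */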
3212
3213 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3214 {
3215         int i;
3216
3217         for (i = 0; i < pinfo->pi_nnis; i++) {
3218                 if (pinfo->pi_ni[i].ns_nid == nid)
3219                         return true;
3220         }
3221
3222         return false;
3223 }
3224
3225 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3226  * to the discovery queue a reference was taken that will prevent the peer from
3227  * actually being freed by this function. After this function exits the
3228  * discovery thread should call lnet_peer_discovery_complete() which will
3229  * drop that reference as well as wake any waiters that may also be holding a
3230  * ref on the peer.
3231  */
3232 static int lnet_peer_deletion(struct lnet_peer *lp)
3233 __must_hold(&lp->lp_lock)
3234 {
3235         struct list_head rlist;
3236         struct lnet_route *route, *tmp;
3237         int sensitivity = lp->lp_health_sensitivity;
3238         int rc;
3239
3240         INIT_LIST_HEAD(&rlist);
3241
3242         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3243                           LNET_PEER_FORCE_PUSH);
3244         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3245                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3246
3247         /* no-op if lnet_peer_del() has already been called on this peer */
3248         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3249                 return 0;
3250
3251         spin_unlock(&lp->lp_lock);
3252
3253         mutex_lock(&the_lnet.ln_api_mutex);
3254         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3255             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3256                 mutex_unlock(&the_lnet.ln_api_mutex);
3257                 spin_lock(&lp->lp_lock);
3258                 return -ESHUTDOWN;
3259         }
3260
3261         lnet_net_lock(LNET_LOCK_EX);
3262         /* Remove the peer from the discovery work
3263          * queue if it is on there, in preparation
3264          * for deleting it.
3265          */
3266         if (!list_empty(&lp->lp_dc_list))
3267                 list_del_init(&lp->lp_dc_list);
3268         list_for_each_entry_safe(route, tmp,
3269                                  &lp->lp_routes,
3270                                  lr_gwlist)
3271                 lnet_move_route(route, NULL, &rlist);
3272         lnet_net_unlock(LNET_LOCK_EX);
3273
3274         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3275         rc = lnet_peer_del(lp);
3276         if (rc)
3277                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3278                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3279
3280         list_for_each_entry_safe(route, tmp,
3281                                  &rlist, lr_list) {
3282                 /* re-add these routes */
3283                 lnet_add_route(route->lr_net,
3284                                route->lr_hops,
3285                                &route->lr_nid,
3286                                route->lr_priority,
3287                                sensitivity);
3288                 LIBCFS_FREE(route, sizeof(*route));
3289         }
3290
3291         mutex_unlock(&the_lnet.ln_api_mutex);
3292
3293         spin_lock(&lp->lp_lock);
3294
3295         return 0;
3296 }
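
/*
 * Lock ordering sketch for the deletion path above (illustrative, not
 * part of the build): lp_lock is a spinlock, so it is dropped before
 * any sleeping lock is taken, and the caller expects it held again on
 * return:
 *
 *	spin_unlock(&lp->lp_lock);
 *	mutex_lock(&the_lnet.ln_api_mutex);	// serialize peer updates
 *	lnet_net_lock(LNET_LOCK_EX);		// exclude concurrent lookups
 *	...detach lp_dc_list, collect routes...
 *	lnet_net_unlock(LNET_LOCK_EX);
 *	lnet_peer_del(lp);			// takes its own locks
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 *	spin_lock(&lp->lp_lock);		// restore caller's state
 */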
3297
3298 /*
3299  * Update a peer using the data received.
3300  */
3301 static int lnet_peer_data_present(struct lnet_peer *lp)
3302 __must_hold(&lp->lp_lock)
3303 {
3304         struct lnet_ping_buffer *pbuf;
3305         struct lnet_peer_ni *lpni;
3306         lnet_nid_t nid = LNET_NID_ANY;
3307         unsigned flags;
3308         int rc = 0;
3309
3310         pbuf = lp->lp_data;
3311         lp->lp_data = NULL;
3312         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3313         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3314         spin_unlock(&lp->lp_lock);
3315
3316         /*
3317          * Modifications of peer structures are done while holding the
3318          * ln_api_mutex. A global lock is required because we may be
3319          * modifying multiple peer structures, and a mutex greatly
3320          * simplifies memory management.
3321          *
3322          * The actual changes to the data structures must also protect
3323          * against concurrent lookups, for which the lnet_net_lock in
3324          * LNET_LOCK_EX mode is used.
3325          */
3326         mutex_lock(&the_lnet.ln_api_mutex);
3327         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3328                 rc = -ESHUTDOWN;
3329                 goto out;
3330         }
3331
3332         /*
3333          * If this peer is not on the peer list then it is being torn
3334          * down, and our reference count may be all that is keeping it
3335          * alive. Don't do any work on it.
3336          */
3337         if (list_empty(&lp->lp_peer_list)) {
3338                 lnet_ping_buffer_decref(pbuf);
3339                 goto out;
3340         }
3341
3342         flags = LNET_PEER_DISCOVERED;
3343         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3344                 flags |= LNET_PEER_MULTI_RAIL;
3345
3346         /*
3347          * Check whether the primary NID in the message matches the
3348          * primary NID of the peer. If it does, update the peer; if
3349          * it does not, check whether there is already a peer with
3350          * that primary NID. If no such peer exists, try to update
3351          * the primary NID of the current peer (allowed if it was
3352          * created due to message traffic) and complete the update.
3353          * If the peer did exist, hand off the data to it.
3354          *
3355          * The peer for the loopback interface is a special case: this
3356          * is the peer for the local node, and we want to set its
3357          * primary NID to the correct value here. Moreover, this peer
3358          * can show up with only the loopback NID in the ping buffer.
3359          */
3360         if (pbuf->pb_info.pi_nnis <= 1) {
3361                 lnet_ping_buffer_decref(pbuf);
3362                 goto out;
3363         }
3364         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3365         if (nid_is_lo0(&lp->lp_primary_nid)) {
3366                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3367                 if (rc)
3368                         lnet_ping_buffer_decref(pbuf);
3369                 else
3370                         rc = lnet_peer_merge_data(lp, pbuf);
3371         /*
3372          * If the primary NID of the peer is present in the ping info
3373          * returned from the peer, but it is not the primary NID we have
3374          * cached, and discovery is disabled, then we don't want to
3375          * update our local peer info by adding or removing NIDs; we
3376          * just want to update the status of the NIDs that we currently
3377          * have recorded in that peer.
3378          */
3379         } else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
3380                    (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3381                                              &pbuf->pb_info) &&
3382                     lnet_is_discovery_disabled(lp))) {
3383                 rc = lnet_peer_merge_data(lp, pbuf);
3384         } else {
3385                 lpni = lnet_find_peer_ni_locked(nid);
3386                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3387                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3388                         if (rc) {
3389                                 CERROR("Primary NID error %s versus %s: %d\n",
3390                                        libcfs_nidstr(&lp->lp_primary_nid),
3391                                        libcfs_nid2str(nid), rc);
3392                                 lnet_ping_buffer_decref(pbuf);
3393                         } else {
3394                                 rc = lnet_peer_merge_data(lp, pbuf);
3395                         }
3396                         if (lpni)
3397                                 lnet_peer_ni_decref_locked(lpni);
3398                 } else {
3399                         struct lnet_peer *new_lp;
3400                         new_lp = lpni->lpni_peer_net->lpn_peer;
3401                         /*
3402                          * if lp has discovery/MR enabled that means new_lp
3403                          * should have discovery/MR enabled as well, since
3404                          * it's the same peer, which we're about to merge
3405                          */
3406                         spin_lock(&lp->lp_lock);
3407                         spin_lock(&new_lp->lp_lock);
3408                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3409                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3410                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3411                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3412                         /* If we're processing a ping reply then we may be
3413                          * about to send a push to the peer that we ping'd.
3414                          * Since the ping reply that we're processing was
3415                          * received by lp, we need to set the discovery source
3416                          * NID for new_lp to the NID stored in lp.
3417                          */
3418                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3419                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3420                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3421                         }
3422                         spin_unlock(&new_lp->lp_lock);
3423                         spin_unlock(&lp->lp_lock);
3424
3425                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3426                         lnet_consolidate_routes_locked(lp, new_lp);
3427                         lnet_peer_ni_decref_locked(lpni);
3428                 }
3429         }
3430 out:
3431         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3432                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3433                lp->lp_state);
3434         mutex_unlock(&the_lnet.ln_api_mutex);
3435
3436         spin_lock(&lp->lp_lock);
3437         /* Tell discovery to re-check the peer immediately. */
3438         if (!rc)
3439                 rc = LNET_REDISCOVER_PEER;
3440         return rc;
3441 }
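
/*
 * Decision summary for the primary-NID handling above (illustrative),
 * with nid = pbuf->pb_info.pi_ni[1].ns_nid, the primary NID claimed by
 * the ping data:
 *
 *	pi_nnis <= 1			-> nothing beyond loopback: done
 *	lp is the loopback peer		-> adopt nid, then merge
 *	nid == lp's primary NID, or
 *	discovery disabled and lp's
 *	primary is in the ping info	-> merge into lp as-is
 *	nid unowned, or owned by lp	-> adopt nid, then merge
 *	nid owned by another peer	-> hand pbuf to that peer via
 *					   lnet_peer_set_primary_data()
 */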
3442
3443 /*
3444  * A ping failed. Clear the PING_FAILED state and set the
3445  * FORCE_PING state, to ensure a retry even if discovery is
3446  * disabled. This avoids being left with incorrect state.
3447  */
3448 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3449 __must_hold(&lp->lp_lock)
3450 {
3451         struct lnet_handle_md mdh;
3452         int rc;
3453
3454         mdh = lp->lp_ping_mdh;
3455         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3456         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3457         lp->lp_state |= LNET_PEER_FORCE_PING;
3458         rc = lp->lp_ping_error;
3459         lp->lp_ping_error = 0;
3460         spin_unlock(&lp->lp_lock);
3461
3462         if (!LNetMDHandleIsInvalid(mdh))
3463                 LNetMDUnlink(mdh);
3464
3465         CDEBUG(D_NET, "peer %s:%d\n",
3466                libcfs_nidstr(&lp->lp_primary_nid), rc);
3467
3468         spin_lock(&lp->lp_lock);
3469         return rc ? rc : LNET_REDISCOVER_PEER;
3470 }
3471
3472 /* Active side of ping. */
3473 static int lnet_peer_send_ping(struct lnet_peer *lp)
3474 __must_hold(&lp->lp_lock)
3475 {
3476         int nnis;
3477         int rc;
3478         int cpt;
3479
3480         lp->lp_state |= LNET_PEER_PING_SENT;
3481         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3482         spin_unlock(&lp->lp_lock);
3483
3484         cpt = lnet_net_lock_current();
3485         /* Refcount for MD. */
3486         lnet_peer_addref_locked(lp);
3487         lnet_net_unlock(cpt);
3488
3489         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3490
3491         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3492                             the_lnet.ln_dc_handler, false);
3493
3494         /*
3495          * A positive rc means LNetMDBind() failed in lnet_send_ping(), so
3496          * no MD exists and LNetMDUnlink() will never run; drop the peer
3497          * refcount here. Otherwise LNetMDUnlink() eventually drops it.
3498          */
3499         if (rc > 0) {
3500                 lnet_net_lock(cpt);
3501                 lnet_peer_decref_locked(lp);
3502                 lnet_net_unlock(cpt);
3503                 rc = -rc; /* change the rc to negative value */
3504                 goto fail_error;
3505         } else if (rc < 0) {
3506                 goto fail_error;
3507         }
3508
3509         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3510
3511         spin_lock(&lp->lp_lock);
3512         return 0;
3513
3514 fail_error:
3515         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3516         /*
3517          * The errors that get us here are considered hard errors and
3518          * cause Discovery to terminate. So we clear PING_SENT, but do
3519          * not set either PING_FAILED or FORCE_PING. In fact we need
3520          * to clear PING_FAILED, because the unlink event handler will
3521          * have set it if we called LNetMDUnlink() above.
3522          */
3523         spin_lock(&lp->lp_lock);
3524         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3525         return rc;
3526 }
3527
3528 /*
3529  * This function exists because you cannot call LNetMDUnlink() from an
3530  * event handler.
3531  */
3532 static int lnet_peer_push_failed(struct lnet_peer *lp)
3533 __must_hold(&lp->lp_lock)
3534 {
3535         struct lnet_handle_md mdh;
3536         int rc;
3537
3538         mdh = lp->lp_push_mdh;
3539         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3540         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3541         rc = lp->lp_push_error;
3542         lp->lp_push_error = 0;
3543         spin_unlock(&lp->lp_lock);
3544
3545         if (!LNetMDHandleIsInvalid(mdh))
3546                 LNetMDUnlink(mdh);
3547
3548         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3549         spin_lock(&lp->lp_lock);
3550         return rc ? rc : LNET_REDISCOVER_PEER;
3551 }
3552
3553 /*
3554  * Mark the peer as discovered.
3555  */
3556 static int lnet_peer_discovered(struct lnet_peer *lp)
3557 __must_hold(&lp->lp_lock)
3558 {
3559         lp->lp_state |= LNET_PEER_DISCOVERED;
3560         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3561                           LNET_PEER_REDISCOVER);
3562
3563         lp->lp_dc_error = 0;
3564
3565         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3566
3567         return 0;
3568 }
3569
3570 /* Active side of push. */
3571 static int lnet_peer_send_push(struct lnet_peer *lp)
3572 __must_hold(&lp->lp_lock)
3573 {
3574         struct lnet_ping_buffer *pbuf;
3575         struct lnet_processid id;
3576         struct lnet_md md;
3577         int cpt;
3578         int rc;
3579
3580         /* Don't push to a non-multi-rail peer. */
3581         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3582                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3583                 /* if peer's NIDs are uptodate then peer is discovered */
3584                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3585                         rc = lnet_peer_discovered(lp);
3586                         return rc;
3587                 }
3588
3589                 return 0;
3590         }
3591
3592         lp->lp_state |= LNET_PEER_PUSH_SENT;
3593         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3594         spin_unlock(&lp->lp_lock);
3595
3596         cpt = lnet_net_lock_current();
3597         pbuf = the_lnet.ln_ping_target;
3598         lnet_ping_buffer_addref(pbuf);
3599         lnet_net_unlock(cpt);
3600
3601         /* Push source MD */
3602         md.start     = &pbuf->pb_info;
3603         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3604         md.threshold = 2; /* Put/Ack */
3605         md.max_size  = 0;
3606         md.options   = LNET_MD_TRACK_RESPONSE;
3607         md.handler   = the_lnet.ln_dc_handler;
3608         md.user_ptr  = lp;
3609
3610         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3611         if (rc) {
3612                 lnet_ping_buffer_decref(pbuf);
3613                 CERROR("Can't bind push source MD: %d\n", rc);
3614                 goto fail_error;
3615         }
3616
3617         cpt = lnet_net_lock_current();
3618         /* Refcount for MD. */
3619         lnet_peer_addref_locked(lp);
3620         id.pid = LNET_PID_LUSTRE;
3621         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3622                 id.nid = lp->lp_disc_dst_nid;
3623         else
3624                 id.nid = lp->lp_primary_nid;
3625         lnet_net_unlock(cpt);
3626
3627         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3628                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3629                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3630
3631         /*
3632          * Reset the discovery NIDs. There is no need to restrict sending
3633          * from that source if we call lnet_push_update_to_peers(). They
3634          * will be set to specific NIDs if we initiate discovery from
3635          * scratch.
3636          */
3637         lp->lp_disc_src_nid = LNET_ANY_NID;
3638         lp->lp_disc_dst_nid = LNET_ANY_NID;
3639
3640         if (rc)
3641                 goto fail_unlink;
3642
3643         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3644
3645         spin_lock(&lp->lp_lock);
3646         return 0;
3647
3648 fail_unlink:
3649         LNetMDUnlink(lp->lp_push_mdh);
3650         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3651 fail_error:
3652         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3653                lp, rc);
3654         /*
3655          * The errors that get us here are considered hard errors and
3656          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3657          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3658          * because the unlink event handler will have set it if we
3659          * called LNetMDUnlink() above.
3660          */
3661         spin_lock(&lp->lp_lock);
3662         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3663         return rc;
3664 }
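
/*
 * Note on the push MD above (illustrative): threshold 2 permits exactly
 * two operations on the MD, the local SEND completion of the PUT and
 * the ACK from the peer. Once both occur the MD auto-unlinks
 * (LNET_UNLINK), the discovery handler sees event->unlinked, and the
 * peer and ping buffer references taken here are released:
 *
 *	md.threshold = 2;			// Put + Ack
 *	md.options   = LNET_MD_TRACK_RESPONSE;	// wait for the ACK too
 */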
3665
3666 /*
3667  * Wait for work to be queued or some other change that must be
3668  * attended to. Returns non-zero if the discovery thread should shut
3669  * down.
3670  */
3671 static int lnet_peer_discovery_wait_for_work(void)
3672 {
3673         int cpt;
3674         int rc = 0;
3675
3676         DEFINE_WAIT(wait);
3677
3678         cpt = lnet_net_lock_current();
3679         for (;;) {
3680                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3681                                 TASK_INTERRUPTIBLE);
3682                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3683                         break;
3684                 if (lnet_push_target_resize_needed() ||
3685                     the_lnet.ln_push_target->pb_needs_post)
3686                         break;
3687                 if (!list_empty(&the_lnet.ln_dc_request))
3688                         break;
3689                 if (!list_empty(&the_lnet.ln_msg_resend))
3690                         break;
3691                 lnet_net_unlock(cpt);
3692
3693                 /*
3694                  * Wake up at most every second to check for peers that
3695                  * have been stuck on the working queue for longer than
3696                  * the peer timeout.
3697                  */
3698                 schedule_timeout(cfs_time_seconds(1));
3699                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3700                 cpt = lnet_net_lock_current();
3701         }
3702         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3703
3704         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3705                 rc = -ESHUTDOWN;
3706
3707         lnet_net_unlock(cpt);
3708
3709         CDEBUG(D_NET, "woken: %d\n", rc);
3710
3711         return rc;
3712 }
3713
3714 /*
3715  * Messages that were pending on a destroyed peer will be put on a global
3716  * resend list. The message resend list will be checked by
3717  * the discovery thread when it wakes up, and will resend messages. These
3718  * messages can still be sendable in the case that the lpni which was the
3719  * initial cause of the message re-queue was transferred to another peer.
3720  *
3721  * It is possible that LNet could be shut down while we're iterating
3722  * through the list. lnet_shutdown_lndnets() will attempt to access the
3723  * resend list, but will have to wait until the spinlock is released, by
3724  * which time there shouldn't be any more messages on the resend list.
3725  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3726  * for the messages so they can be released. The other case is that
3727  * lnet_shutdown_lndnets() can finalize all the messages before this
3728  * function can visit the resend list, in which case this function will be
3729  * a no-op.
3730  */
3731 static void lnet_resend_msgs(void)
3732 {
3733         struct lnet_msg *msg, *tmp;
3734         LIST_HEAD(resend);
3735         int rc;
3736
3737         spin_lock(&the_lnet.ln_msg_resend_lock);
3738         list_splice_init(&the_lnet.ln_msg_resend, &resend);
3739         spin_unlock(&the_lnet.ln_msg_resend_lock);
3740
3741         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3742                 list_del_init(&msg->msg_list);
3743                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3744                                &msg->msg_rtr_nid_param);
3745                 if (rc < 0) {
3746                         CNETERR("Error sending %s to %s: %d\n",
3747                                lnet_msgtyp2str(msg->msg_type),
3748                                libcfs_idstr(&msg->msg_target), rc);
3749                         lnet_finalize(msg, rc);
3750                 }
3751         }
3752 }
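
/*
 * Design note for lnet_resend_msgs() above (illustrative): the splice
 * drains the whole resend list onto the local head, re-initializing the
 * global head, while holding ln_msg_resend_lock; lnet_send() and
 * lnet_finalize() then run without the spinlock held, and new resends
 * can be queued concurrently.
 */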
3753
3754 /* The discovery thread. */
3755 static int lnet_peer_discovery(void *arg)
3756 {
3757         struct lnet_peer *lp;
3758         int rc;
3759
3760         wait_for_completion(&the_lnet.ln_started);
3761
3762         CDEBUG(D_NET, "started\n");
3763
3764         for (;;) {
3765                 if (lnet_peer_discovery_wait_for_work())
3766                         break;
3767
3768                 if (lnet_push_target_resize_needed())
3769                         lnet_push_target_resize();
3770                 else if (the_lnet.ln_push_target->pb_needs_post)
3771                         lnet_push_target_post(the_lnet.ln_push_target,
3772                                               &the_lnet.ln_push_target_md);
3773
3774                 lnet_resend_msgs();
3775
3776                 lnet_net_lock(LNET_LOCK_EX);
3777                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3778                         lnet_net_unlock(LNET_LOCK_EX);
3779                         break;
3780                 }
3781
3782                 /*
3783                  * Process all incoming discovery work requests.  When
3784                  * discovery must wait on a peer to change state, it
3785                  * is added to the tail of the ln_dc_working queue. A
3786                  * timestamp keeps track of when the peer was added,
3787                  * so we can time out discovery requests that take too
3788                  * long.
3789                  */
3790                 while (!list_empty(&the_lnet.ln_dc_request)) {
3791                         lp = list_first_entry(&the_lnet.ln_dc_request,
3792                                               struct lnet_peer, lp_dc_list);
3793                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3794                         /*
3795                          * set the time the peer was put on the dc_working
3796                          * queue. It shouldn't remain on the queue
3797                          * forever, in case the GET message (for ping)
3798                          * doesn't get a REPLY or the PUT message (for
3799                          * push) doesn't get an ACK.
3800                          */
3801                         lp->lp_last_queued = ktime_get_real_seconds();
3802                         lnet_net_unlock(LNET_LOCK_EX);
3803
3804                         if (lnet_push_target_resize_needed())
3805                                 lnet_push_target_resize();
3806                         else if (the_lnet.ln_push_target->pb_needs_post)
3807                                 lnet_push_target_post(the_lnet.ln_push_target,
3808                                                       &the_lnet.ln_push_target_md);
3809
3810                         /*
3811                          * Select an action depending on the state of
3812                          * the peer and whether discovery is disabled.
3813                          * The check whether discovery is disabled is
3814                          * done after the code that handles processing
3815                          * for arrived data, cleanup for failures, and
3816                          * forcing a Ping or Push.
3817                          */
3818                         spin_lock(&lp->lp_lock);
3819                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3820                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3821                                 lp->lp_state);
3822                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3823                                             LNET_PEER_MARK_DELETED))
3824                                 rc = lnet_peer_deletion(lp);
3825                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3826                                 rc = lnet_peer_data_present(lp);
3827                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3828                                 rc = lnet_peer_ping_failed(lp);
3829                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3830                                 rc = lnet_peer_push_failed(lp);
3831                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3832                                 rc = lnet_peer_send_ping(lp);
3833                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3834                                 rc = lnet_peer_send_push(lp);
3835                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3836                                 rc = lnet_peer_send_ping(lp);
3837                         else if (lnet_peer_needs_push(lp))
3838                                 rc = lnet_peer_send_push(lp);
3839                         else
3840                                 rc = lnet_peer_discovered(lp);
3841                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3842                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3843                                 lp->lp_state, rc);
3844
3845                         if (rc == LNET_REDISCOVER_PEER) {
3846                                 spin_unlock(&lp->lp_lock);
3847                                 lnet_net_lock(LNET_LOCK_EX);
3848                                 list_move(&lp->lp_dc_list,
3849                                           &the_lnet.ln_dc_request);
3850                         } else if (rc ||
3851                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
3852                                 spin_unlock(&lp->lp_lock);
3853                                 lnet_net_lock(LNET_LOCK_EX);
3854                                 lnet_peer_discovery_complete(lp, rc);
3855                         } else {
3856                                 spin_unlock(&lp->lp_lock);
3857                                 lnet_net_lock(LNET_LOCK_EX);
3858                         }
3859
3860                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3861                                 break;
3862
3863                 }
3864
3865                 lnet_net_unlock(LNET_LOCK_EX);
3866         }
3867
3868         CDEBUG(D_NET, "stopping\n");
3869         /*
3870          * Clean up before telling lnet_peer_discovery_stop() that
3871          * we're done. Use wake_up() below to somewhat reduce the
3872          * size of the thundering herd if there are multiple threads
3873          * waiting on discovery of a single peer.
3874          */
3875
3876         /* Queue cleanup 1: stop all pending pings and pushes. */
3877         lnet_net_lock(LNET_LOCK_EX);
3878         while (!list_empty(&the_lnet.ln_dc_working)) {
3879                 lp = list_first_entry(&the_lnet.ln_dc_working,
3880                                       struct lnet_peer, lp_dc_list);
3881                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3882                 lnet_net_unlock(LNET_LOCK_EX);
3883                 lnet_peer_cancel_discovery(lp);
3884                 lnet_net_lock(LNET_LOCK_EX);
3885         }
3886         lnet_net_unlock(LNET_LOCK_EX);
3887
3888         /* Queue cleanup 2: wait for the expired queue to clear. */
3889         while (!list_empty(&the_lnet.ln_dc_expired))
3890                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3891
3892         /* Queue cleanup 3: clear the request queue. */
3893         lnet_net_lock(LNET_LOCK_EX);
3894         while (!list_empty(&the_lnet.ln_dc_request)) {
3895                 lp = list_first_entry(&the_lnet.ln_dc_request,
3896                                       struct lnet_peer, lp_dc_list);
3897                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
3898         }
3899         lnet_net_unlock(LNET_LOCK_EX);
3900
3901         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3902         the_lnet.ln_dc_handler = NULL;
3903
3904         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3905         wake_up(&the_lnet.ln_dc_waitq);
3906
3907         CDEBUG(D_NET, "stopped\n");
3908
3909         return 0;
3910 }
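
/*
 * Dispatch summary for the discovery loop above (illustrative): each
 * peer taken off ln_dc_request is handled by exactly one action,
 * checked in this order:
 *
 *	MARK_DELETION/MARK_DELETED	-> lnet_peer_deletion()
 *	DATA_PRESENT			-> lnet_peer_data_present()
 *	PING_FAILED			-> lnet_peer_ping_failed()
 *	PUSH_FAILED			-> lnet_peer_push_failed()
 *	FORCE_PING			-> lnet_peer_send_ping()
 *	FORCE_PUSH			-> lnet_peer_send_push()
 *	!NIDS_UPTODATE			-> lnet_peer_send_ping()
 *	lnet_peer_needs_push()		-> lnet_peer_send_push()
 *	otherwise			-> lnet_peer_discovered()
 */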
3911
3912 /* ln_api_mutex is held on entry. */
3913 int lnet_peer_discovery_start(void)
3914 {
3915         struct task_struct *task;
3916         int rc = 0;
3917
3918         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3919                 return -EALREADY;
3920
3921         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3922         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3923         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3924         if (IS_ERR(task)) {
3925                 rc = PTR_ERR(task);
3926                 CERROR("Can't start peer discovery thread: %d\n", rc);
3927
3928                 the_lnet.ln_dc_handler = NULL;
3929
3930                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3931         }
3932
3933         CDEBUG(D_NET, "discovery start: %d\n", rc);
3934
3935         return rc;
3936 }
3937
3938 /* ln_api_mutex is held on entry. */
3939 void lnet_peer_discovery_stop(void)
3940 {
3941         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3942                 return;
3943
3944         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3945         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3946
3947         /* In the LNetNIInit() path we may be stopping discovery before it
3948          * has entered its work loop.
3949          */
3950         if (!completion_done(&the_lnet.ln_started))
3951                 complete(&the_lnet.ln_started);
3952         else
3953                 wake_up(&the_lnet.ln_dc_waitq);
3954
3955         mutex_unlock(&the_lnet.ln_api_mutex);
3956         wait_event(the_lnet.ln_dc_waitq,
3957                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3958         mutex_lock(&the_lnet.ln_api_mutex);
3959
3960         LASSERT(list_empty(&the_lnet.ln_dc_request));
3961         LASSERT(list_empty(&the_lnet.ln_dc_working));
3962         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3963
3964         CDEBUG(D_NET, "discovery stopped\n");
3965 }
3966
3967 /* Debugging */
3968
3969 void
3970 lnet_debug_peer(lnet_nid_t nid)
3971 {
3972         char                    *aliveness = "NA";
3973         struct lnet_peer_ni     *lp;
3974         int                     cpt;
3975
3976         cpt = lnet_cpt_of_nid(nid, NULL);
3977         lnet_net_lock(cpt);
3978
3979         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3980         if (IS_ERR(lp)) {
3981                 lnet_net_unlock(cpt);
3982                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3983                 return;
3984         }
3985
3986         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3987                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3988
3989         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3990                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3991                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3992                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3993                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3994
3995         lnet_peer_ni_decref_locked(lp);
3996
3997         lnet_net_unlock(cpt);
3998 }
3999
4000 /* Gathering information for userspace. */
4001
4002 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4003                           char aliveness[LNET_MAX_STR_LEN],
4004                           __u32 *cpt_iter, __u32 *refcount,
4005                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4006                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4007                           __u32 *peer_tx_qnob)
4008 {
4009         struct lnet_peer_table          *peer_table;
4010         struct lnet_peer_ni             *lp;
4011         int                             j;
4012         int                             lncpt;
4013         bool                            found = false;
4014
4015         /* get the number of CPTs */
4016         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4017
4018         /* if the cpt number to be examined is >= the number of cpts in
4019          * the system then indicate that there are no more cpts to examine.
4020          */
4021         if (*cpt_iter >= lncpt)
4022                 return -ENOENT;
4023
4024         /* get the current table */
4025         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4026         /* if the ptable is NULL then there are no more cpts to examine */
4027         if (peer_table == NULL)
4028                 return -ENOENT;
4029
4030         lnet_net_lock(*cpt_iter);
4031
4032         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4033                 struct list_head *peers = &peer_table->pt_hash[j];
4034
4035                 list_for_each_entry(lp, peers, lpni_hashlist) {
4036                         if (!nid_is_nid4(&lp->lpni_nid))
4037                                 continue;
4038                         if (peer_index-- > 0)
4039                                 continue;
4040
4041                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4042                         if (lnet_isrouter(lp) ||
4043                                 lnet_peer_aliveness_enabled(lp))
4044                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4045                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4046
4047                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4048                         *refcount = kref_read(&lp->lpni_kref);
4049                         *ni_peer_tx_credits =
4050                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4051                         *peer_tx_credits = lp->lpni_txcredits;
4052                         *peer_rtr_credits = lp->lpni_rtrcredits;
4053                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4054                         *peer_tx_qnob = lp->lpni_txqnob;
4055
4056                         found = true;
4057                 }
4058
4059         }
4060         lnet_net_unlock(*cpt_iter);
4061
4062         *cpt_iter = lncpt;
4063
4064         return found ? 0 : -ENOENT;
4065 }
4066
4067 /* ln_api_mutex is held, which keeps the peer list stable */
4068 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4069 {
4070         struct lnet_ioctl_element_stats *lpni_stats;
4071         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4072         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4073         struct lnet_peer_ni_credit_info *lpni_info;
4074         struct lnet_peer_ni *lpni;
4075         struct lnet_peer *lp;
4076         lnet_nid_t nid;
4077         __u32 size;
4078         int rc;
4079
4080         lp = lnet_find_peer4(cfg->prcfg_prim_nid);
4081
4082         if (!lp) {
4083                 rc = -ENOENT;
4084                 goto out;
4085         }
4086
4087         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4088                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4089         size *= lp->lp_nnis;
4090         if (size > cfg->prcfg_size) {
4091                 cfg->prcfg_size = size;
4092                 rc = -E2BIG;
4093                 goto out_lp_decref;
4094         }
4095
4096         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4097         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4098         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4099         cfg->prcfg_count = lp->lp_nnis;
4100         cfg->prcfg_size = size;
4101         cfg->prcfg_state = lp->lp_state;
4102
4103         /* Allocate helper buffers. */
4104         rc = -ENOMEM;
4105         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4106         if (!lpni_info)
4107                 goto out_lp_decref;
4108         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4109         if (!lpni_stats)
4110                 goto out_free_info;
4111         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4112         if (!lpni_msg_stats)
4113                 goto out_free_stats;
4114         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4115         if (!lpni_hstats)
4116                 goto out_free_msg_stats;
4117
4118
4119         lpni = NULL;
4120         rc = -EFAULT;
4121         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4122                 if (!nid_is_nid4(&lpni->lpni_nid))
4123                         continue;
4124                 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4125                 if (copy_to_user(bulk, &nid, sizeof(nid)))
4126                         goto out_free_hstats;
4127                 bulk += sizeof(nid);
4128
4129                 memset(lpni_info, 0, sizeof(*lpni_info));
4130                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4131                 if (lnet_isrouter(lpni) ||
4132                         lnet_peer_aliveness_enabled(lpni))
4133                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4134                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4135
4136                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4137                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4138                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4139                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4140                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4141                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4142                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4143                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4144                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4145                         goto out_free_hstats;
4146                 bulk += sizeof(*lpni_info);
4147
4148                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4149                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4150                                                             LNET_STATS_TYPE_SEND);
4151                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4152                                                             LNET_STATS_TYPE_RECV);
4153                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4154                                                             LNET_STATS_TYPE_DROP);
4155                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4156                         goto out_free_hstats;
4157                 bulk += sizeof(*lpni_stats);
4158                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4159                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4160                         goto out_free_hstats;
4161                 bulk += sizeof(*lpni_msg_stats);
4162                 lpni_hstats->hlpni_network_timeout =
4163                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4164                 lpni_hstats->hlpni_remote_dropped =
4165                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4166                 lpni_hstats->hlpni_remote_timeout =
4167                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4168                 lpni_hstats->hlpni_remote_error =
4169                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4170                 lpni_hstats->hlpni_health_value =
4171                   atomic_read(&lpni->lpni_healthv);
4172                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4173                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4174                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4175                         goto out_free_hstats;
4176                 bulk += sizeof(*lpni_hstats);
4177         }
4178         rc = 0;
4179
4180 out_free_hstats:
4181         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4182 out_free_msg_stats:
4183         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4184 out_free_stats:
4185         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4186 out_free_info:
4187         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4188 out_lp_decref:
4189         lnet_peer_decref_locked(lp);
4190 out:
4191         return rc;
4192 }
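
/*
 * Layout sketch of the user-space bulk buffer filled above, repeated
 * once per peer NI; this mirrors the size computation at the top of
 * the function (illustrative only):
 *
 *	[ lnet_nid_t nid                      ]
 *	[ struct lnet_peer_ni_credit_info     ]
 *	[ struct lnet_ioctl_element_stats     ]
 *	[ struct lnet_ioctl_element_msg_stats ]
 *	[ struct lnet_ioctl_peer_ni_hstats    ]
 */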
4193
4194 /* must hold net_lock/0 */
4195 void
4196 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4197                                      struct list_head *recovery_queue,
4198                                      time64_t now)
4199 {
4200         /* the monitor thread could've shut down and cleaned up the queues */
4201         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4202                 return;
4203
4204         if (!list_empty(&lpni->lpni_recovery))
4205                 return;
4206
4207         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4208                 return;
4209
4210         if (!lpni->lpni_last_alive) {
4211                 CDEBUG(D_NET,
4212                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4213                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4214                        lpni->lpni_last_alive);
4215                 return;
4216         }
4217
4218         if (lnet_recovery_limit &&
4219             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4220                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4221                        libcfs_nidstr(&lpni->lpni_nid),
4222                        lpni->lpni_last_alive);
4223                 /* Reset the ping count so that if this peer NI is added back to
4224                  * the recovery queue we will send the first ping right away.
4225                  */
4226                 lpni->lpni_ping_count = 0;
4227                 return;
4228         }
4229
4230         /* This peer NI is going on the recovery queue, so take a ref on it */
4231         lnet_peer_ni_addref_locked(lpni);
4232
4233         lnet_peer_ni_set_next_ping(lpni, now);
4234
4235         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4236                libcfs_nidstr(&lpni->lpni_nid),
4237                lpni->lpni_ping_count,
4238                lpni->lpni_next_ping,
4239                lpni->lpni_last_alive,
4240                atomic_read(&lpni->lpni_healthv));
4241
4242         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4243 }
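
/*
 * Aging example for the lnet_recovery_limit check above (illustrative
 * numbers): with lnet_recovery_limit = 300 and now = 1000,
 *
 *	lpni_last_alive = 800 -> 1000 <= 800 + 300: still eligible, take
 *				 a ref and queue the peer NI for recovery
 *	lpni_last_alive = 600 -> 1000 >  600 + 300: aged out, reset
 *				 lpni_ping_count and skip the queue
 */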
4244
4245 /* Call with the ln_api_mutex held */
4246 void
4247 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4248 {
4249         struct lnet_peer_table *ptable;
4250         struct lnet_peer *lp;
4251         struct lnet_peer_net *lpn;
4252         struct lnet_peer_ni *lpni;
4253         int lncpt;
4254         int cpt;
4255         time64_t now;
4256
4257         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4258                 return;
4259
4260         now = ktime_get_seconds();
4261
4262         if (!all) {
4263                 lnet_net_lock(LNET_LOCK_EX);
4264                 lpni = lnet_find_peer_ni_locked(nid);
4265                 if (!lpni) {
4266                         lnet_net_unlock(LNET_LOCK_EX);
4267                         return;
4268                 }
4269                 lnet_set_lpni_healthv_locked(lpni, value);
4270                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4271                                              &the_lnet.ln_mt_peerNIRecovq, now);
4272                 lnet_peer_ni_decref_locked(lpni);
4273                 lnet_net_unlock(LNET_LOCK_EX);
4274                 return;
4275         }
4276
4277         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4278
4279         /*
4280          * Walk all the peers and reset the health value for each one to the
4281          * specified value.
4282          */
4283         lnet_net_lock(LNET_LOCK_EX);
4284         for (cpt = 0; cpt < lncpt; cpt++) {
4285                 ptable = the_lnet.ln_peer_tables[cpt];
4286                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4287                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4288                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4289                                                     lpni_peer_nis) {
4290                                         lnet_set_lpni_healthv_locked(lpni,
4291                                                                      value);
4292                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4293                                              &the_lnet.ln_mt_peerNIRecovq, now);
4294                                 }
4295                         }
4296                 }
4297         }
4298         lnet_net_unlock(LNET_LOCK_EX);
4299 }
4300