1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(struct lnet_nid *nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         kref_init(&lpni->lpni_kref);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = *nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NID_NET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
246 static struct lnet_peer *
247 lnet_peer_alloc(struct lnet_nid *nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = *nid;
265         lp->lp_disc_src_nid = LNET_ANY_NID;
266         lp->lp_disc_dst_nid = LNET_ANY_NID;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * all peers created on a router should have health on
274          * if it's not already on.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for the loopback peer. A peer for the
281          * loopback interface is only created when we attempt to send a
282          * message over the loopback interface itself, and there is never
283          * any need to use a different interface when sending messages to
284          * ourselves.
285          */
286         if (nid_is_lo0(nid))
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
291
292         return lp;
293 }
294
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected that lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318          * We cannot resend them now because we're holding the cpt lock;
319          * releasing the lock could leave things in an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nidstr(&lp->lp_primary_nid),
382                 libcfs_nidstr(&lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nidstr(&lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
437
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
461
462         spin_lock(&peer->lp_lock);
463         peer->lp_state |= LNET_PEER_MARK_DELETED;
464         spin_unlock(&peer->lp_lock);
465
466         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
467         while (lpni != NULL) {
468                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469                 rc = lnet_peer_ni_del_locked(lpni, false);
470                 if (rc != 0)
471                         rc2 = rc;
472                 lpni = lpni2;
473         }
474
475         return rc2;
476 }
477
478 /*
479  * Discovering this peer is taking too long. Cancel any Ping or Push
480  * that discovery is waiting on by unlinking the relevant MDs. The
481  * lnet_discovery_event_handler() will proceed from here and complete
482  * the cleanup.
483  */
484 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
485 {
486         struct lnet_handle_md ping_mdh;
487         struct lnet_handle_md push_mdh;
488
489         LNetInvalidateMDHandle(&ping_mdh);
490         LNetInvalidateMDHandle(&push_mdh);
491
492         spin_lock(&lp->lp_lock);
493         if (lp->lp_state & LNET_PEER_PING_SENT) {
494                 ping_mdh = lp->lp_ping_mdh;
495                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
496         }
497         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
498                 push_mdh = lp->lp_push_mdh;
499                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
500         }
501         spin_unlock(&lp->lp_lock);
502
503         if (!LNetMDHandleIsInvalid(ping_mdh))
504                 LNetMDUnlink(ping_mdh);
505         if (!LNetMDHandleIsInvalid(push_mdh))
506                 LNetMDUnlink(push_mdh);
507 }
508
509 static int
510 lnet_peer_del(struct lnet_peer *peer)
511 {
512         lnet_peer_cancel_discovery(peer);
513         lnet_net_lock(LNET_LOCK_EX);
514         lnet_peer_del_locked(peer);
515         lnet_net_unlock(LNET_LOCK_EX);
516
517         return 0;
518 }
519
520 /*
521  * Delete a NID from a peer. Call with ln_api_mutex held.
522  *
523  * Error codes:
524  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
525  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
526  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
527  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
528  */
529 static int
530 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
531 {
532         struct lnet_peer_ni *lpni;
533         struct lnet_nid primary_nid = lp->lp_primary_nid;
534         struct lnet_nid nid;
535         int rc = 0;
536         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
537
538         lnet_nid4_to_nid(nid4, &nid);
539         if (!(flags & LNET_PEER_CONFIGURED)) {
540                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
541                         rc = -EPERM;
542                         goto out;
543                 }
544         }
545
546         /* If we're asked to lock down the primary NID we shouldn't be
547          * deleting it
548          */
549         if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
550             nid_same(&primary_nid, &nid)) {
551                 rc = -EPERM;
552                 goto out;
553         }
554
555         lpni = lnet_peer_ni_find_locked(&nid);
556         if (!lpni) {
557                 rc = -ENOENT;
558                 goto out;
559         }
560         lnet_peer_ni_decref_locked(lpni);
561         if (lp != lpni->lpni_peer_net->lpn_peer) {
562                 rc = -ECHILD;
563                 goto out;
564         }
565
566         /*
567          * This function only allows deletion of the primary NID if it
568          * is the only NID.
569          */
570         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
571                 rc = -EBUSY;
572                 goto out;
573         }
574
575         lnet_net_lock(LNET_LOCK_EX);
576
577         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
578                 struct lnet_peer_ni *lpni2;
579                 /* assign the next peer_ni to be the primary */
580                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
581                 LASSERT(lpni2);
582                 lp->lp_primary_nid = lpni2->lpni_nid;
583         }
584         rc = lnet_peer_ni_del_locked(lpni, force);
585
586         lnet_net_unlock(LNET_LOCK_EX);
587
588 out:
589         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
590                libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid),
591                flags, rc);
592
593         return rc;
594 }
595
596 static void
597 lnet_peer_table_cleanup_locked(struct lnet_net *net,
598                                struct lnet_peer_table *ptable)
599 {
600         int                      i;
601         struct lnet_peer_ni     *next;
602         struct lnet_peer_ni     *lpni;
603         struct lnet_peer        *peer;
604
605         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
606                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
607                                          lpni_hashlist) {
608                         if (net != NULL && net != lpni->lpni_net)
609                                 continue;
610
611                         peer = lpni->lpni_peer_net->lpn_peer;
612                         if (!nid_same(&peer->lp_primary_nid,
613                                        &lpni->lpni_nid)) {
614                                 lnet_peer_ni_del_locked(lpni, false);
615                                 continue;
616                         }
617                         /*
618                          * Removing the primary NID implies removing
619                          * the entire peer. Advance next beyond any
620                          * peer_ni that belongs to the same peer.
621                          */
622                         list_for_each_entry_from(next, &ptable->pt_hash[i],
623                                                  lpni_hashlist) {
624                                 if (next->lpni_peer_net->lpn_peer != peer)
625                                         break;
626                         }
627                         lnet_peer_del_locked(peer);
628                 }
629         }
630 }
631
632 static void
633 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
634 {
635         wait_var_event_warning(&ptable->pt_zombies,
636                                ptable->pt_zombies == 0,
637                                "Waiting for %d zombies on peer table\n",
638                                ptable->pt_zombies);
639 }
640
641 static void
642 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
643                                 struct lnet_peer_table *ptable)
644 {
645         struct lnet_peer_ni     *lp;
646         struct lnet_peer_ni     *tmp;
647         lnet_nid_t              gw_nid;
648         int                     i;
649
650         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
651                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
652                                          lpni_hashlist) {
653                         if (net != lp->lpni_net)
654                                 continue;
655
656                         if (!lnet_isrouter(lp))
657                                 continue;
658
659                         /* FIXME handle large-addr nid */
660                         gw_nid = lnet_nid_to_nid4(
661                                 &lp->lpni_peer_net->lpn_peer->lp_primary_nid);
662
663                         lnet_net_unlock(LNET_LOCK_EX);
664                         lnet_del_route(LNET_NET_ANY, gw_nid);
665                         lnet_net_lock(LNET_LOCK_EX);
666                 }
667         }
668 }
669
670 void
671 lnet_peer_tables_cleanup(struct lnet_net *net)
672 {
673         int i;
674         struct lnet_peer_table *ptable;
675
676         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
677         /* If just deleting the peers for a NI, get rid of any routes these
678          * peers are gateways for. */
679         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
680                 lnet_net_lock(LNET_LOCK_EX);
681                 lnet_peer_table_del_rtrs_locked(net, ptable);
682                 lnet_net_unlock(LNET_LOCK_EX);
683         }
684
685         /* Start the cleanup process */
686         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
687                 lnet_net_lock(LNET_LOCK_EX);
688                 lnet_peer_table_cleanup_locked(net, ptable);
689                 lnet_net_unlock(LNET_LOCK_EX);
690         }
691
692         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
693                 lnet_peer_ni_finalize_wait(ptable);
694 }
695
696 static struct lnet_peer_ni *
697 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
698 {
699         struct list_head        *peers;
700         struct lnet_peer_ni     *lp;
701
702         if (the_lnet.ln_state != LNET_STATE_RUNNING)
703                 return NULL;
704
705         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
706         list_for_each_entry(lp, peers, lpni_hashlist) {
707                 if (nid_same(&lp->lpni_nid, nid)) {
708                         lnet_peer_ni_addref_locked(lp);
709                         return lp;
710                 }
711         }
712
713         return NULL;
714 }
715
716 struct lnet_peer_ni *
717 lnet_find_peer_ni_locked(lnet_nid_t nid4)
718 {
719         struct lnet_peer_ni *lpni;
720         struct lnet_peer_table *ptable;
721         int cpt;
722         struct lnet_nid nid;
723
724         lnet_nid4_to_nid(nid4, &nid);
725
726         cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);
727
728         ptable = the_lnet.ln_peer_tables[cpt];
729         lpni = lnet_get_peer_ni_locked(ptable, &nid);
730
731         return lpni;
732 }
733
734 struct lnet_peer_ni *
735 lnet_peer_ni_find_locked(struct lnet_nid *nid)
736 {
737         struct lnet_peer_ni *lpni;
738         struct lnet_peer_table *ptable;
739         int cpt;
740
741         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
742
743         ptable = the_lnet.ln_peer_tables[cpt];
744         lpni = lnet_get_peer_ni_locked(ptable, nid);
745
746         return lpni;
747 }
748
749 struct lnet_peer_ni *
750 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
751 {
752         struct lnet_peer_net *lpn;
753         struct lnet_peer_ni *lpni;
754
755         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
756         if (!lpn)
757                 return NULL;
758
759         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
760                 if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
761                         return lpni;
762         }
763
764         return NULL;
765 }
766
767 struct lnet_peer_ni *
768 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
769 {
770         struct lnet_peer_net *lpn;
771         struct lnet_peer_ni *lpni;
772
773         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
774         if (!lpn)
775                 return NULL;
776
777         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
778                 if (nid_same(&lpni->lpni_nid, nid))
779                         return lpni;
780         }
781
782         return NULL;
783 }
784
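/*
 * Look up the peer that owns @nid. Returns the peer with a reference
 * held, or NULL if no peer_ni exists for @nid; the caller is expected
 * to drop the reference (e.g. with lnet_peer_decref_locked() while
 * holding lnet_net_lock) once it is done with the peer.
 */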
785 struct lnet_peer *
786 lnet_find_peer(lnet_nid_t nid)
787 {
788         struct lnet_peer_ni *lpni;
789         struct lnet_peer *lp = NULL;
790         int cpt;
791
792         cpt = lnet_net_lock_current();
793         lpni = lnet_find_peer_ni_locked(nid);
794         if (lpni) {
795                 lp = lpni->lpni_peer_net->lpn_peer;
796                 lnet_peer_addref_locked(lp);
797                 lnet_peer_ni_decref_locked(lpni);
798         }
799         lnet_net_unlock(cpt);
800
801         return lp;
802 }
803
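/*
 * Return the peer_net that follows @prev_lpn_id on this peer, wrapping
 * around to the first net at the end of the list; a @prev_lpn_id of 0
 * returns the first net. For example (illustrative net names): for a
 * peer on nets { tcp, o2ib }, passing the o2ib net id returns the tcp
 * net, which lets callers round-robin over the peer's nets.
 */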
804 struct lnet_peer_net *
805 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
806 {
807         struct lnet_peer_net *net;
808
809         if (!prev_lpn_id) {
810                 /* no net id provided, return the first net */
811                 net = list_first_entry_or_null(&lp->lp_peer_nets,
812                                                struct lnet_peer_net,
813                                                lpn_peer_nets);
814
815                 return net;
816         }
817
818         /* find the net after the one provided */
819         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
820                 if (net->lpn_net_id == prev_lpn_id) {
821                         /*
822                          * if we reached the end of the list, loop back
823                          * to the beginning.
824                          */
825                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
826                                 return list_first_entry_or_null(&lp->lp_peer_nets,
827                                                                 struct lnet_peer_net,
828                                                                 lpn_peer_nets);
829                         else
830                                 return list_next_entry(net, lpn_peer_nets);
831                 }
832         }
833
834         return NULL;
835 }
836
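/*
 * Iterator over the peer NIs of @peer, optionally restricted to
 * @peer_net. Pass NULL as @prev to start the walk and the previously
 * returned peer_ni to advance; NULL is returned at the end. A typical
 * use (see lnet_peer_clr_non_mr_pref_nids() below) looks roughly like:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		... use lpni ...
 */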
837 struct lnet_peer_ni *
838 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
839                              struct lnet_peer_net *peer_net,
840                              struct lnet_peer_ni *prev)
841 {
842         struct lnet_peer_ni *lpni;
843         struct lnet_peer_net *net = peer_net;
844
845         if (!prev) {
846                 if (!net) {
847                         if (list_empty(&peer->lp_peer_nets))
848                                 return NULL;
849
850                         net = list_entry(peer->lp_peer_nets.next,
851                                          struct lnet_peer_net,
852                                          lpn_peer_nets);
853                 }
854                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
855                                   lpni_peer_nis);
856
857                 return lpni;
858         }
859
860         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
861                 /*
862                  * if you reached the end of the peer ni list and the peer
863                  * net is specified then there are no more peer nis in that
864                  * net.
865                  */
866                 if (net)
867                         return NULL;
868
869                 /*
870                  * We reached the end of this net's peer NI list. Move
871                  * on to the next net.
872                  */
873                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
874                     &peer->lp_peer_nets)
875                         /* no more nets and no more NIs. */
876                         return NULL;
877
878                 /* get the next net */
879                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
880                                  struct lnet_peer_net,
881                                  lpn_peer_nets);
882                 /* get the ni on it */
883                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
884                                   lpni_peer_nis);
885
886                 return lpni;
887         }
888
889         /* there are more nis left */
890         lpni = list_entry(prev->lpni_peer_nis.next,
891                           struct lnet_peer_ni, lpni_peer_nis);
892
893         return lpni;
894 }
895
896 /* Call with the ln_api_mutex held */
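/* Copy the primary NID of each peer into the user buffer @ids as a
 * struct lnet_process_id with pid LNET_PID_LUSTRE; peers whose primary
 * NID is not a nid4 are skipped. If the buffer is too small, -E2BIG is
 * returned and *countp/*sizep report the peer count and buffer size
 * needed for a retry.
 */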
897 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
898 {
899         struct lnet_process_id id;
900         struct lnet_peer_table *ptable;
901         struct lnet_peer *lp;
902         __u32 count = 0;
903         __u32 size = 0;
904         int lncpt;
905         int cpt;
906         __u32 i;
907         int rc;
908
909         rc = -ESHUTDOWN;
910         if (the_lnet.ln_state != LNET_STATE_RUNNING)
911                 goto done;
912
913         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
914
915         /*
916          * Count the number of peers, and return -E2BIG if the buffer
917          * is too small. We'll also return the desired size.
918          */
919         rc = -E2BIG;
920         for (cpt = 0; cpt < lncpt; cpt++) {
921                 ptable = the_lnet.ln_peer_tables[cpt];
922                 count += ptable->pt_peers;
923         }
924         size = count * sizeof(*ids);
925         if (size > *sizep)
926                 goto done;
927
928         /*
929          * Walk the peer lists and copy out the primary nids.
930          * This is safe because the peer lists are only modified
931          * while the ln_api_mutex is held. So we don't need to
932          * hold the lnet_net_lock as well, and can therefore
933          * directly call copy_to_user().
934          */
935         rc = -EFAULT;
936         memset(&id, 0, sizeof(id));
937         id.pid = LNET_PID_LUSTRE;
938         i = 0;
939         for (cpt = 0; cpt < lncpt; cpt++) {
940                 ptable = the_lnet.ln_peer_tables[cpt];
941                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
942                         if (!nid_is_nid4(&lp->lp_primary_nid))
943                                 continue;
944                         if (i >= count)
945                                 goto done;
946                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
947                         if (copy_to_user(&ids[i], &id, sizeof(id)))
948                                 goto done;
949                         i++;
950                 }
951         }
952         rc = 0;
953 done:
954         *countp = count;
955         *sizep = size;
956         return rc;
957 }
958
959 /*
960  * Start pushes to peers that need to be updated for a configuration
961  * change on this node.
962  */
963 void
964 lnet_push_update_to_peers(int force)
965 {
966         struct lnet_peer_table *ptable;
967         struct lnet_peer *lp;
968         int lncpt;
969         int cpt;
970
971         lnet_net_lock(LNET_LOCK_EX);
972         if (lnet_peer_discovery_disabled)
973                 force = 0;
974         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
975         for (cpt = 0; cpt < lncpt; cpt++) {
976                 ptable = the_lnet.ln_peer_tables[cpt];
977                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
978                         if (force) {
979                                 spin_lock(&lp->lp_lock);
980                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
981                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
982                                 spin_unlock(&lp->lp_lock);
983                         }
984                         if (lnet_peer_needs_push(lp))
985                                 lnet_peer_queue_for_discovery(lp);
986                 }
987         }
988         lnet_net_unlock(LNET_LOCK_EX);
989         wake_up(&the_lnet.ln_dc_waitq);
990 }
991
992 /* Find whether the given NID is one of the preferred gateways for the
993  * remote peer.
994  * return:
995  *      false: the list is empty, or the NID is not in it
996  *      true: the NID is found in the list
997  */
998 bool
999 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
1000                              struct lnet_nid *gw_nid)
1001 {
1002         struct lnet_nid_list *ne;
1003
1004         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1005                libcfs_nidstr(&lpni->lpni_nid),
1006                list_empty(&lpni->lpni_rtr_pref_nids));
1007
1008         if (list_empty(&lpni->lpni_rtr_pref_nids))
1009                 return false;
1010
1011         /* iterate through all the preferred NIDs and see if any of them
1012          * matches the provided gw_nid
1013          */
1014         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1015                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1016                        libcfs_nidstr(&ne->nl_nid),
1017                        libcfs_nidstr(gw_nid));
1018                 if (nid_same(&ne->nl_nid, gw_nid))
1019                         return true;
1020         }
1021
1022         return false;
1023 }
1024
1025 void
1026 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
1027 {
1028         struct list_head zombies;
1029         struct lnet_nid_list *ne;
1030         struct lnet_nid_list *tmp;
1031         int cpt = lpni->lpni_cpt;
1032
1033         INIT_LIST_HEAD(&zombies);
1034
1035         lnet_net_lock(cpt);
1036         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
1037         lnet_net_unlock(cpt);
1038
1039         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1040                 list_del(&ne->nl_list);
1041                 LIBCFS_FREE(ne, sizeof(*ne));
1042         }
1043 }
1044
1045 int
1046 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
1047                        struct lnet_nid *gw_nid)
1048 {
1049         int cpt = lpni->lpni_cpt;
1050         struct lnet_nid_list *ne = NULL;
1051
1052         /* This function is called with api_mutex held. When the api_mutex
1053          * is held the list cannot be modified, as it is only modified as
1054          * a result of applying a UDSP and that happens under api_mutex
1055          * lock.
1056          */
1057         __must_hold(&the_lnet.ln_api_mutex);
1058
1059         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1060                 if (nid_same(&ne->nl_nid, gw_nid))
1061                         return -EEXIST;
1062         }
1063
1064         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1065         if (!ne)
1066                 return -ENOMEM;
1067
1068         ne->nl_nid = *gw_nid;
1069
1070         /* Lock the cpt to protect against addition and checks in the
1071          * selection algorithm
1072          */
1073         lnet_net_lock(cpt);
1074         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1075         lnet_net_unlock(cpt);
1076
1077         return 0;
1078 }
1079
1080 /*
1081  * Test whether an NI is a preferred NI for this peer_ni, i.e. whether
1082  * this is a preferred point-to-point path. Call with lnet_net_lock in
1083  * shared mode.
1084  */
1085 bool
1086 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1087 {
1088         struct lnet_nid_list *ne;
1089
1090         if (lpni->lpni_pref_nnids == 0)
1091                 return false;
1092         if (lpni->lpni_pref_nnids == 1)
1093                 return nid_same(&lpni->lpni_pref.nid, nid);
1094         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1095                 if (nid_same(&ne->nl_nid, nid))
1096                         return true;
1097         }
1098         return false;
1099 }
1100
1101 /*
1102  * Set a single ni as preferred, provided no preferred ni is already
1103  * defined. Only to be used for non-multi-rail peer_ni.
1104  */
1105 int
1106 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1107                                   struct lnet_nid *nid)
1108 {
1109         int rc = 0;
1110
1111         if (!nid)
1112                 return -EINVAL;
1113         spin_lock(&lpni->lpni_lock);
1114         if (LNET_NID_IS_ANY(nid)) {
1115                 rc = -EINVAL;
1116         } else if (lpni->lpni_pref_nnids > 0) {
1117                 rc = -EPERM;
1118         } else if (lpni->lpni_pref_nnids == 0) {
1119                 lpni->lpni_pref.nid = *nid;
1120                 lpni->lpni_pref_nnids = 1;
1121                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1122         }
1123         spin_unlock(&lpni->lpni_lock);
1124
1125         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1126                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1127         return rc;
1128 }
1129
1130 /*
1131  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1132  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1133  */
1134 int
1135 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1136 {
1137         int rc = 0;
1138
1139         spin_lock(&lpni->lpni_lock);
1140         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1141                 lpni->lpni_pref_nnids = 0;
1142                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1143         } else if (lpni->lpni_pref_nnids == 0) {
1144                 rc = -ENOENT;
1145         } else {
1146                 rc = -EPERM;
1147         }
1148         spin_unlock(&lpni->lpni_lock);
1149
1150         CDEBUG(D_NET, "peer %s: %d\n",
1151                libcfs_nidstr(&lpni->lpni_nid), rc);
1152         return rc;
1153 }
1154
1155 void
1156 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1157 {
1158         lpni->lpni_sel_priority = priority;
1159 }
1160
1161 /*
1162  * Clear the preferred NIDs from a non-multi-rail peer.
1163  */
1164 void
1165 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1166 {
1167         struct lnet_peer_ni *lpni = NULL;
1168
1169         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1170                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1171 }
1172
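/*
 * Add @nid to @lpni's preferred NIDs. A single preferred NID is stored
 * inline in lpni_pref.nid; once a second NID is added the entries are
 * kept on the lpni_pref.nids list instead, and the original inline NID
 * is moved onto that list below. A non-MR peer may have at most one
 * preferred NID, so -EPERM is returned if a different NID is already
 * preferred.
 */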
1173 int
1174 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1175 {
1176         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1177         struct lnet_nid_list *ne1 = NULL;
1178         struct lnet_nid_list *ne2 = NULL;
1179         struct lnet_nid *tmp_nid = NULL;
1180         int rc = 0;
1181
1182         if (LNET_NID_IS_ANY(nid)) {
1183                 rc = -EINVAL;
1184                 goto out;
1185         }
1186
1187         if (lpni->lpni_pref_nnids == 1 &&
1188             nid_same(&lpni->lpni_pref.nid, nid)) {
1189                 rc = -EEXIST;
1190                 goto out;
1191         }
1192
1193         /* A non-MR node may have only one preferred NI per peer_ni */
1194         if (lpni->lpni_pref_nnids > 0 &&
1195             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1196                 rc = -EPERM;
1197                 goto out;
1198         }
1199
1200         /* add the new preferred nid to the list of preferred nids */
1201         if (lpni->lpni_pref_nnids != 0) {
1202                 size_t alloc_size = sizeof(*ne1);
1203
1204                 if (lpni->lpni_pref_nnids == 1) {
1205                         tmp_nid = &lpni->lpni_pref.nid;
1206                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1207                 }
1208
1209                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1210                         if (nid_same(&ne1->nl_nid, nid)) {
1211                                 rc = -EEXIST;
1212                                 goto out;
1213                         }
1214                 }
1215
1216                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1217                                  alloc_size);
1218                 if (!ne1) {
1219                         rc = -ENOMEM;
1220                         goto out;
1221                 }
1222
1223                 /* move the originally stored nid to the list */
1224                 if (lpni->lpni_pref_nnids == 1) {
1225                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1226                                 lpni->lpni_cpt, alloc_size);
1227                         if (!ne2) {
1228                                 rc = -ENOMEM;
1229                                 goto out;
1230                         }
1231                         INIT_LIST_HEAD(&ne2->nl_list);
1232                         ne2->nl_nid = *tmp_nid;
1233                 }
1234                 ne1->nl_nid = *nid;
1235         }
1236
1237         lnet_net_lock(LNET_LOCK_EX);
1238         spin_lock(&lpni->lpni_lock);
1239         if (lpni->lpni_pref_nnids == 0) {
1240                 lpni->lpni_pref.nid = *nid;
1241         } else {
1242                 if (ne2)
1243                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1244                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1245         }
1246         lpni->lpni_pref_nnids++;
1247         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1248         spin_unlock(&lpni->lpni_lock);
1249         lnet_net_unlock(LNET_LOCK_EX);
1250
1251 out:
1252         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1253                 spin_lock(&lpni->lpni_lock);
1254                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1255                 spin_unlock(&lpni->lpni_lock);
1256         }
1257         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1258                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1259         return rc;
1260 }
1261
1262 int
1263 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1264 {
1265         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1266         struct lnet_nid_list *ne = NULL;
1267         int rc = 0;
1268
1269         if (lpni->lpni_pref_nnids == 0) {
1270                 rc = -ENOENT;
1271                 goto out;
1272         }
1273
1274         if (lpni->lpni_pref_nnids == 1) {
1275                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1276                         rc = -ENOENT;
1277                         goto out;
1278                 }
1279         } else {
1280                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1281                         if (nid_same(&ne->nl_nid, nid))
1282                                 goto remove_nid_entry;
1283                 }
1284                 rc = -ENOENT;
1285                 ne = NULL;
1286                 goto out;
1287         }
1288
1289 remove_nid_entry:
1290         lnet_net_lock(LNET_LOCK_EX);
1291         spin_lock(&lpni->lpni_lock);
1292         if (lpni->lpni_pref_nnids == 1)
1293                 lpni->lpni_pref.nid = LNET_ANY_NID;
1294         else {
1295                 list_del_init(&ne->nl_list);
1296                 if (lpni->lpni_pref_nnids == 2) {
1297                         struct lnet_nid_list *ne, *tmp;
1298
1299                         list_for_each_entry_safe(ne, tmp,
1300                                                  &lpni->lpni_pref.nids,
1301                                                  nl_list) {
1302                                 lpni->lpni_pref.nid = ne->nl_nid;
1303                                 list_del_init(&ne->nl_list);
1304                                 LIBCFS_FREE(ne, sizeof(*ne));
1305                         }
1306                 }
1307         }
1308         lpni->lpni_pref_nnids--;
1309         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1310         spin_unlock(&lpni->lpni_lock);
1311         lnet_net_unlock(LNET_LOCK_EX);
1312
1313         if (ne)
1314                 LIBCFS_FREE(ne, sizeof(*ne));
1315 out:
1316         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1317                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1318         return rc;
1319 }
1320
1321 void
1322 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1323 {
1324         struct list_head zombies;
1325         struct lnet_nid_list *ne;
1326         struct lnet_nid_list *tmp;
1327
1328         INIT_LIST_HEAD(&zombies);
1329
1330         lnet_net_lock(LNET_LOCK_EX);
1331         if (lpni->lpni_pref_nnids == 1)
1332                 lpni->lpni_pref.nid = LNET_ANY_NID;
1333         else if (lpni->lpni_pref_nnids > 1)
1334                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1335         lpni->lpni_pref_nnids = 0;
1336         lnet_net_unlock(LNET_LOCK_EX);
1337
1338         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1339                 list_del_init(&ne->nl_list);
1340                 LIBCFS_FREE(ne, sizeof(*ne));
1341         }
1342 }
1343
1344 lnet_nid_t
1345 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1346 {
1347         /* FIXME handle large-addr nid */
1348         struct lnet_peer_ni *lpni;
1349         lnet_nid_t primary_nid = nid;
1350
1351         lpni = lnet_find_peer_ni_locked(nid);
1352         if (lpni) {
1353                 primary_nid = lnet_nid_to_nid4(
1354                         &lpni->lpni_peer_net->lpn_peer->lp_primary_nid);
1355                 lnet_peer_ni_decref_locked(lpni);
1356         }
1357
1358         return primary_nid;
1359 }
1360
1361 bool
1362 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1363 __must_hold(&lp->lp_lock)
1364 {
1365         if (lnet_peer_discovery_disabled)
1366                 return true;
1367
1368         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1369             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1370                 return true;
1371         }
1372
1373         return false;
1374 }
1375
1376 /*
1377  * Peer Discovery
1378  */
1379 bool
1380 lnet_is_discovery_disabled(struct lnet_peer *lp)
1381 {
1382         bool rc = false;
1383
1384         spin_lock(&lp->lp_lock);
1385         rc = lnet_is_discovery_disabled_locked(lp);
1386         spin_unlock(&lp->lp_lock);
1387
1388         return rc;
1389 }
1390
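/*
 * Add a peer described by an array of NIDs. Roughly: the first NID
 * (skipping the loopback NID) becomes the primary NID and, with
 * discovery enabled, each remaining NID is added as another NID of
 * that peer; with discovery disabled every NID becomes its own
 * single-NID peer. As an illustration, nids = { A, B, C } with
 * discovery enabled yields one multi-rail peer with primary NID A and
 * peer NIs B and C. -EEXIST from any addition is ignored; any other
 * error aborts the loop and is returned.
 */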
1391 int
1392 LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
1393 {
1394         lnet_nid_t pnid = 0;
1395         bool mr;
1396         int i, rc;
1397
1398         if (!nids || num_nids < 1)
1399                 return -EINVAL;
1400
1401         rc = LNetNIInit(LNET_PID_ANY);
1402         if (rc < 0)
1403                 return rc;
1404
1405         mutex_lock(&the_lnet.ln_api_mutex);
1406
1407         mr = lnet_peer_discovery_disabled == 0;
1408
1409         rc = 0;
1410         for (i = 0; i < num_nids; i++) {
1411                 if (nids[i] == LNET_NID_LO_0)
1412                         continue;
1413
1414                 if (!pnid) {
1415                         pnid = nids[i];
1416                         rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
1417                 } else if (lnet_peer_discovery_disabled) {
1418                         rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
1419                 } else {
1420                         rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
1421                 }
1422
1423                 if (rc && rc != -EEXIST)
1424                         goto unlock;
1425         }
1426
1427 unlock:
1428         mutex_unlock(&the_lnet.ln_api_mutex);
1429
1430         LNetNIFini();
1431
1432         return rc == -EEXIST ? 0 : rc;
1433 }
1434 EXPORT_SYMBOL(LNetAddPeer);
1435
1436 /* FIXME support large-addr nid */
1437 lnet_nid_t
1438 LNetPrimaryNID(lnet_nid_t nid)
1439 {
1440         struct lnet_peer *lp;
1441         struct lnet_peer_ni *lpni;
1442         lnet_nid_t primary_nid = nid;
1443         int rc = 0;
1444         int cpt;
1445
1446         if (nid == LNET_NID_LO_0)
1447                 return LNET_NID_LO_0;
1448
1449         cpt = lnet_net_lock_current();
1450         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1451         if (IS_ERR(lpni)) {
1452                 rc = PTR_ERR(lpni);
1453                 goto out_unlock;
1454         }
1455         lp = lpni->lpni_peer_net->lpn_peer;
1456
1457         /* If discovery is disabled locally then we needn't bother running
1458          * discovery here because discovery will not modify whatever
1459          * primary NID is currently set for this peer. If the specified peer is
1460          * down then this discovery can introduce long delays into the mount
1461          * process, so skip it if it isn't necessary.
1462          */
1463         if (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
1464                 spin_lock(&lp->lp_lock);
1465                 /* force a full discovery cycle */
1466                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
1467                                 LNET_PEER_LOCK_PRIMARY;
1468                 spin_unlock(&lp->lp_lock);
1469
1470                 /* start discovery in the background. Messages to that
1471                  * peer will not go through until the discovery is
1472                  * complete
1473                  */
1474                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1475                 if (rc)
1476                         goto out_decref;
1477                 /* The lpni (or lp) for this NID may have changed and our ref is
1478                  * the only thing keeping the old one around. Release the ref
1479                  * and lookup the lpni again
1480                  */
1481                 lnet_peer_ni_decref_locked(lpni);
1482                 lpni = lnet_find_peer_ni_locked(nid);
1483                 if (!lpni) {
1484                         rc = -ENOENT;
1485                         goto out_unlock;
1486                 }
1487                 lp = lpni->lpni_peer_net->lpn_peer;
1488         }
1489         primary_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
1490 out_decref:
1491         lnet_peer_ni_decref_locked(lpni);
1492 out_unlock:
1493         lnet_net_unlock(cpt);
1494
1495         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1496                libcfs_nid2str(primary_nid), rc);
1497         return primary_nid;
1498 }
1499 EXPORT_SYMBOL(LNetPrimaryNID);
1500
1501 struct lnet_peer_net *
1502 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1503 {
1504         struct lnet_peer_net *peer_net;
1505         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1506                 if (peer_net->lpn_net_id == net_id)
1507                         return peer_net;
1508         }
1509         return NULL;
1510 }
1511
1512 /*
1513  * Attach a peer_ni to a peer_net and peer. This function assumes
1514  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1515  * may be attached to a different peer, in which case it will be
1516  * properly detached first. The whole operation is done atomically.
1517  *
1518  * This function consumes the reference on lpni and always returns 0.
1519  * This is the last function called from functions that do return an
1520  * int, so returning 0 here allows the compiler to do a tail call.
1521  */
1522 static int
1523 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1524                          struct lnet_peer_net *lpn,
1525                          struct lnet_peer_ni *lpni,
1526                          unsigned flags)
1527 {
1528         struct lnet_peer_table *ptable;
1529         bool new_lpn = false;
1530         int rc;
1531
1532         /* Install the new peer_ni */
1533         lnet_net_lock(LNET_LOCK_EX);
1534         /* Add peer_ni to global peer table hash, if necessary. */
1535         if (list_empty(&lpni->lpni_hashlist)) {
1536                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1537
1538                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1539                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1540                 ptable->pt_version++;
1541                 lnet_peer_ni_addref_locked(lpni);
1542         }
1543
1544         /* Detach the peer_ni from an existing peer, if necessary. */
1545         if (lpni->lpni_peer_net) {
1546                 LASSERT(lpni->lpni_peer_net != lpn);
1547                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1548                 lnet_peer_detach_peer_ni_locked(lpni);
1549                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1550                 lpni->lpni_peer_net = NULL;
1551         }
1552
1553         /* Add peer_ni to peer_net */
1554         lpni->lpni_peer_net = lpn;
1555         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1556                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1557         else
1558                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1559         lnet_update_peer_net_healthv(lpni);
1560         lnet_peer_net_addref_locked(lpn);
1561
1562         /* Add peer_net to peer */
1563         if (!lpn->lpn_peer) {
1564                 new_lpn = true;
1565                 lpn->lpn_peer = lp;
1566                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1567                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1568                 else
1569                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1570                 lnet_peer_addref_locked(lp);
1571         }
1572
1573         /* Add peer to global peer list, if necessary */
1574         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1575         if (list_empty(&lp->lp_peer_list)) {
1576                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1577                 ptable->pt_peers++;
1578         }
1579
1580
1581         /* Update peer state */
1582         spin_lock(&lp->lp_lock);
1583         if (flags & LNET_PEER_CONFIGURED) {
1584                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1585                         lp->lp_state |= LNET_PEER_CONFIGURED;
1586         }
1587         if (flags & LNET_PEER_MULTI_RAIL) {
1588                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1589                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1590                         lnet_peer_clr_non_mr_pref_nids(lp);
1591                 }
1592         }
1593         if (flags & LNET_PEER_LOCK_PRIMARY)
1594                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1595         spin_unlock(&lp->lp_lock);
1596
1597         lp->lp_nnis++;
1598
1599         /* apply UDSPs */
1600         if (new_lpn) {
1601                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1602                 if (rc)
1603                         CERROR("Failed to apply UDSPs on lpn %s\n",
1604                                libcfs_net2str(lpn->lpn_net_id));
1605         }
1606         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1607         if (rc)
1608                 CERROR("Failed to apply UDSPs on lpni %s\n",
1609                        libcfs_nidstr(&lpni->lpni_nid));
1610
1611         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1612                libcfs_nidstr(&lp->lp_primary_nid),
1613                libcfs_nidstr(&lpni->lpni_nid), flags);
1614         lnet_peer_ni_decref_locked(lpni);
1615         lnet_net_unlock(LNET_LOCK_EX);
1616
1617         return 0;
1618 }
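/*
 * Sketch of the expected calling pattern (derived from the callers below,
 * shown here only for clarity): allocate the three objects, then let
 * lnet_peer_attach_peer_ni() link them and consume the lpni reference.
 *
 *	lp = lnet_peer_alloc(&nid);
 *	lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
 *	lpni = lnet_peer_ni_alloc(&nid);
 *	if (!lp || !lpn || !lpni)
 *		... free whatever was allocated and fail ...
 *	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
 *
 * If lpni was instead found by lnet_find_peer_ni_locked(), the reference
 * returned by the lookup is the one consumed by the attach.
 */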
1619
1620 /*
1621  * Create a new peer, with nid as its primary nid.
1622  *
1623  * Call with the lnet_api_mutex held.
1624  */
1625 static int
1626 lnet_peer_add(lnet_nid_t nid4, unsigned int flags)
1627 {
1628         struct lnet_nid nid;
1629         struct lnet_peer *lp;
1630         struct lnet_peer_net *lpn;
1631         struct lnet_peer_ni *lpni;
1632         int rc = 0;
1633
1634         LASSERT(nid4 != LNET_NID_ANY);
1635
1636         /*
1637          * No need for the lnet_net_lock here, because the
1638          * lnet_api_mutex is held.
1639          */
1640         lpni = lnet_find_peer_ni_locked(nid4);
1641         if (lpni) {
1642                 /* A peer with this NID already exists. */
1643                 lp = lpni->lpni_peer_net->lpn_peer;
1644                 lnet_peer_ni_decref_locked(lpni);
1645                 /*
1646                  * This is an error if the peer was configured and the
1647                  * primary NID differs or an attempt is made to change
1648                  * the Multi-Rail flag. Otherwise the assumption is
1649                  * that an existing peer is being modified.
1650                  */
1651                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1652                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid4)
1653                                 rc = -EEXIST;
1654                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1655                                 rc = -EPERM;
1656                         goto out;
1657                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1658                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid4) {
1659                                 rc = -EEXIST;
1660                                 goto out;
1661                         }
1662                 }
1663                 /* Delete and recreate as a configured peer. */
1664                 lnet_peer_del(lp);
1665         }
1666
1667         /* Create peer, peer_net, and peer_ni. */
1668         rc = -ENOMEM;
1669         lnet_nid4_to_nid(nid4, &nid);
1670         lp = lnet_peer_alloc(&nid);
1671         if (!lp)
1672                 goto out;
1673         lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
1674         if (!lpn)
1675                 goto out_free_lp;
1676         lpni = lnet_peer_ni_alloc(&nid);
1677         if (!lpni)
1678                 goto out_free_lpn;
1679
1680         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1681
1682 out_free_lpn:
1683         LIBCFS_FREE(lpn, sizeof(*lpn));
1684 out_free_lp:
1685         LIBCFS_FREE(lp, sizeof(*lp));
1686 out:
1687         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1688                libcfs_nid2str(nid4), flags, rc);
1689         return rc;
1690 }
1691
1692 /*
1693  * Add a NID to a peer. Call with ln_api_mutex held.
1694  *
1695  * Error codes:
1696  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1697  *  -EEXIST:   The NID was configured by DLC for a different peer.
1698  *  -ENOMEM:   Out of memory.
1699  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1700  *             non-multi-rail peer.
1701  */
1702 static int
1703 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
1704 {
1705         struct lnet_peer_net *lpn;
1706         struct lnet_peer_ni *lpni;
1707         struct lnet_nid nid;
1708         int rc = 0;
1709
1710         LASSERT(lp);
1711         LASSERT(nid4 != LNET_NID_ANY);
1712
1713         lnet_nid4_to_nid(nid4, &nid);
1714
1715         /* A configured peer can only be updated through configuration. */
1716         if (!(flags & LNET_PEER_CONFIGURED)) {
1717                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1718                         rc = -EPERM;
1719                         goto out;
1720                 }
1721         }
1722
1723         /*
1724          * The MULTI_RAIL flag can be set but not cleared, because
1725          * that would leave the peer struct in an invalid state.
1726          */
1727         if (flags & LNET_PEER_MULTI_RAIL) {
1728                 spin_lock(&lp->lp_lock);
1729                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1730                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1731                         lnet_peer_clr_non_mr_pref_nids(lp);
1732                 }
1733                 spin_unlock(&lp->lp_lock);
1734         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1735                 rc = -EPERM;
1736                 goto out;
1737         }
1738
1739         lpni = lnet_find_peer_ni_locked(nid4);
1740         if (lpni) {
1741                 /*
1742                  * A peer_ni already exists. This is only a problem if
1743                  * it is not connected to this peer and was configured
1744                  * by DLC.
1745                  */
1746                 if (lpni->lpni_peer_net->lpn_peer == lp)
1747                         goto out_free_lpni;
1748                 if (lnet_peer_ni_is_configured(lpni)) {
1749                         rc = -EEXIST;
1750                         goto out_free_lpni;
1751                 }
1752                 /* If this is the primary NID, destroy the peer. */
1753                 if (lnet_peer_ni_is_primary(lpni)) {
1754                         struct lnet_peer *lp2 =
1755                                 lpni->lpni_peer_net->lpn_peer;
1756                         int rtr_refcount = lp2->lp_rtr_refcount;
1757
1758                         /* If this NID is the primary NID of another
1759                          * peer, and we're supposed to preserve that
1760                          * peer's primary NID, then we don't want to
1761                          * mess with it. But the configuration is
1762                          * wrong at this point, so we should flag
1763                          * both of these peers as being in a bad
1764                          * state.
1765                          */
1766                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
1767                                 spin_lock(&lp->lp_lock);
1768                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1769                                 spin_unlock(&lp->lp_lock);
1770                                 spin_lock(&lp2->lp_lock);
1771                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1772                                 spin_unlock(&lp2->lp_lock);
1773                                 goto out_free_lpni;
1774                         }
1775                         /*
1776                          * If we're trying to delete a router, it means
1777                          * we're moving this peer NI to a new peer, so we
1778                          * must transfer the router properties to the new peer.
1779                          */
1780                         if (rtr_refcount > 0) {
1781                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1782                                 lnet_rtr_transfer_to_peer(lp2, lp);
1783                         }
1784                         lnet_peer_del(lp2);
1785                         lnet_peer_ni_decref_locked(lpni);
1786                         lpni = lnet_peer_ni_alloc(&nid);
1787                         if (!lpni) {
1788                                 rc = -ENOMEM;
1789                                 goto out_free_lpni;
1790                         }
1791                 }
1792         } else {
1793                 lpni = lnet_peer_ni_alloc(&nid);
1794                 if (!lpni) {
1795                         rc = -ENOMEM;
1796                         goto out_free_lpni;
1797                 }
1798         }
1799
1800         /*
1801          * Get the peer_net. Check that we're not adding a second
1802          * peer_ni on a peer_net of a non-multi-rail peer.
1803          */
1804         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid4));
1805         if (!lpn) {
1806                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid4));
1807                 if (!lpn) {
1808                         rc = -ENOMEM;
1809                         goto out_free_lpni;
1810                 }
1811         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1812                 rc = -ENOTUNIQ;
1813                 goto out_free_lpni;
1814         }
1815
1816         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1817
1818 out_free_lpni:
1819         lnet_peer_ni_decref_locked(lpni);
1820 out:
1821         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1822                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid4),
1823                flags, rc);
1824         return rc;
1825 }
1826
1827 /*
1828  * Update the primary NID of a peer, if possible.
1829  *
1830  * Call with the lnet_api_mutex held.
1831  */
1832 static int
1833 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
1834                           unsigned int flags)
1835 {
1836         struct lnet_nid old = lp->lp_primary_nid;
1837         int rc = 0;
1838
1839         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
1840                 goto out;
1841
1842         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1843                 lnet_nid4_to_nid(nid, &lp->lp_primary_nid);
1844
1845         rc = lnet_peer_add_nid(lp, nid, flags);
1846         if (rc) {
1847                 lp->lp_primary_nid = old;
1848                 goto out;
1849         }
1850 out:
1851         /* If this is a configured peer, or the primary NID for this peer
1852          * has been locked, then we don't want to flag this scenario as
1853          * a failure.
1854          */
1855         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1856             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1857                 return 0;
1858
1859         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1860                libcfs_nidstr(&old), libcfs_nid2str(nid), rc);
1861
1862         return rc;
1863 }
1864
1865 /*
1866  * lpni creation initiated due to traffic either sending or receiving.
1867  */
1868 static int
1869 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1870 {
1871         struct lnet_peer *lp;
1872         struct lnet_peer_net *lpn;
1873         struct lnet_peer_ni *lpni;
1874         unsigned flags = 0;
1875         int rc = 0;
1876
1877         if (LNET_NID_IS_ANY(nid)) {
1878                 rc = -EINVAL;
1879                 goto out;
1880         }
1881
1882         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1883         lpni = lnet_peer_ni_find_locked(nid);
1884         if (lpni) {
1885                 /*
1886                  * We must have raced with another thread. Since we
1887                  * know next to nothing about a peer_ni created by
1888                  * traffic, we just assume everything is ok and
1889                  * return.
1890                  */
1891                 lnet_peer_ni_decref_locked(lpni);
1892                 goto out;
1893         }
1894
1895         /* Create peer, peer_net, and peer_ni. */
1896         rc = -ENOMEM;
1897         lp = lnet_peer_alloc(nid);
1898         if (!lp)
1899                 goto out;
1900         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1901         if (!lpn)
1902                 goto out_free_lp;
1903         lpni = lnet_peer_ni_alloc(nid);
1904         if (!lpni)
1905                 goto out_free_lpn;
1906         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1907
1908         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1909
1910 out_free_lpn:
1911         LIBCFS_FREE(lpn, sizeof(*lpn));
1912 out_free_lp:
1913         LIBCFS_FREE(lp, sizeof(*lp));
1914 out:
1915         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1916         return rc;
1917 }
1918
1919 /*
1920  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1921  *
1922  * This API handles the following combinations:
1923  *   Create a peer with its primary NI if only the prim_nid is provided
1924  *   Add a NID to a peer identified by the prim_nid. The peer identified
1925  *   by the prim_nid must already exist.
1926  *   The peer being created may be non-MR.
1927  *
1928  * The caller must hold ln_api_mutex. This prevents the peer from
1929  * being created/modified/deleted by a different thread.
1930  */
1931 int
1932 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
1933 {
1934         struct lnet_peer *lp = NULL;
1935         struct lnet_peer_ni *lpni;
1936         unsigned int flags = 0;
1937
1938         /* The prim_nid must always be specified */
1939         if (prim_nid == LNET_NID_ANY)
1940                 return -EINVAL;
1941
1942         if (!temp)
1943                 flags = LNET_PEER_CONFIGURED;
1944
1945         if (mr)
1946                 flags |= LNET_PEER_MULTI_RAIL;
1947
1948         /*
1949          * If nid isn't specified, we must create a new peer with
1950          * prim_nid as its primary nid.
1951          */
1952         if (nid == LNET_NID_ANY)
1953                 return lnet_peer_add(prim_nid, flags);
1954
1955         /* Look up the prim_nid, which must exist. */
1956         lpni = lnet_find_peer_ni_locked(prim_nid);
1957         if (!lpni)
1958                 return -ENOENT;
1959         lnet_peer_ni_decref_locked(lpni);
1960         lp = lpni->lpni_peer_net->lpn_peer;
1961
1962         /* Peer must have been configured. */
1963         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1964                 CDEBUG(D_NET, "peer %s was not configured\n",
1965                        libcfs_nid2str(prim_nid));
1966                 return -ENOENT;
1967         }
1968
1969         /* Primary NID must match */
1970         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
1971                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1972                        libcfs_nid2str(prim_nid),
1973                        libcfs_nidstr(&lp->lp_primary_nid));
1974                 return -ENODEV;
1975         }
1976
1977         /* Multi-Rail flag must match. */
1978         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1979                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1980                        libcfs_nid2str(prim_nid));
1981                 return -EPERM;
1982         }
1983
1984         return lnet_peer_add_nid(lp, nid, flags);
1985 }
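/*
 * For illustration only (assumed userspace mapping, not defined in this
 * file): this entry point is normally reached from the DLC configuration
 * path via the IOC_LIBCFS_ADD_PEER_NI ioctl, e.g. roughly:
 *
 *	lnetctl peer add --prim_nid <primary NID> [--nid <NID>]
 *
 * where the exact option names depend on the userspace tool version.
 * The "temp" argument above controls whether the peer is flagged
 * LNET_PEER_CONFIGURED.
 */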
1986
1987 /*
1988  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1989  *
1990  * This API handles the following combinations:
1991  *   Delete a NI from a peer if both prim_nid and nid are provided.
1992  *   Delete a peer if only prim_nid is provided.
1993  *   Delete a peer if its primary nid is provided.
1994  *
1995  * The caller must hold ln_api_mutex. This prevents the peer from
1996  * being modified/deleted by a different thread.
1997  */
1998 int
1999 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
2000 {
2001         struct lnet_peer *lp;
2002         struct lnet_peer_ni *lpni;
2003         unsigned flags;
2004
2005         if (prim_nid == LNET_NID_ANY)
2006                 return -EINVAL;
2007
2008         lpni = lnet_find_peer_ni_locked(prim_nid);
2009         if (!lpni)
2010                 return -ENOENT;
2011         lnet_peer_ni_decref_locked(lpni);
2012         lp = lpni->lpni_peer_net->lpn_peer;
2013
2014         if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
2015                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2016                        libcfs_nid2str(prim_nid),
2017                        libcfs_nidstr(&lp->lp_primary_nid));
2018                 return -ENODEV;
2019         }
2020
2021         lnet_net_lock(LNET_LOCK_EX);
2022         if (lp->lp_rtr_refcount > 0) {
2023                 lnet_net_unlock(LNET_LOCK_EX);
2024                 CERROR("%s is a router. Cannot be deleted\n",
2025                        libcfs_nid2str(prim_nid));
2026                 return -EBUSY;
2027         }
2028         lnet_net_unlock(LNET_LOCK_EX);
2029
2030         if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
2031                 return lnet_peer_del(lp);
2032
2033         flags = LNET_PEER_CONFIGURED;
2034         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2035                 flags |= LNET_PEER_MULTI_RAIL;
2036
2037         return lnet_peer_del_nid(lp, nid, flags);
2038 }
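/*
 * For illustration only (assumed userspace mapping, not defined in this
 * file): the deletion path is normally driven through the
 * IOC_LIBCFS_DEL_PEER_NI ioctl, e.g. roughly:
 *
 *	lnetctl peer del --prim_nid <primary NID> [--nid <NID>]
 *
 * Passing only the primary NID deletes the whole peer; passing a
 * non-primary NID as well deletes just that peer NI, as described above.
 */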
2039
2040 void
2041 lnet_destroy_peer_ni_locked(struct kref *ref)
2042 {
2043         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2044                                                  lpni_kref);
2045         struct lnet_peer_table *ptable;
2046         struct lnet_peer_net *lpn;
2047
2048         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2049
2050         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2051         LASSERT(list_empty(&lpni->lpni_txq));
2052         LASSERT(lpni->lpni_txqnob == 0);
2053         LASSERT(list_empty(&lpni->lpni_peer_nis));
2054         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2055
2056         lpn = lpni->lpni_peer_net;
2057         lpni->lpni_peer_net = NULL;
2058         lpni->lpni_net = NULL;
2059
2060         if (!list_empty(&lpni->lpni_hashlist)) {
2061                 /* remove the peer ni from the zombie list */
2062                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2063                 spin_lock(&ptable->pt_zombie_lock);
2064                 list_del_init(&lpni->lpni_hashlist);
2065                 ptable->pt_zombies--;
2066                 spin_unlock(&ptable->pt_zombie_lock);
2067         }
2068
2069         if (lpni->lpni_pref_nnids > 1) {
2070                 struct lnet_nid_list *ne, *tmp;
2071
2072                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2073                                          nl_list) {
2074                         list_del_init(&ne->nl_list);
2075                         LIBCFS_FREE(ne, sizeof(*ne));
2076                 }
2077         }
2078         LIBCFS_FREE(lpni, sizeof(*lpni));
2079
2080         if (lpn)
2081                 lnet_peer_net_decref_locked(lpn);
2082 }
2083
2084 struct lnet_peer_ni *
2085 lnet_nid2peerni_ex(struct lnet_nid *nid, int cpt)
2086 {
2087         struct lnet_peer_ni *lpni = NULL;
2088         int rc;
2089
2090         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2091                 return ERR_PTR(-ESHUTDOWN);
2092
2093         /*
2094          * find if a peer_ni already exists.
2095          * If so then just return that.
2096          */
2097         lpni = lnet_peer_ni_find_locked(nid);
2098         if (lpni)
2099                 return lpni;
2100
2101         lnet_net_unlock(cpt);
2102
2103         rc = lnet_peer_ni_traffic_add(nid, NULL);
2104         if (rc) {
2105                 lpni = ERR_PTR(rc);
2106                 goto out_net_relock;
2107         }
2108
2109         lpni = lnet_peer_ni_find_locked(nid);
2110         LASSERT(lpni);
2111
2112 out_net_relock:
2113         lnet_net_lock(cpt);
2114
2115         return lpni;
2116 }
2117
2118 /*
2119  * Get a peer_ni for the given nid, create it if necessary. Takes a
2120  * hold on the peer_ni.
2121  */
2122 struct lnet_peer_ni *
2123 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2124                         struct lnet_nid *pref, int cpt)
2125 {
2126         struct lnet_peer_ni *lpni = NULL;
2127         int rc;
2128
2129         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2130                 return ERR_PTR(-ESHUTDOWN);
2131
2132         /*
2133          * find if a peer_ni already exists.
2134          * If so then just return that.
2135          */
2136         lpni = lnet_peer_ni_find_locked(nid);
2137         if (lpni)
2138                 return lpni;
2139
2140         /*
2141          * Slow path:
2142          * use the lnet_api_mutex to serialize the creation of the peer_ni
2143          * and the creation/deletion of the local ni/net. When a local ni is
2144          * created, if there exists a set of peer_nis on that network,
2145          * they need to be traversed and updated. When a local NI is
2146          * deleted, which could result in a network being deleted, then
2147          * all peer nis on that network need to be removed as well.
2148          *
2149          * Creation through traffic should also be serialized with
2150          * creation through DLC.
2151          */
2152         lnet_net_unlock(cpt);
2153         mutex_lock(&the_lnet.ln_api_mutex);
2154         /*
2155          * Shutdown is only set under the ln_api_mutex, so a single
2156          * check here is sufficient.
2157          */
2158         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2159                 lpni = ERR_PTR(-ESHUTDOWN);
2160                 goto out_mutex_unlock;
2161         }
2162
2163         rc = lnet_peer_ni_traffic_add(nid, pref);
2164         if (rc) {
2165                 lpni = ERR_PTR(rc);
2166                 goto out_mutex_unlock;
2167         }
2168
2169         lpni = lnet_peer_ni_find_locked(nid);
2170         LASSERT(lpni);
2171
2172 out_mutex_unlock:
2173         mutex_unlock(&the_lnet.ln_api_mutex);
2174         lnet_net_lock(cpt);
2175
2176         /* Lock has been dropped, check again for shutdown. */
2177         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2178                 if (!IS_ERR(lpni))
2179                         lnet_peer_ni_decref_locked(lpni);
2180                 lpni = ERR_PTR(-ESHUTDOWN);
2181         }
2182
2183         return lpni;
2184 }
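/*
 * Illustrative caller sketch (an assumption, not code from this file):
 * lnet_peerni_by_nid_locked() is called with lnet_net_lock(cpt) held and
 * returns either a peer_ni with a reference held or an ERR_PTR(), so a
 * caller would typically do:
 *
 *	lpni = lnet_peerni_by_nid_locked(&nid, NULL, cpt);
 *	if (IS_ERR(lpni)) {
 *		rc = PTR_ERR(lpni);
 *		goto failed;
 *	}
 *	... use lpni ...
 *	lnet_peer_ni_decref_locked(lpni);
 */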
2185
2186 struct lnet_peer_ni *
2187 lnet_nid2peerni_locked(lnet_nid_t nid4, lnet_nid_t pref4, int cpt)
2188 {
2189         struct lnet_nid nid, pref;
2190
2191         lnet_nid4_to_nid(nid4, &nid);
2192         lnet_nid4_to_nid(pref4, &pref);
2193         if (pref4 == LNET_NID_ANY)
2194                 return lnet_peerni_by_nid_locked(&nid, NULL, cpt);
2195         else
2196                 return lnet_peerni_by_nid_locked(&nid, &pref, cpt);
2197 }
2198
2199 bool
2200 lnet_peer_gw_discovery(struct lnet_peer *lp)
2201 {
2202         bool rc = false;
2203
2204         spin_lock(&lp->lp_lock);
2205         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2206                 rc = true;
2207         spin_unlock(&lp->lp_lock);
2208
2209         return rc;
2210 }
2211
2212 bool
2213 lnet_peer_is_uptodate(struct lnet_peer *lp)
2214 {
2215         bool rc;
2216
2217         spin_lock(&lp->lp_lock);
2218         rc = lnet_peer_is_uptodate_locked(lp);
2219         spin_unlock(&lp->lp_lock);
2220         return rc;
2221 }
2222
2223 /*
2224  * Is a peer uptodate from the point of view of discovery?
2225  *
2226  * If it is currently being processed, obviously not.
2227  * A forced Ping or Push is also handled by the discovery thread.
2228  *
2229  * Otherwise look at whether the peer needs rediscovering.
2230  */
2231 bool
2232 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2233 __must_hold(&lp->lp_lock)
2234 {
2235         bool rc;
2236
2237         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2238                             LNET_PEER_FORCE_PING |
2239                             LNET_PEER_FORCE_PUSH)) {
2240                 rc = false;
2241         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2242                 rc = false;
2243         } else if (lnet_peer_needs_push(lp)) {
2244                 rc = false;
2245         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2246                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2247                         rc = true;
2248                 else
2249                         rc = false;
2250         } else {
2251                 rc = false;
2252         }
2253
2254         return rc;
2255 }
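/*
 * Equivalently (a condensed restatement of the checks above): the peer is
 * considered up to date if and only if
 *
 *	!(lp->lp_state & (LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
 *			  LNET_PEER_FORCE_PUSH | LNET_PEER_REDISCOVER)) &&
 *	!lnet_peer_needs_push(lp) &&
 *	(lp->lp_state & LNET_PEER_DISCOVERED) &&
 *	(lp->lp_state & LNET_PEER_NIDS_UPTODATE)
 */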
2256
2257 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2258 void
2259 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2260 {
2261         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2262          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2263          * when adding to the list and queuing the peer to ensure that we do not
2264          * strand any messages on the lp_dc_pendq. This scheme ensures the
2265          * message will be resent even if the peer is already being discovered.
2266          * Therefore we needn't check the return value of
2267          * lnet_peer_queue_for_discovery(lp).
2268          */
2269         lnet_net_lock(LNET_LOCK_EX);
2270         spin_lock(&lp->lp_lock);
2271         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2272         spin_unlock(&lp->lp_lock);
2273         lnet_peer_queue_for_discovery(lp);
2274         lnet_net_unlock(LNET_LOCK_EX);
2275 }
2276
2277 /*
2278  * Queue a peer for the attention of the discovery thread.  Call with
2279  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2280  * -EALREADY if the peer was already queued.
2281  */
2282 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2283 {
2284         int rc;
2285
2286         spin_lock(&lp->lp_lock);
2287         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2288                 lp->lp_state |= LNET_PEER_DISCOVERING;
2289         spin_unlock(&lp->lp_lock);
2290         if (list_empty(&lp->lp_dc_list)) {
2291                 lnet_peer_addref_locked(lp);
2292                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2293                 wake_up(&the_lnet.ln_dc_waitq);
2294                 rc = 0;
2295         } else {
2296                 rc = -EALREADY;
2297         }
2298
2299         CDEBUG(D_NET, "Queue peer %s: %d\n",
2300                libcfs_nidstr(&lp->lp_primary_nid), rc);
2301
2302         return rc;
2303 }
2304
2305 /*
2306  * Discovery of a peer is complete. Wake all waiters on the peer.
2307  * Call with lnet_net_lock/EX held.
2308  */
2309 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2310 {
2311         struct lnet_msg *msg, *tmp;
2312         int rc = 0;
2313         LIST_HEAD(pending_msgs);
2314
2315         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2316                libcfs_nidstr(&lp->lp_primary_nid));
2317
2318         list_del_init(&lp->lp_dc_list);
2319         spin_lock(&lp->lp_lock);
2320         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2321         spin_unlock(&lp->lp_lock);
2322         wake_up(&lp->lp_dc_waitq);
2323
2324         if (lp->lp_rtr_refcount > 0)
2325                 lnet_router_discovery_complete(lp);
2326
2327         lnet_net_unlock(LNET_LOCK_EX);
2328
2329         /* iterate through all pending messages and send them again */
2330         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2331                 list_del_init(&msg->msg_list);
2332                 if (lp->lp_dc_error) {
2333                         lnet_finalize(msg, lp->lp_dc_error);
2334                         continue;
2335                 }
2336
2337                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2338                        lnet_msgtyp2str(msg->msg_type),
2339                        libcfs_id2str(msg->msg_target));
2340                 rc = lnet_send(msg->msg_src_nid_param, msg,
2341                                msg->msg_rtr_nid_param);
2342                 if (rc < 0) {
2343                         CNETERR("Error sending %s to %s: %d\n",
2344                                lnet_msgtyp2str(msg->msg_type),
2345                                libcfs_id2str(msg->msg_target), rc);
2346                         lnet_finalize(msg, rc);
2347                 }
2348         }
2349         lnet_net_lock(LNET_LOCK_EX);
2350         lnet_peer_decref_locked(lp);
2351 }
2352
2353 /*
2354  * Handle inbound push.
2355  * Like any event handler, called with lnet_res_lock/CPT held.
2356  */
2357 void lnet_peer_push_event(struct lnet_event *ev)
2358 {
2359         struct lnet_ping_buffer *pbuf;
2360         struct lnet_peer *lp;
2361
2362         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2363
2364         /* lnet_find_peer() adds a refcount */
2365         lp = lnet_find_peer(ev->source.nid);
2366         if (!lp) {
2367                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2368                        libcfs_nid2str(ev->initiator.nid),
2369                        libcfs_nid2str(ev->source.nid));
2370                 pbuf->pb_needs_post = true;
2371                 return;
2372         }
2373
2374         /* Ensure peer state remains consistent while we modify it. */
2375         spin_lock(&lp->lp_lock);
2376
2377         /*
2378          * If some kind of error happened the contents of the message
2379          * cannot be used. Clear the NIDS_UPTODATE and set the
2380          * FORCE_PING flag to trigger a ping.
2381          */
2382         if (ev->status) {
2383                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2384                 lp->lp_state |= LNET_PEER_FORCE_PING;
2385                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2386                        ev->status,
2387                        libcfs_nidstr(&lp->lp_primary_nid),
2388                        libcfs_nid2str(ev->source.nid));
2389                 goto out;
2390         }
2391
2392         /*
2393          * A push with invalid or corrupted info. Clear the UPTODATE
2394          * flag to trigger a ping.
2395          */
2396         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2397                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2398                 lp->lp_state |= LNET_PEER_FORCE_PING;
2399                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2400                        libcfs_nidstr(&lp->lp_primary_nid));
2401                 goto out;
2402         }
2403
2404         /*
2405          * Make sure we'll allocate the correct size ping buffer when
2406          * pinging the peer.
2407          */
2408         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2409                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2410
2411         /*
2412          * A non-Multi-Rail peer is not supposed to be capable of
2413          * sending a push.
2414          */
2415         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2416                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2417                        libcfs_nidstr(&lp->lp_primary_nid));
2418                 goto out;
2419         }
2420
2421         /*
2422          * The peer may have discovery disabled at its end. Set
2423          * NO_DISCOVERY as appropriate.
2424          */
2425         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2426                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2427                        libcfs_nidstr(&lp->lp_primary_nid));
2428                 /*
2429                  * Mark the peer for deletion if we already know about it
2430                  * and it is transitioning from discovery enabled to disabled.
2431                  */
2432                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2433                                       LNET_PEER_DISCOVERING)) &&
2434                      lp->lp_state & LNET_PEER_DISCOVERED) {
2435                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2436                                libcfs_nidstr(&lp->lp_primary_nid),
2437                                lp->lp_state);
2438                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2439                 }
2440                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2441         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2442                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2443                        libcfs_nidstr(&lp->lp_primary_nid));
2444                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2445         }
2446
2447         /*
2448          * Update the MULTI_RAIL flag based on the push. If the peer
2449          * was configured with DLC then the setting should match what
2450          * DLC put in.
2451          * NB: We verified above that the MR feature bit is set in pi_features
2452          */
2453         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2454                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2455                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2456         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2457                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2458                       libcfs_nidstr(&lp->lp_primary_nid));
2459         } else if (lnet_peer_discovery_disabled) {
2460                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2461                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2462         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2463                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2464                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2465         } else {
2466                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2467                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2468                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2469                 lnet_peer_clr_non_mr_pref_nids(lp);
2470         }
2471
2472         /*
2473          * Check for truncation of the Put message. Clear the
2474          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2475          * and tell discovery to allocate a bigger buffer.
2476          */
2477         if (ev->mlength < ev->rlength) {
2478                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2479                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2480                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2481                 lp->lp_state |= LNET_PEER_FORCE_PING;
2482                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2483                        libcfs_nidstr(&lp->lp_primary_nid),
2484                        pbuf->pb_info.pi_nnis);
2485                 goto out;
2486         }
2487
2488         /* always assume new data */
2489         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2490         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2491
2492         /*
2493          * If there is data present that hasn't been processed yet,
2494          * we'll replace it if the Put contained newer data and it
2495          * fits. We're racing with a Ping or earlier Push in this
2496          * case.
2497          */
2498         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2499                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2500                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2501                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2502                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2503                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2504                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2505                               libcfs_nidstr(&lp->lp_primary_nid),
2506                               LNET_PING_BUFFER_SEQNO(pbuf),
2507                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2508                 }
2509                 goto out;
2510         }
2511
2512         /*
2513          * Allocate a buffer to copy the data. On a failure we drop
2514          * the Push and set FORCE_PING to force the discovery
2515          * thread to fix the problem by pinging the peer.
2516          */
2517         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2518         if (!lp->lp_data) {
2519                 lp->lp_state |= LNET_PEER_FORCE_PING;
2520                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2521                        libcfs_nidstr(&lp->lp_primary_nid),
2522                        LNET_PING_BUFFER_SEQNO(pbuf));
2523                 goto out;
2524         }
2525
2526         /* Success */
2527         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2528                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2529         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2530         CDEBUG(D_NET, "Received Push %s %u\n",
2531                libcfs_nidstr(&lp->lp_primary_nid),
2532                LNET_PING_BUFFER_SEQNO(pbuf));
2533
2534 out:
2535         /* We've processed this buffer. It can be reposted */
2536         pbuf->pb_needs_post = true;
2537
2538         /*
2539          * Queue the peer for discovery if it is not up to date. If it was
2540          * already queued, force it onto the request queue and wake the
2541          * discovery thread, because its status has changed.
2542          */
2543         spin_unlock(&lp->lp_lock);
2544         lnet_net_lock(LNET_LOCK_EX);
2545         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2546                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2547                 wake_up(&the_lnet.ln_dc_waitq);
2548         }
2549         /* Drop refcount from lookup */
2550         lnet_peer_decref_locked(lp);
2551         lnet_net_unlock(LNET_LOCK_EX);
2552 }
2553
2554 /*
2555  * Clear the discovery error state, unless we're already discovering
2556  * this peer, in which case the error is current.
2557  */
2558 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2559 {
2560         spin_lock(&lp->lp_lock);
2561         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2562                 lp->lp_dc_error = 0;
2563         spin_unlock(&lp->lp_lock);
2564 }
2565
2566 /*
2567  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2568  * dropped/retaken within this function. An lnet_peer_ni is passed in
2569  * because discovery could tear down an lnet_peer.
2570  */
2571 int
2572 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2573 {
2574         DEFINE_WAIT(wait);
2575         struct lnet_peer *lp;
2576         int rc = 0;
2577         int count = 0;
2578
2579 again:
2580         lnet_net_unlock(cpt);
2581         lnet_net_lock(LNET_LOCK_EX);
2582         lp = lpni->lpni_peer_net->lpn_peer;
2583         lnet_peer_clear_discovery_error(lp);
2584
2585         /*
2586          * We're willing to be interrupted. The lpni can become a
2587          * zombie if we race with DLC, so we must check for that.
2588          */
2589         for (;;) {
2590                 /* Keep lp alive when the lnet_net_lock is unlocked */
2591                 lnet_peer_addref_locked(lp);
2592                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2593                 if (signal_pending(current))
2594                         break;
2595                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2596                         break;
2597                 /*
2598                  * Don't repeat discovery if discovery is disabled. This is
2599                  * done so that discovery can still be used as a standard ping,
2600                  * for backwards compatibility with routers which do not
2601                  * support discovery or have discovery disabled.
2602                  */
2603                 if (lnet_is_discovery_disabled(lp) && count > 0)
2604                         break;
2605                 if (lp->lp_dc_error)
2606                         break;
2607                 if (lnet_peer_is_uptodate(lp))
2608                         break;
2609                 lnet_peer_queue_for_discovery(lp);
2610                 count++;
2611                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2612
2613                 /*
2614                  * If caller requested a non-blocking operation then
2615                  * return immediately. Once discovery is complete any
2616                  * pending messages that were stopped due to discovery
2617                  * will be transmitted.
2618                  */
2619                 if (!block)
2620                         break;
2621
2622                 lnet_net_unlock(LNET_LOCK_EX);
2623                 schedule();
2624                 finish_wait(&lp->lp_dc_waitq, &wait);
2625                 lnet_net_lock(LNET_LOCK_EX);
2626                 lnet_peer_decref_locked(lp);
2627                 /* Peer may have changed */
2628                 lp = lpni->lpni_peer_net->lpn_peer;
2629         }
2630         finish_wait(&lp->lp_dc_waitq, &wait);
2631
2632         lnet_net_unlock(LNET_LOCK_EX);
2633         lnet_net_lock(cpt);
2634         lnet_peer_decref_locked(lp);
2635         /*
2636          * The peer may have changed, so re-check and rediscover if that turns
2637          * out to have been the case. The reference count on lp ensured that
2638          * even if it was unlinked from lpni the memory could not be recycled.
2639          * Thus the check below is sufficient to determine whether the peer
2640          * changed. If the peer changed, then lp must not be dereferenced.
2641          */
2642         if (lp != lpni->lpni_peer_net->lpn_peer)
2643                 goto again;
2644
2645         if (signal_pending(current))
2646                 rc = -EINTR;
2647         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2648                 rc = -ESHUTDOWN;
2649         else if (lp->lp_dc_error)
2650                 rc = lp->lp_dc_error;
2651         else if (!block)
2652                 CDEBUG(D_NET, "non-blocking discovery\n");
2653         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2654                 goto again;
2655
2656         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2657                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2658                libcfs_nidstr(&lpni->lpni_nid), rc,
2659                (!block) ? "pending discovery" : "discovery complete");
2660
2661         return rc;
2662 }
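/*
 * Illustrative blocking caller sketch (an assumption, not code from this
 * file); the non-blocking usage is shown in LNetPrimaryNID() above. With
 * the ln_api_mutex held as noted in the comment before this function:
 *
 *	lnet_net_lock(cpt);
 *	lpni = lnet_find_peer_ni_locked(nid);
 *	if (lpni) {
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *		lnet_peer_ni_decref_locked(lpni);
 *	}
 *	lnet_net_unlock(cpt);
 *
 * On return lnet_net_lock(cpt) is held again; rc is 0 on success, or a
 * negative errno such as -EINTR or -ESHUTDOWN.
 */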
2663
2664 /* Handle an incoming ack for a push. */
2665 static void
2666 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2667 {
2668         struct lnet_ping_buffer *pbuf;
2669
2670         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2671         spin_lock(&lp->lp_lock);
2672         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2673         lp->lp_push_error = ev->status;
2674         if (ev->status)
2675                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2676         else
2677                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2678         spin_unlock(&lp->lp_lock);
2679
2680         CDEBUG(D_NET, "peer %s ev->status %d\n",
2681                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2682 }
2683
2684 /* Handle a Reply message. This is the reply to a Ping message. */
2685 static void
2686 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2687 {
2688         struct lnet_ping_buffer *pbuf;
2689         int rc;
2690
2691         spin_lock(&lp->lp_lock);
2692
2693         lnet_nid4_to_nid(ev->target.nid, &lp->lp_disc_src_nid);
2694         lnet_nid4_to_nid(ev->source.nid, &lp->lp_disc_dst_nid);
2695
2696         /*
2697          * If some kind of error happened the contents of the message
2698          * cannot be used. Set PING_FAILED to trigger a retry.
2699          */
2700         if (ev->status) {
2701                 lp->lp_state |= LNET_PEER_PING_FAILED;
2702                 lp->lp_ping_error = ev->status;
2703                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2704                        ev->status,
2705                        libcfs_nidstr(&lp->lp_primary_nid),
2706                        libcfs_nid2str(ev->source.nid));
2707                 goto out;
2708         }
2709
2710         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2711         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2712                 lnet_swap_pinginfo(pbuf);
2713
2714         /*
2715          * A reply with invalid or corrupted info. Set PING_FAILED to
2716          * trigger a retry.
2717          */
2718         rc = lnet_ping_info_validate(&pbuf->pb_info);
2719         if (rc) {
2720                 lp->lp_state |= LNET_PEER_PING_FAILED;
2721                 lp->lp_ping_error = 0;
2722                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2723                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2724                 goto out;
2725         }
2726
2727         /*
2728          * The peer may have discovery disabled at its end. Set
2729          * NO_DISCOVERY as appropriate.
2730          */
2731         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2732             lnet_peer_discovery_disabled) {
2733                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2734                        libcfs_nidstr(&lp->lp_primary_nid));
2735
2736                 /* Detect whether this peer has toggled discovery from on to
2737                  * off and whether we can delete and re-create the peer. Peers
2738                  * that were manually configured cannot be deleted by discovery.
2739                  * We need to delete this peer and re-create it if the peer was
2740                  * not configured manually, is currently considered DD capable,
2741                  * and either:
2742                  * 1. We've already discovered the peer (the peer has toggled
2743                  *    the discovery feature from on to off), or
2744                  * 2. The peer is considered MR, but it was not user configured
2745                  *    (this was a "temporary" peer created via the kernel APIs
2746                  *     that we're discovering for the first time)
2747                  */
2748                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2749                                       LNET_PEER_NO_DISCOVERY)) &&
2750                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2751                                      LNET_PEER_MULTI_RAIL))) {
2752                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2753                                libcfs_nidstr(&lp->lp_primary_nid),
2754                                lp->lp_state);
2755                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2756                 }
2757                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2758         } else {
2759                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2760                        libcfs_nidstr(&lp->lp_primary_nid));
2761                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2762         }
2763
2764         /*
2765          * Update the MULTI_RAIL flag based on the reply. If the peer
2766          * was configured with DLC then the setting should match what
2767          * DLC put in.
2768          */
2769         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2770                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2771                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2772                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2773                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2774                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2775                               libcfs_nidstr(&lp->lp_primary_nid));
2776                 } else if (lnet_peer_discovery_disabled) {
2777                         CDEBUG(D_NET,
2778                                "peer %s(%p) not MR: DD disabled locally\n",
2779                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2780                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2781                         CDEBUG(D_NET,
2782                                "peer %s(%p) not MR: DD disabled remotely\n",
2783                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2784                 } else {
2785                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2786                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2787                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2788                         lnet_peer_clr_non_mr_pref_nids(lp);
2789                 }
2790         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2791                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2792                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2793                               libcfs_nidstr(&lp->lp_primary_nid));
2794                 } else {
2795                         CERROR("Multi-Rail state vanished from %s\n",
2796                                libcfs_nidstr(&lp->lp_primary_nid));
2797                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2798                 }
2799         }
2800
2801         /*
2802          * Make sure we'll allocate the correct size ping buffer when
2803          * pinging the peer.
2804          */
2805         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2806                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2807
2808         /*
2809          * Check for truncation of the Reply. Clear PING_SENT and set
2810          * PING_FAILED to trigger a retry.
2811          */
2812         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2813                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2814                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2815                 lp->lp_state |= LNET_PEER_PING_FAILED;
2816                 lp->lp_ping_error = 0;
2817                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2818                        libcfs_nidstr(&lp->lp_primary_nid),
2819                        pbuf->pb_info.pi_nnis);
2820                 goto out;
2821         }
2822
2823         /*
2824          * Check the sequence numbers in the reply. These are only
2825          * available if the reply came from a Multi-Rail peer.
2826          */
2827         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2828             pbuf->pb_info.pi_nnis > 1 &&
2829             lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2830             pbuf->pb_info.pi_ni[1].ns_nid) {
2831                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2832                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2833                                 libcfs_nidstr(&lp->lp_primary_nid),
2834                                 LNET_PING_BUFFER_SEQNO(pbuf),
2835                                 lp->lp_peer_seqno);
2836
2837                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2838         }
2839
2840         /* We're happy with the state of the data in the buffer. */
2841         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2842                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2843                lp->lp_state);
2844         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2845                 lnet_ping_buffer_decref(lp->lp_data);
2846         else
2847                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2848         lnet_ping_buffer_addref(pbuf);
2849         lp->lp_data = pbuf;
2850 out:
2851         lp->lp_state &= ~LNET_PEER_PING_SENT;
2852         spin_unlock(&lp->lp_lock);
2853
2854         lnet_net_lock(LNET_LOCK_EX);
2855         /*
2856          * If this peer is a gateway, call the routing callback to
2857          * handle the ping reply
2858          */
2859         if (lp->lp_rtr_refcount > 0)
2860                 lnet_router_discovery_ping_reply(lp);
2861         lnet_net_unlock(LNET_LOCK_EX);
2862 }
2863
2864 /*
2865  * Send event handling. Only matters for error cases, where we clean
2866  * up state on the peer and peer_ni that would otherwise be updated in
2867  * the REPLY event handler for a successful Ping, and the ACK event
2868  * handler for a successful Push.
2869  */
2870 static int
2871 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2872 {
2873         int rc = 0;
2874
2875         if (!ev->status)
2876                 goto out;
2877
2878         spin_lock(&lp->lp_lock);
2879         if (ev->msg_type == LNET_MSG_GET) {
2880                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2881                 lp->lp_state |= LNET_PEER_PING_FAILED;
2882                 lp->lp_ping_error = ev->status;
2883         } else { /* ev->msg_type == LNET_MSG_PUT */
2884                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2885                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2886                 lp->lp_push_error = ev->status;
2887         }
2888         spin_unlock(&lp->lp_lock);
2889         rc = LNET_REDISCOVER_PEER;
2890 out:
2891         CDEBUG(D_NET, "%s Send to %s: %d\n",
2892                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2893                 libcfs_nid2str(ev->target.nid), rc);
2894         return rc;
2895 }
2896
2897 /*
2898  * Unlink event handling. This event is only seen if a call to
2899  * LNetMDUnlink() caused the MD to be unlinked. If this call was
2900  * made after the MD was set up in LNetGet() or LNetPut() then we
2901  * assume the Ping or Push timed out.
2902  */
2903 static void
2904 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2905 {
2906         spin_lock(&lp->lp_lock);
2907         /* We've passed through LNetGet() */
2908         if (lp->lp_state & LNET_PEER_PING_SENT) {
2909                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2910                 lp->lp_state |= LNET_PEER_PING_FAILED;
2911                 lp->lp_ping_error = -ETIMEDOUT;
2912                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2913                         libcfs_nidstr(&lp->lp_primary_nid));
2914         }
2915         /* We've passed through LNetPut() */
2916         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2917                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2918                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2919                 lp->lp_push_error = -ETIMEDOUT;
2920                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2921                         libcfs_nidstr(&lp->lp_primary_nid));
2922         }
2923         spin_unlock(&lp->lp_lock);
2924 }
2925
2926 /*
2927  * Event handler for the discovery EQ.
2928  *
2929  * Called with lnet_res_lock(cpt) held. The cpt is the
2930  * lnet_cpt_of_cookie() of the md handle cookie.
2931  */
2932 static void lnet_discovery_event_handler(struct lnet_event *event)
2933 {
2934         struct lnet_peer *lp = event->md_user_ptr;
2935         struct lnet_ping_buffer *pbuf;
2936         int rc;
2937
2938         /* discovery needs to take another look */
2939         rc = LNET_REDISCOVER_PEER;
2940
2941         CDEBUG(D_NET, "Received event: %d\n", event->type);
2942
2943         switch (event->type) {
2944         case LNET_EVENT_ACK:
2945                 lnet_discovery_event_ack(lp, event);
2946                 break;
2947         case LNET_EVENT_REPLY:
2948                 lnet_discovery_event_reply(lp, event);
2949                 break;
2950         case LNET_EVENT_SEND:
2951                 /* Only send failure triggers a retry. */
2952                 rc = lnet_discovery_event_send(lp, event);
2953                 break;
2954         case LNET_EVENT_UNLINK:
2955                 /* LNetMDUnlink() was called */
2956                 lnet_discovery_event_unlink(lp, event);
2957                 break;
2958         default:
2959                 /* Invalid events. */
2960                 LBUG();
2961         }
2962         lnet_net_lock(LNET_LOCK_EX);
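        /* The MD was unlinked: release the ping buffer and the peer
         * reference that were taken for the MD when the ping or push
         * was set up.
         */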
2963         if (event->unlinked) {
2964                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2965                 lnet_ping_buffer_decref(pbuf);
2966                 lnet_peer_decref_locked(lp);
2967         }
2968
2969         /* put peer back at end of request queue, if discovery not already
2970          * done */
2971         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2972             lnet_peer_queue_for_discovery(lp)) {
2973                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2974                 wake_up(&the_lnet.ln_dc_waitq);
2975         }
2976         lnet_net_unlock(LNET_LOCK_EX);
2977 }
2978
2979 /*
2980  * Build a peer from incoming data.
2981  *
2982  * The NIDs in the incoming data are supposed to be structured as follows:
2983  *  - loopback
2984  *  - primary NID
2985  *  - other NIDs in same net
2986  *  - NIDs in second net
2987  *  - NIDs in third net
2988  *  - ...
2989  * This is due to the way the list of NIDs in the data is created.
2990  *
2991  * Note that this function will mark the peer uptodate unless an
2992  * ENOMEM is encountered. All other errors are due to a conflict
2993  * between the DLC configuration and what discovery sees. We treat DLC
2994  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2995  * peer from becoming stuck in discovery.
2996  */
2997 static int lnet_peer_merge_data(struct lnet_peer *lp,
2998                                 struct lnet_ping_buffer *pbuf)
2999 {
3000         struct lnet_peer_net *lpn;
3001         struct lnet_peer_ni *lpni;
3002         lnet_nid_t *curnis = NULL;
3003         struct lnet_ni_status *addnis = NULL;
3004         lnet_nid_t *delnis = NULL;
3005         unsigned flags;
3006         int ncurnis;
3007         int naddnis;
3008         int ndelnis;
3009         int nnis = 0;
3010         int i;
3011         int j;
3012         int rc;
3013
3014         flags = LNET_PEER_DISCOVERED;
3015         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3016                 flags |= LNET_PEER_MULTI_RAIL;
3017
3018         /*
3019          * Cache the routing feature for the peer: whether it is enabled
3020          * or disabled as reported by the remote peer.
3021          */
3022         spin_lock(&lp->lp_lock);
3023         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3024                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3025         else
3026                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3027         spin_unlock(&lp->lp_lock);
3028
3029         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
3030         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3031         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3032         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3033         if (!curnis || !addnis || !delnis) {
3034                 rc = -ENOMEM;
3035                 goto out;
3036         }
3037         ncurnis = 0;
3038         naddnis = 0;
3039         ndelnis = 0;
3040
3041         /* Construct the list of NIDs present in peer. */
3042         lpni = NULL;
3043         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3044                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
3045
3046         /*
3047          * Check for NIDs in pbuf not present in curnis[].
3048          * The loop starts at 1 to skip the loopback NID.
3049          */
3050         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
3051                 for (j = 0; j < ncurnis; j++)
3052                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
3053                                 break;
3054                 if (j == ncurnis)
3055                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
3056         }
3057         /*
3058          * Check for NIDs in curnis[] not present in pbuf.
3059          * The nested loop starts at 1 to skip the loopback NID.
3060          *
3061          * But never add the loopback NID to delnis[]: if it is
3062          * present in curnis[] then this peer is for this node.
3063          */
3064         for (i = 0; i < ncurnis; i++) {
3065                 if (curnis[i] == LNET_NID_LO_0)
3066                         continue;
3067                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3068                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3069                                 /*
3070                                  * update the information we cache for the
3071                                  * peer with the latest information we
3072                                  * received
3073                                  */
3074                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
3075                                 if (lpni) {
3076                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3077                                         lnet_peer_ni_decref_locked(lpni);
3078                                 }
3079                                 break;
3080                         }
3081                 }
3082                 if (j == pbuf->pb_info.pi_nnis)
3083                         delnis[ndelnis++] = curnis[i];
3084         }
3085
3086         /*
3087          * If we get here and discovery is disabled then we don't want
3088          * to add or delete any NIs. We just update the ones we already
3089          * have information on, and we're done.
3090          */
3091         rc = 0;
3092         if (lnet_is_discovery_disabled(lp))
3093                 goto out;
3094
3095         for (i = 0; i < naddnis; i++) {
3096                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3097                 if (rc) {
3098                         CERROR("Error adding NID %s to peer %s: %d\n",
3099                                libcfs_nid2str(addnis[i].ns_nid),
3100                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3101                         if (rc == -ENOMEM)
3102                                 goto out;
3103                 }
3104                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3105                 if (lpni) {
3106                         lpni->lpni_ns_status = addnis[i].ns_status;
3107                         lnet_peer_ni_decref_locked(lpni);
3108                 }
3109         }
3110
3111         for (i = 0; i < ndelnis; i++) {
3112                 /*
3113                  * for routers it's okay to delete the primary_nid because
3114                  * the upper layers don't really rely on it. So if we're
3115                  * being told that the router changed its primary_nid
3116                  * then it's okay to delete it.
3117                  */
3118                 if (lp->lp_rtr_refcount > 0)
3119                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3120                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3121                 if (rc) {
3122                         CERROR("Error deleting NID %s from peer %s: %d\n",
3123                                libcfs_nid2str(delnis[i]),
3124                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3125                         if (rc == -ENOMEM)
3126                                 goto out;
3127                 }
3128         }
3129
3130         /* The peer net for the primary NID should be the first entry in the
3131          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3132          * be the first entry in its peer net's lpn_peer_nis list.
3133          */
3134         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3135         if (!lpni) {
3136                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3137                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3138                 goto out;
3139         }
3140
3141         lnet_peer_ni_decref_locked(lpni);
3142
3143         lpn = lpni->lpni_peer_net;
3144         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3145                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3146
3147         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3148                 list_move(&lpni->lpni_peer_nis,
3149                           &lpni->lpni_peer_net->lpn_peer_nis);
3150
3151         /*
3152          * Errors other than -ENOMEM are due to peers having been
3153          * configured with DLC. Ignore these because DLC overrides
3154          * Discovery.
3155          */
3156         rc = 0;
3157 out:
3158         CFS_FREE_PTR_ARRAY(curnis, nnis);
3159         CFS_FREE_PTR_ARRAY(addnis, nnis);
3160         CFS_FREE_PTR_ARRAY(delnis, nnis);
3161         lnet_ping_buffer_decref(pbuf);
3162         CDEBUG(D_NET, "peer %s (%p): %d\n",
3163                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3164
3165         if (rc) {
3166                 spin_lock(&lp->lp_lock);
3167                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3168                 lp->lp_state |= LNET_PEER_FORCE_PING;
3169                 spin_unlock(&lp->lp_lock);
3170         }
3171         return rc;
3172 }
3173
3174 /*
3175  * The data in pbuf says lp is its primary peer, but the data was
3176  * received by a different peer. Try to update lp with the data.
3177  */
3178 static int
3179 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3180 {
3181         struct lnet_handle_md mdh;
3182
3183         /* Queue lp for discovery, and force it on the request queue. */
3184         lnet_net_lock(LNET_LOCK_EX);
3185         if (lnet_peer_queue_for_discovery(lp))
3186                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3187         lnet_net_unlock(LNET_LOCK_EX);
3188
3189         LNetInvalidateMDHandle(&mdh);
3190
3191         /*
3192          * Decide whether we can move the peer to the DATA_PRESENT state.
3193          *
3194          * We replace stale data for a multi-rail peer, repair PING_FAILED
3195          * status, and preempt FORCE_PING.
3196          *
3197          * If after that we have DATA_PRESENT, we merge it into this peer.
3198          */
3199         spin_lock(&lp->lp_lock);
3200         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3201                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3202                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3203                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3204                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3205                         lnet_ping_buffer_decref(pbuf);
3206                         pbuf = lp->lp_data;
3207                         lp->lp_data = NULL;
3208                 }
3209         }
3210         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3211                 lnet_ping_buffer_decref(lp->lp_data);
3212                 lp->lp_data = NULL;
3213                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3214         }
3215         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3216                 mdh = lp->lp_ping_mdh;
3217                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3218                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3219                 lp->lp_ping_error = 0;
3220         }
3221         if (lp->lp_state & LNET_PEER_FORCE_PING)
3222                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3223         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3224         spin_unlock(&lp->lp_lock);
3225
3226         if (!LNetMDHandleIsInvalid(mdh))
3227                 LNetMDUnlink(mdh);
3228
3229         if (pbuf)
3230                 return lnet_peer_merge_data(lp, pbuf);
3231
3232         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3233         return 0;
3234 }
3235
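/* Check whether @nid appears in the list of NIDs carried in @pinfo. */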
3236 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3237 {
3238         int i;
3239
3240         for (i = 0; i < pinfo->pi_nnis; i++) {
3241                 if (pinfo->pi_ni[i].ns_nid == nid)
3242                         return true;
3243         }
3244
3245         return false;
3246 }
3247
3248 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3249  * to the discovery queue a reference was taken that will prevent the peer from
3250  * actually being freed by this function. After this function exits the
3251  * discovery thread should call lnet_peer_discovery_complete() which will
3252  * drop that reference as well as wake any waiters that may also be holding a
3253  * ref on the peer
3254  */
3255 static int lnet_peer_deletion(struct lnet_peer *lp)
3256 __must_hold(&lp->lp_lock)
3257 {
3258         struct list_head rlist;
3259         struct lnet_route *route, *tmp;
3260         int sensitivity = lp->lp_health_sensitivity;
3261
3262         INIT_LIST_HEAD(&rlist);
3263
3264         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3265                           LNET_PEER_FORCE_PUSH);
3266         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3267                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3268
3269         /* no-op if lnet_peer_del() has already been called on this peer */
3270         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3271                 return 0;
3272
3273         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3274                 return -ESHUTDOWN;
3275
3276         spin_unlock(&lp->lp_lock);
3277
3278         mutex_lock(&the_lnet.ln_api_mutex);
3279
3280         lnet_net_lock(LNET_LOCK_EX);
3281         /* remove the peer from the discovery work
3282          * queue if it's on there, in preparation
3283          * for deleting it.
3284          */
3285         if (!list_empty(&lp->lp_dc_list))
3286                 list_del_init(&lp->lp_dc_list);
3287         list_for_each_entry_safe(route, tmp,
3288                                  &lp->lp_routes,
3289                                  lr_gwlist)
3290                 lnet_move_route(route, NULL, &rlist);
3291         lnet_net_unlock(LNET_LOCK_EX);
3292
3293         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3294         lnet_peer_del(lp);
3295
3296         list_for_each_entry_safe(route, tmp,
3297                                  &rlist, lr_list) {
3298                 /* re-add these routes */
3299                 lnet_add_route(route->lr_net,
3300                                route->lr_hops,
3301                                &route->lr_nid,
3302                                route->lr_priority,
3303                                sensitivity);
3304                 LIBCFS_FREE(route, sizeof(*route));
3305         }
3306
3307         mutex_unlock(&the_lnet.ln_api_mutex);
3308
3309         spin_lock(&lp->lp_lock);
3310
3311         return 0;
3312 }
3313
3314 /*
3315  * Update a peer using the data received.
3316  */
3317 static int lnet_peer_data_present(struct lnet_peer *lp)
3318 __must_hold(&lp->lp_lock)
3319 {
3320         struct lnet_ping_buffer *pbuf;
3321         struct lnet_peer_ni *lpni;
3322         lnet_nid_t nid = LNET_NID_ANY;
3323         unsigned flags;
3324         int rc = 0;
3325
3326         pbuf = lp->lp_data;
3327         lp->lp_data = NULL;
3328         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3329         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3330         spin_unlock(&lp->lp_lock);
3331
3332         /*
3333          * Modifications of peer structures are done while holding the
3334          * ln_api_mutex. A global lock is required because we may be
3335          * modifying multiple peer structures, and a mutex greatly
3336          * simplifies memory management.
3337          *
3338          * The actual changes to the data structures must also protect
3339          * against concurrent lookups, for which the lnet_net_lock in
3340          * LNET_LOCK_EX mode is used.
3341          */
3342         mutex_lock(&the_lnet.ln_api_mutex);
3343         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3344                 rc = -ESHUTDOWN;
3345                 goto out;
3346         }
3347
3348         /*
3349          * If this peer is not on the peer list then it is being torn
3350          * down, and our reference count may be all that is keeping it
3351          * alive. Don't do any work on it.
3352          */
3353         if (list_empty(&lp->lp_peer_list))
3354                 goto out;
3355
3356         flags = LNET_PEER_DISCOVERED;
3357         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3358                 flags |= LNET_PEER_MULTI_RAIL;
3359
3360         /*
3361          * Check whether the primary NID in the message matches the
3362          * primary NID of the peer. If it does, update the peer; if
3363          * it does not, check whether there is already a peer with
3364          * that primary NID. If no such peer exists, try to update
3365          * the primary NID of the current peer (allowed if it was
3366          * created due to message traffic) and complete the update.
3367          * If the peer did exist, hand off the data to it.
3368          *
3369          * The peer for the loopback interface is a special case: this
3370          * is the peer for the local node, and we want to set its
3371          * primary NID to the correct value here. Moreover, this peer
3372          * can show up with only the loopback NID in the ping buffer.
3373          */
3374         if (pbuf->pb_info.pi_nnis <= 1)
3375                 goto out;
3376         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3377         if (nid_is_lo0(&lp->lp_primary_nid)) {
3378                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3379                 if (!rc)
3380                         rc = lnet_peer_merge_data(lp, pbuf);
3381         /*
3382          * If the primary NID of the peer is present in the ping info returned
3383          * from the peer, but it is not the primary NID we have cached
3384          * locally, and discovery is disabled, then we don't want to update
3385          * our local peer info by adding or removing NIDs; we just want
3386          * to update the status of the NIDs that we currently have
3387          * recorded in that peer.
3388          */
3389         } else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
3390                    (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3391                                              &pbuf->pb_info) &&
3392                     lnet_is_discovery_disabled(lp))) {
3393                 rc = lnet_peer_merge_data(lp, pbuf);
3394         } else {
3395                 lpni = lnet_find_peer_ni_locked(nid);
3396                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3397                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3398                         if (rc) {
3399                                 CERROR("Primary NID error %s versus %s: %d\n",
3400                                        libcfs_nidstr(&lp->lp_primary_nid),
3401                                        libcfs_nid2str(nid), rc);
3402                         } else {
3403                                 rc = lnet_peer_merge_data(lp, pbuf);
3404                         }
3405                         if (lpni)
3406                                 lnet_peer_ni_decref_locked(lpni);
3407                 } else {
3408                         struct lnet_peer *new_lp;
3409                         new_lp = lpni->lpni_peer_net->lpn_peer;
3410                         /*
3411                          * if lp has discovery/MR enabled that means new_lp
3412                          * should have discovery/MR enabled as well, since
3413                          * it's the same peer, which we're about to merge
3414                          */
3415                         spin_lock(&lp->lp_lock);
3416                         spin_lock(&new_lp->lp_lock);
3417                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3418                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3419                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3420                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3421                         /* If we're processing a ping reply then we may be
3422                          * about to send a push to the peer that we ping'd.
3423                          * Since the ping reply that we're processing was
3424                          * received by lp, we need to set the discovery source
3425                          * NID for new_lp to the NID stored in lp.
3426                          */
3427                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3428                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3429                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3430                         }
3431                         spin_unlock(&new_lp->lp_lock);
3432                         spin_unlock(&lp->lp_lock);
3433
3434                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3435                         lnet_consolidate_routes_locked(lp, new_lp);
3436                         lnet_peer_ni_decref_locked(lpni);
3437                 }
3438         }
3439 out:
3440         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3441                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3442                lp->lp_state);
3443         mutex_unlock(&the_lnet.ln_api_mutex);
3444
3445         spin_lock(&lp->lp_lock);
3446         /* Tell discovery to re-check the peer immediately. */
3447         if (!rc)
3448                 rc = LNET_REDISCOVER_PEER;
3449         return rc;
3450 }
3451
3452 /*
3453  * A ping failed. Clear the PING_FAILED state and set the
3454  * FORCE_PING state, to ensure a retry even if discovery is
3455  * disabled. This avoids being left with incorrect state.
3456  */
3457 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3458 __must_hold(&lp->lp_lock)
3459 {
3460         struct lnet_handle_md mdh;
3461         int rc;
3462
3463         mdh = lp->lp_ping_mdh;
3464         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3465         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3466         lp->lp_state |= LNET_PEER_FORCE_PING;
3467         rc = lp->lp_ping_error;
3468         lp->lp_ping_error = 0;
3469         spin_unlock(&lp->lp_lock);
3470
3471         if (!LNetMDHandleIsInvalid(mdh))
3472                 LNetMDUnlink(mdh);
3473
3474         CDEBUG(D_NET, "peer %s:%d\n",
3475                libcfs_nidstr(&lp->lp_primary_nid), rc);
3476
3477         spin_lock(&lp->lp_lock);
3478         return rc ? rc : LNET_REDISCOVER_PEER;
3479 }
3480
3481 /* Active side of ping. */
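/*
 * Mark the peer PING_SENT, take a peer reference for the ping MD, and
 * send a ping GET to the peer's primary NID via lnet_send_ping() with
 * the discovery event handler. Hard send errors clear PING_SENT and
 * PING_FAILED and cause discovery for this peer to terminate.
 */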
3482 static int lnet_peer_send_ping(struct lnet_peer *lp)
3483 __must_hold(&lp->lp_lock)
3484 {
3485         int nnis;
3486         int rc;
3487         int cpt;
3488
3489         lp->lp_state |= LNET_PEER_PING_SENT;
3490         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3491         spin_unlock(&lp->lp_lock);
3492
3493         cpt = lnet_net_lock_current();
3494         /* Refcount for MD. */
3495         lnet_peer_addref_locked(lp);
3496         lnet_net_unlock(cpt);
3497
3498         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3499
3500         rc = lnet_send_ping(lnet_nid_to_nid4(&lp->lp_primary_nid),
3501                             &lp->lp_ping_mdh, nnis, lp,
3502                             the_lnet.ln_dc_handler, false);
3503
3504         /*
3505          * if LNetMDBind in lnet_send_ping fails we need to decrement the
3506          * refcount on the peer, otherwise LNetMDUnlink will be called
3507          * which will eventually do that.
3508          */
3509         if (rc > 0) {
3510                 lnet_net_lock(cpt);
3511                 lnet_peer_decref_locked(lp);
3512                 lnet_net_unlock(cpt);
3513                 rc = -rc; /* change the rc to negative value */
3514                 goto fail_error;
3515         } else if (rc < 0) {
3516                 goto fail_error;
3517         }
3518
3519         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3520
3521         spin_lock(&lp->lp_lock);
3522         return 0;
3523
3524 fail_error:
3525         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3526         /*
3527          * The errors that get us here are considered hard errors and
3528          * cause Discovery to terminate. So we clear PING_SENT, but do
3529          * not set either PING_FAILED or FORCE_PING. In fact we need
3530          * to clear PING_FAILED, because the unlink event handler will
3531          * have set it if we called LNetMDUnlink() above.
3532          */
3533         spin_lock(&lp->lp_lock);
3534         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3535         return rc;
3536 }
3537
3538 /*
3539  * This function exists because you cannot call LNetMDUnlink() from an
3540  * event handler.
3541  */
3542 static int lnet_peer_push_failed(struct lnet_peer *lp)
3543 __must_hold(&lp->lp_lock)
3544 {
3545         struct lnet_handle_md mdh;
3546         int rc;
3547
3548         mdh = lp->lp_push_mdh;
3549         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3550         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3551         rc = lp->lp_push_error;
3552         lp->lp_push_error = 0;
3553         spin_unlock(&lp->lp_lock);
3554
3555         if (!LNetMDHandleIsInvalid(mdh))
3556                 LNetMDUnlink(mdh);
3557
3558         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3559         spin_lock(&lp->lp_lock);
3560         return rc ? rc : LNET_REDISCOVER_PEER;
3561 }
3562
3563 /*
3564  * Mark the peer as discovered.
3565  */
3566 static int lnet_peer_discovered(struct lnet_peer *lp)
3567 __must_hold(&lp->lp_lock)
3568 {
3569         lp->lp_state |= LNET_PEER_DISCOVERED;
3570         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3571                           LNET_PEER_REDISCOVER);
3572
3573         lp->lp_dc_error = 0;
3574
3575         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3576
3577         return 0;
3578 }
3579
3580 /* Active side of push. */
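/*
 * Push the local ping target buffer to a multi-rail peer with an
 * acknowledged LNetPut() on the reserved portal, using the discovery
 * event handler. Non-multi-rail peers are not pushed; if their NIDs
 * are up to date they are simply marked discovered.
 */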
3581 static int lnet_peer_send_push(struct lnet_peer *lp)
3582 __must_hold(&lp->lp_lock)
3583 {
3584         struct lnet_ping_buffer *pbuf;
3585         struct lnet_process_id id;
3586         struct lnet_md md;
3587         int cpt;
3588         int rc;
3589
3590         /* Don't push to a non-multi-rail peer. */
3591         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3592                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3593                 /* if peer's NIDs are uptodate then peer is discovered */
3594                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3595                         rc = lnet_peer_discovered(lp);
3596                         return rc;
3597                 }
3598
3599                 return 0;
3600         }
3601
3602         lp->lp_state |= LNET_PEER_PUSH_SENT;
3603         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3604         spin_unlock(&lp->lp_lock);
3605
3606         cpt = lnet_net_lock_current();
3607         pbuf = the_lnet.ln_ping_target;
3608         lnet_ping_buffer_addref(pbuf);
3609         lnet_net_unlock(cpt);
3610
3611         /* Push source MD */
3612         md.start     = &pbuf->pb_info;
3613         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3614         md.threshold = 2; /* Put/Ack */
3615         md.max_size  = 0;
3616         md.options   = LNET_MD_TRACK_RESPONSE;
3617         md.handler   = the_lnet.ln_dc_handler;
3618         md.user_ptr  = lp;
3619
3620         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3621         if (rc) {
3622                 lnet_ping_buffer_decref(pbuf);
3623                 CERROR("Can't bind push source MD: %d\n", rc);
3624                 goto fail_error;
3625         }
3626
3627         cpt = lnet_net_lock_current();
3628         /* Refcount for MD. */
3629         lnet_peer_addref_locked(lp);
3630         id.pid = LNET_PID_LUSTRE;
3631         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3632                 id.nid = lnet_nid_to_nid4(&lp->lp_disc_dst_nid);
3633         else
3634                 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
3635         lnet_net_unlock(cpt);
3636
3637         rc = LNetPut(lnet_nid_to_nid4(&lp->lp_disc_src_nid), lp->lp_push_mdh,
3638                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3639                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3640
3641         /*
3642          * Reset the discovery NIDs. There is no need to restrict sending
3643          * from that source if we call lnet_push_update_to_peers(). They
3644          * will be set to specific NIDs if we initiate discovery from
3645          * scratch.
3646          */
3647         lp->lp_disc_src_nid = LNET_ANY_NID;
3648         lp->lp_disc_dst_nid = LNET_ANY_NID;
3649
3650         if (rc)
3651                 goto fail_unlink;
3652
3653         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3654
3655         spin_lock(&lp->lp_lock);
3656         return 0;
3657
3658 fail_unlink:
3659         LNetMDUnlink(lp->lp_push_mdh);
3660         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3661 fail_error:
3662         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3663                lp, rc);
3664         /*
3665          * The errors that get us here are considered hard errors and
3666          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3667          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3668          * because the unlink event handler will have set it if we
3669          * called LNetMDUnlink() above.
3670          */
3671         spin_lock(&lp->lp_lock);
3672         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3673         return rc;
3674 }
3675
3676 /*
3677  * An unrecoverable error was encountered during discovery.
3678  * Set error status in peer and abort discovery.
3679  */
3680 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3681 {
3682         CDEBUG(D_NET, "Discovery error %s: %d\n",
3683                libcfs_nidstr(&lp->lp_primary_nid), error);
3684
3685         spin_lock(&lp->lp_lock);
3686         lp->lp_dc_error = error;
3687         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3688         lp->lp_state |= LNET_PEER_REDISCOVER;
3689         spin_unlock(&lp->lp_lock);
3690 }
3691
3692 /*
3693  * Wait for work to be queued or some other change that must be
3694  * attended to. Returns non-zero if the discovery thread should shut
3695  * down.
3696  */
3697 static int lnet_peer_discovery_wait_for_work(void)
3698 {
3699         int cpt;
3700         int rc = 0;
3701
3702         DEFINE_WAIT(wait);
3703
3704         cpt = lnet_net_lock_current();
3705         for (;;) {
3706                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3707                                 TASK_INTERRUPTIBLE);
3708                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3709                         break;
3710                 if (lnet_push_target_resize_needed() ||
3711                     the_lnet.ln_push_target->pb_needs_post)
3712                         break;
3713                 if (!list_empty(&the_lnet.ln_dc_request))
3714                         break;
3715                 if (!list_empty(&the_lnet.ln_msg_resend))
3716                         break;
3717                 lnet_net_unlock(cpt);
3718
3719                 /*
3720                  * wake up at most once per second to check for peers that
3721                  * have been stuck on the working queue for longer than
3722                  * the peer timeout.
3723                  */
3724                 schedule_timeout(cfs_time_seconds(1));
3725                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3726                 cpt = lnet_net_lock_current();
3727         }
3728         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3729
3730         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3731                 rc = -ESHUTDOWN;
3732
3733         lnet_net_unlock(cpt);
3734
3735         CDEBUG(D_NET, "woken: %d\n", rc);
3736
3737         return rc;
3738 }
3739
3740 /*
3741  * Messages that were pending on a destroyed peer will be put on a global
3742  * resend list. The message resend list will be checked by
3743  * the discovery thread when it wakes up, and will resend messages. These
3744  * messages can still be sendable in case the lpni which was the initial
3745  * cause of the message re-queue was transferred to another peer.
3746  *
3747  * It is possible that LNet could be shut down while we're iterating
3748  * through the list. lnet_shutdown_lndnets() will attempt to access the
3749  * resend list, but will have to wait until the spinlock is released, by
3750  * which time there shouldn't be any more messages on the resend list.
3751  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3752  * for the messages so they can be released. The other case is that
3753  * lnet_shutdown_lndnets() can finalize all the messages before this
3754  * function can visit the resend list, in which case this function will be
3755  * a no-op.
3756  */
3757 static void lnet_resend_msgs(void)
3758 {
3759         struct lnet_msg *msg, *tmp;
3760         LIST_HEAD(resend);
3761         int rc;
3762
3763         spin_lock(&the_lnet.ln_msg_resend_lock);
3764         list_splice(&the_lnet.ln_msg_resend, &resend);
3765         spin_unlock(&the_lnet.ln_msg_resend_lock);
3766
3767         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3768                 list_del_init(&msg->msg_list);
3769                 rc = lnet_send(msg->msg_src_nid_param, msg,
3770                                msg->msg_rtr_nid_param);
3771                 if (rc < 0) {
3772                         CNETERR("Error sending %s to %s: %d\n",
3773                                lnet_msgtyp2str(msg->msg_type),
3774                                libcfs_id2str(msg->msg_target), rc);
3775                         lnet_finalize(msg, rc);
3776                 }
3777         }
3778 }
3779
3780 /* The discovery thread. */
3781 static int lnet_peer_discovery(void *arg)
3782 {
3783         struct lnet_peer *lp;
3784         int rc;
3785
3786         wait_for_completion(&the_lnet.ln_started);
3787
3788         CDEBUG(D_NET, "started\n");
3789
3790         for (;;) {
3791                 if (lnet_peer_discovery_wait_for_work())
3792                         break;
3793
3794                 if (lnet_push_target_resize_needed())
3795                         lnet_push_target_resize();
3796                 else if (the_lnet.ln_push_target->pb_needs_post)
3797                         lnet_push_target_post(the_lnet.ln_push_target,
3798                                               &the_lnet.ln_push_target_md);
3799
3800                 lnet_resend_msgs();
3801
3802                 lnet_net_lock(LNET_LOCK_EX);
3803                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3804                         lnet_net_unlock(LNET_LOCK_EX);
3805                         break;
3806                 }
3807
3808                 /*
3809                  * Process all incoming discovery work requests.  When
3810                  * discovery must wait on a peer to change state, it
3811                  * is added to the tail of the ln_dc_working queue. A
3812                  * timestamp keeps track of when the peer was added,
3813                  * so we can time out discovery requests that take too
3814                  * long.
3815                  */
3816                 while (!list_empty(&the_lnet.ln_dc_request)) {
3817                         lp = list_first_entry(&the_lnet.ln_dc_request,
3818                                               struct lnet_peer, lp_dc_list);
3819                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3820                         /*
3821                          * set the time the peer was put on the dc_working
3822                          * queue. It shouldn't remain on the queue
3823                          * forever, in case the GET message (for ping)
3824                          * doesn't get a REPLY or the PUT message (for
3825                          * push) doesn't get an ACK.
3826                          */
3827                         lp->lp_last_queued = ktime_get_real_seconds();
3828                         lnet_net_unlock(LNET_LOCK_EX);
3829
3830                         if (lnet_push_target_resize_needed())
3831                                 lnet_push_target_resize();
3832                         else if (the_lnet.ln_push_target->pb_needs_post)
3833                                 lnet_push_target_post(the_lnet.ln_push_target,
3834                                                       &the_lnet.ln_push_target_md);
3835
3836                         /*
3837                          * Select an action depending on the state of
3838                          * the peer and whether discovery is disabled.
3839                          * The check whether discovery is disabled is
3840                          * done after the code that handles processing
3841                          * for arrived data, cleanup for failures, and
3842                          * forcing a Ping or Push.
3843                          */
3844                         spin_lock(&lp->lp_lock);
3845                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3846                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3847                                 lp->lp_state);
3848                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3849                                             LNET_PEER_MARK_DELETED))
3850                                 rc = lnet_peer_deletion(lp);
3851                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3852                                 rc = lnet_peer_data_present(lp);
3853                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3854                                 rc = lnet_peer_ping_failed(lp);
3855                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3856                                 rc = lnet_peer_push_failed(lp);
3857                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3858                                 rc = lnet_peer_send_ping(lp);
3859                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3860                                 rc = lnet_peer_send_push(lp);
3861                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3862                                 rc = lnet_peer_send_ping(lp);
3863                         else if (lnet_peer_needs_push(lp))
3864                                 rc = lnet_peer_send_push(lp);
3865                         else
3866                                 rc = lnet_peer_discovered(lp);
3867                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3868                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3869                                 lp->lp_state, rc);
3870                         spin_unlock(&lp->lp_lock);
3871
3872                         lnet_net_lock(LNET_LOCK_EX);
3873                         if (rc == LNET_REDISCOVER_PEER) {
3874                                 list_move(&lp->lp_dc_list,
3875                                           &the_lnet.ln_dc_request);
3876                         } else if (rc) {
3877                                 lnet_peer_discovery_error(lp, rc);
3878                         }
3879                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3880                                 lnet_peer_discovery_complete(lp);
3881                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3882                                 break;
3883
3884                 }
3885
3886                 lnet_net_unlock(LNET_LOCK_EX);
3887         }
3888
3889         CDEBUG(D_NET, "stopping\n");
3890         /*
3891          * Clean up before telling lnet_peer_discovery_stop() that
3892          * we're done. Use wake_up() below to somewhat reduce the
3893          * size of the thundering herd if there are multiple threads
3894          * waiting on discovery of a single peer.
3895          */
3896
3897         /* Queue cleanup 1: stop all pending pings and pushes. */
3898         lnet_net_lock(LNET_LOCK_EX);
3899         while (!list_empty(&the_lnet.ln_dc_working)) {
3900                 lp = list_first_entry(&the_lnet.ln_dc_working,
3901                                       struct lnet_peer, lp_dc_list);
3902                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3903                 lnet_net_unlock(LNET_LOCK_EX);
3904                 lnet_peer_cancel_discovery(lp);
3905                 lnet_net_lock(LNET_LOCK_EX);
3906         }
3907         lnet_net_unlock(LNET_LOCK_EX);
3908
3909         /* Queue cleanup 2: wait for the expired queue to clear. */
3910         while (!list_empty(&the_lnet.ln_dc_expired))
3911                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3912
3913         /* Queue cleanup 3: clear the request queue. */
3914         lnet_net_lock(LNET_LOCK_EX);
3915         while (!list_empty(&the_lnet.ln_dc_request)) {
3916                 lp = list_first_entry(&the_lnet.ln_dc_request,
3917                                       struct lnet_peer, lp_dc_list);
3918                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3919                 lnet_peer_discovery_complete(lp);
3920         }
3921         lnet_net_unlock(LNET_LOCK_EX);
3922
3923         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3924         the_lnet.ln_dc_handler = NULL;
3925
3926         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3927         wake_up(&the_lnet.ln_dc_waitq);
3928
3929         CDEBUG(D_NET, "stopped\n");
3930
3931         return 0;
3932 }
3933
3934 /* ln_api_mutex is held on entry. */
3935 int lnet_peer_discovery_start(void)
3936 {
3937         struct task_struct *task;
3938         int rc = 0;
3939
3940         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3941                 return -EALREADY;
3942
3943         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3944         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3945         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3946         if (IS_ERR(task)) {
3947                 rc = PTR_ERR(task);
3948                 CERROR("Can't start peer discovery thread: %d\n", rc);
3949
3950                 the_lnet.ln_dc_handler = NULL;
3951
3952                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3953         }
3954
3955         CDEBUG(D_NET, "discovery start: %d\n", rc);
3956
3957         return rc;
3958 }
3959
3960 /* ln_api_mutex is held on entry. */
3961 void lnet_peer_discovery_stop(void)
3962 {
3963         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3964                 return;
3965
3966         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3967         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3968
3969         /* In the LNetNIInit() path we may be stopping discovery before it
3970          * entered its work loop
3971          */
3972         if (!completion_done(&the_lnet.ln_started))
3973                 complete(&the_lnet.ln_started);
3974         else
3975                 wake_up(&the_lnet.ln_dc_waitq);
3976
3977         wait_event(the_lnet.ln_dc_waitq,
3978                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3979
3980         LASSERT(list_empty(&the_lnet.ln_dc_request));
3981         LASSERT(list_empty(&the_lnet.ln_dc_working));
3982         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3983
3984         CDEBUG(D_NET, "discovery stopped\n");
3985 }
3986
3987 /* Debugging */
3988
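/*
 * Log the state of a single peer NI (aliveness and credit counters)
 * to the debug log at D_WARNING level.
 */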
3989 void
3990 lnet_debug_peer(lnet_nid_t nid)
3991 {
3992         char                    *aliveness = "NA";
3993         struct lnet_peer_ni     *lp;
3994         int                     cpt;
3995
3996         cpt = lnet_cpt_of_nid(nid, NULL);
3997         lnet_net_lock(cpt);
3998
3999         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
4000         if (IS_ERR(lp)) {
4001                 lnet_net_unlock(cpt);
4002                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
4003                 return;
4004         }
4005
4006         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4007                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4008
4009         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4010                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4011                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4012                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4013                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4014
4015         lnet_peer_ni_decref_locked(lp);
4016
4017         lnet_net_unlock(cpt);
4018 }
4019
4020 /* Gathering information for userspace. */
4021
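/*
 * Return credit and aliveness information for the peer NI at index
 * @peer_index in the peer table for CPT @cpt_iter. Returns -ENOENT
 * if there is no such entry.
 */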
4022 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4023                           char aliveness[LNET_MAX_STR_LEN],
4024                           __u32 *cpt_iter, __u32 *refcount,
4025                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4026                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4027                           __u32 *peer_tx_qnob)
4028 {
4029         struct lnet_peer_table          *peer_table;
4030         struct lnet_peer_ni             *lp;
4031         int                             j;
4032         int                             lncpt;
4033         bool                            found = false;
4034
4035         /* get the number of CPTs */
4036         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4037
4038         /* if the cpt number to be examined is >= the number of cpts in
4039          * the system then indicate that there are no more cpts to examine
4040          */
4041         if (*cpt_iter >= lncpt)
4042                 return -ENOENT;
4043
4044         /* get the current table */
4045         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4046         /* if the ptable is NULL then there are no more cpts to examine */
4047         if (peer_table == NULL)
4048                 return -ENOENT;
4049
4050         lnet_net_lock(*cpt_iter);
4051
4052         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4053                 struct list_head *peers = &peer_table->pt_hash[j];
4054
4055                 list_for_each_entry(lp, peers, lpni_hashlist) {
4056                         if (!nid_is_nid4(&lp->lpni_nid))
4057                                 continue;
4058                         if (peer_index-- > 0)
4059                                 continue;
4060
4061                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4062                         if (lnet_isrouter(lp) ||
4063                                 lnet_peer_aliveness_enabled(lp))
4064                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4065                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4066
4067                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4068                         *refcount = kref_read(&lp->lpni_kref);
4069                         *ni_peer_tx_credits =
4070                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4071                         *peer_tx_credits = lp->lpni_txcredits;
4072                         *peer_rtr_credits = lp->lpni_rtrcredits;
4073                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4074                         *peer_tx_qnob = lp->lpni_txqnob;
4075
4076                         found = true;
4077                 }
4078
4079         }
4080         lnet_net_unlock(*cpt_iter);
4081
4082         *cpt_iter = lncpt;
4083
4084         return found ? 0 : -ENOENT;
4085 }
4086
4087 /* ln_api_mutex is held, which keeps the peer list stable */
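/*
 * For each NID of the peer, the userspace bulk buffer receives, in
 * order: the NID, credit info, element stats, message stats, and
 * health stats.
 */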
4088 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4089 {
4090         struct lnet_ioctl_element_stats *lpni_stats;
4091         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4092         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4093         struct lnet_peer_ni_credit_info *lpni_info;
4094         struct lnet_peer_ni *lpni;
4095         struct lnet_peer *lp;
4096         lnet_nid_t nid;
4097         __u32 size;
4098         int rc;
4099
4100         lp = lnet_find_peer(cfg->prcfg_prim_nid);
4101
4102         if (!lp) {
4103                 rc = -ENOENT;
4104                 goto out;
4105         }
4106
4107         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4108                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4109         size *= lp->lp_nnis;
4110         if (size > cfg->prcfg_size) {
4111                 cfg->prcfg_size = size;
4112                 rc = -E2BIG;
4113                 goto out_lp_decref;
4114         }
4115
4116         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4117         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4118         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4119         cfg->prcfg_count = lp->lp_nnis;
4120         cfg->prcfg_size = size;
4121         cfg->prcfg_state = lp->lp_state;
4122
4123         /* Allocate helper buffers. */
4124         rc = -ENOMEM;
4125         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4126         if (!lpni_info)
4127                 goto out_lp_decref;
4128         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4129         if (!lpni_stats)
4130                 goto out_free_info;
4131         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4132         if (!lpni_msg_stats)
4133                 goto out_free_stats;
4134         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4135         if (!lpni_hstats)
4136                 goto out_free_msg_stats;
4137
4138
4139         lpni = NULL;
4140         rc = -EFAULT;
4141         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4142                 if (!nid_is_nid4(&lpni->lpni_nid))
4143                         continue;
4144                 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4145                 if (copy_to_user(bulk, &nid, sizeof(nid)))
4146                         goto out_free_hstats;
4147                 bulk += sizeof(nid);
4148
4149                 memset(lpni_info, 0, sizeof(*lpni_info));
4150                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4151                 if (lnet_isrouter(lpni) ||
4152                         lnet_peer_aliveness_enabled(lpni))
4153                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4154                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4155
4156                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4157                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4158                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4159                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4160                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4161                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4162                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4163                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4164                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4165                         goto out_free_hstats;
4166                 bulk += sizeof(*lpni_info);
4167
4168                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4169                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4170                                                             LNET_STATS_TYPE_SEND);
4171                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4172                                                             LNET_STATS_TYPE_RECV);
4173                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4174                                                             LNET_STATS_TYPE_DROP);
4175                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4176                         goto out_free_hstats;
4177                 bulk += sizeof(*lpni_stats);
4178                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4179                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4180                         goto out_free_hstats;
4181                 bulk += sizeof(*lpni_msg_stats);
4182                 lpni_hstats->hlpni_network_timeout =
4183                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4184                 lpni_hstats->hlpni_remote_dropped =
4185                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4186                 lpni_hstats->hlpni_remote_timeout =
4187                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4188                 lpni_hstats->hlpni_remote_error =
4189                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4190                 lpni_hstats->hlpni_health_value =
4191                   atomic_read(&lpni->lpni_healthv);
4192                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4193                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4194                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4195                         goto out_free_hstats;
4196                 bulk += sizeof(*lpni_hstats);
4197         }
4198         rc = 0;
4199
4200 out_free_hstats:
4201         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4202 out_free_msg_stats:
4203         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4204 out_free_stats:
4205         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4206 out_free_info:
4207         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4208 out_lp_decref:
4209         lnet_peer_decref_locked(lp);
4210 out:
4211         return rc;
4212 }
4213
4214 /* must hold net_lock/0 */
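/*
 * Queue a peer NI for health recovery unless the monitor thread is not
 * running, the NI is already queued, it is at maximum health, it has
 * never been seen alive, or it has aged out past lnet_recovery_limit.
 * A reference is taken on the peer NI while it sits on the queue.
 */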
4215 void
4216 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4217                                      struct list_head *recovery_queue,
4218                                      time64_t now)
4219 {
4220         /* the monitor thread could've shut down and cleaned up the queues */
4221         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4222                 return;
4223
4224         if (!list_empty(&lpni->lpni_recovery))
4225                 return;
4226
4227         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4228                 return;
4229
4230         if (!lpni->lpni_last_alive) {
4231                 CDEBUG(D_NET,
4232                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4233                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4234                        lpni->lpni_last_alive);
4235                 return;
4236         }
4237
4238         if (lnet_recovery_limit &&
4239             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4240                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4241                        libcfs_nidstr(&lpni->lpni_nid),
4242                        lpni->lpni_last_alive);
4243                 /* Reset the ping count so that if this peer NI is added back to
4244                  * the recovery queue we will send the first ping right away.
4245                  */
4246                 lpni->lpni_ping_count = 0;
4247                 return;
4248         }
4249
4250         /* This peer NI is going on the recovery queue, so take a ref on it */
4251         lnet_peer_ni_addref_locked(lpni);
4252
4253         lnet_peer_ni_set_next_ping(lpni, now);
4254
4255         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4256                libcfs_nidstr(&lpni->lpni_nid),
4257                lpni->lpni_ping_count,
4258                lpni->lpni_next_ping,
4259                lpni->lpni_last_alive,
4260                atomic_read(&lpni->lpni_healthv));
4261
4262         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4263 }
4264
4265 /* Call with the ln_api_mutex held */
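/*
 * Set the health value of the peer NI matching @nid, or of every peer
 * NI in the system when @all is true, and add the affected NIs to the
 * recovery queue as needed.
 */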
4266 void
4267 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4268 {
4269         struct lnet_peer_table *ptable;
4270         struct lnet_peer *lp;
4271         struct lnet_peer_net *lpn;
4272         struct lnet_peer_ni *lpni;
4273         int lncpt;
4274         int cpt;
4275         time64_t now;
4276
4277         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4278                 return;
4279
4280         now = ktime_get_seconds();
4281
4282         if (!all) {
4283                 lnet_net_lock(LNET_LOCK_EX);
4284                 lpni = lnet_find_peer_ni_locked(nid);
4285                 if (!lpni) {
4286                         lnet_net_unlock(LNET_LOCK_EX);
4287                         return;
4288                 }
4289                 lnet_set_lpni_healthv_locked(lpni, value);
4290                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4291                                              &the_lnet.ln_mt_peerNIRecovq, now);
4292                 lnet_peer_ni_decref_locked(lpni);
4293                 lnet_net_unlock(LNET_LOCK_EX);
4294                 return;
4295         }
4296
4297         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4298
4299         /*
4300          * Walk all the peers and reset the health value for each one to the
4301          * specified value.
4302          */
4303         lnet_net_lock(LNET_LOCK_EX);
4304         for (cpt = 0; cpt < lncpt; cpt++) {
4305                 ptable = the_lnet.ln_peer_tables[cpt];
4306                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4307                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4308                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4309                                                     lpni_peer_nis) {
4310                                         lnet_set_lpni_healthv_locked(lpni,
4311                                                                      value);
4312                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4313                                              &the_lnet.ln_mt_peerNIRecovq, now);
4314                                 }
4315                         }
4316                 }
4317         }
4318         lnet_net_unlock(LNET_LOCK_EX);
4319 }
4320