LU-16709 lnet: fix locking multiple NIDs of the MR peer
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
			    unsigned int flags);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
	if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
		list_del_init(&lpni->lpni_on_remote_peer_ni_list);
		lnet_peer_ni_decref_locked(lpni);
	}
}

void
lnet_peer_net_added(struct lnet_net *net)
{
	struct lnet_peer_ni *lpni, *tmp;

	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list) {

		if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
			lpni->lpni_net = net;

			spin_lock(&lpni->lpni_lock);
			lpni->lpni_txcredits =
				lpni->lpni_net->net_tunables.lct_peer_tx_credits;
			lpni->lpni_mintxcredits = lpni->lpni_txcredits;
			lpni->lpni_rtrcredits =
				lnet_peer_buffer_credits(lpni->lpni_net);
			lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
			spin_unlock(&lpni->lpni_lock);

			lnet_peer_remove_from_remote_list(lpni);
		}
	}
}

static void
lnet_peer_tables_destroy(void)
{
	struct lnet_peer_table	*ptable;
	struct list_head	*hash;
	int			i;
	int			j;

	if (!the_lnet.ln_peer_tables)
		return;

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		hash = ptable->pt_hash;
		if (!hash) /* not initialized */
			break;

		LASSERT(list_empty(&ptable->pt_zombie_list));

		ptable->pt_hash = NULL;
		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			LASSERT(list_empty(&hash[j]));

		CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
	}

	cfs_percpt_free(the_lnet.ln_peer_tables);
	the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
	struct lnet_peer_table	*ptable;
	struct list_head	*hash;
	int			i;
	int			j;

	the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
						   sizeof(*ptable));
	if (the_lnet.ln_peer_tables == NULL) {
		CERROR("Failed to allocate cpu-partition peer tables\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
				 LNET_PEER_HASH_SIZE * sizeof(*hash));
		if (hash == NULL) {
			CERROR("Failed to create peer hash table\n");
			lnet_peer_tables_destroy();
			return -ENOMEM;
		}

		spin_lock_init(&ptable->pt_zombie_lock);
		INIT_LIST_HEAD(&ptable->pt_zombie_list);

		INIT_LIST_HEAD(&ptable->pt_peer_list);

		for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
			INIT_LIST_HEAD(&hash[j]);
		ptable->pt_hash = hash; /* sign of initialization */
	}

	return 0;
}

static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_net *net;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
	if (!lpni)
		return NULL;

	INIT_LIST_HEAD(&lpni->lpni_txq);
	INIT_LIST_HEAD(&lpni->lpni_hashlist);
	INIT_LIST_HEAD(&lpni->lpni_peer_nis);
	INIT_LIST_HEAD(&lpni->lpni_recovery);
	INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
	INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
	LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
	kref_init(&lpni->lpni_kref);
	lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	spin_lock_init(&lpni->lpni_lock);

	if (lnet_peers_start_down())
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
	else
		lpni->lpni_ns_status = LNET_NI_STATUS_UP;
	lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
	lpni->lpni_nid = *nid;
	lpni->lpni_cpt = cpt;
	atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

	net = lnet_get_net_locked(LNET_NID_NET(nid));
	lpni->lpni_net = net;
	if (net) {
		lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
		lpni->lpni_mintxcredits = lpni->lpni_txcredits;
		lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
		lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
	} else {
		/*
		 * This peer_ni is not on a local network, so we
		 * cannot add the credits here. In case the net is
		 * added later, add the peer_ni to the remote peer ni
		 * list so it can be easily found and revisited.
		 */
		/* FIXME: per-net implementation instead? */
		lnet_peer_ni_addref_locked(lpni);
		list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
			      &the_lnet.ln_remote_peer_ni_list);
	}

	CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

	return lpni;
}

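/*
 * Illustrative sketch (not part of this file): how the remote list used
 * above ties into lnet_peer_net_added(). A peer_ni allocated before its
 * network is configured locally parks on ln_remote_peer_ni_list; once
 * the net appears, the configuration path revisits it, roughly:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	lnet_peer_net_added(net);	// walks ln_remote_peer_ni_list,
 *					// assigns lpni->lpni_net, sets the
 *					// credits from the net tunables and
 *					// drops the list reference
 *	lnet_net_unlock(LNET_LOCK_EX);
 *
 * The exact locking context shown is an assumption for illustration;
 * the real caller sits in the net startup path.
 */
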
static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
	struct lnet_peer_net *lpn;

	LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
	if (!lpn)
		return NULL;

	INIT_LIST_HEAD(&lpn->lpn_peer_nets);
	INIT_LIST_HEAD(&lpn->lpn_peer_nis);
	lpn->lpn_net_id = net_id;
	lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
	struct lnet_peer *lp;

	CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

	LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
	LASSERT(list_empty(&lpn->lpn_peer_nis));
	LASSERT(list_empty(&lpn->lpn_peer_nets));
	lp = lpn->lpn_peer;
	lpn->lpn_peer = NULL;
	LIBCFS_FREE(lpn, sizeof(*lpn));

	lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
	struct lnet_peer *lp;

	LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
	if (!lp)
		return NULL;

	INIT_LIST_HEAD(&lp->lp_rtrq);
	INIT_LIST_HEAD(&lp->lp_routes);
	INIT_LIST_HEAD(&lp->lp_peer_list);
	INIT_LIST_HEAD(&lp->lp_peer_nets);
	INIT_LIST_HEAD(&lp->lp_dc_list);
	INIT_LIST_HEAD(&lp->lp_dc_pendq);
	INIT_LIST_HEAD(&lp->lp_rtr_list);
	init_waitqueue_head(&lp->lp_dc_waitq);
	spin_lock_init(&lp->lp_lock);
	lp->lp_primary_nid = *nid;
	lp->lp_disc_src_nid = LNET_ANY_NID;
	lp->lp_disc_dst_nid = LNET_ANY_NID;
	if (lnet_peers_start_down())
		lp->lp_alive = false;
	else
		lp->lp_alive = true;

	/*
	 * all peers created on a router should have health on
	 * if it's not already on.
	 */
	if (the_lnet.ln_routing && !lnet_health_sensitivity)
		lp->lp_health_sensitivity = 1;

	/*
	 * Turn off discovery for the loopback peer. A peer for the
	 * loopback interface is only created when we attempt to send a
	 * message over the loopback, and there is never a need to use a
	 * different interface when sending messages to ourselves.
	 */
	if (nid_is_lo0(nid))
		lp->lp_state = LNET_PEER_NO_DISCOVERY;
	lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
	CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

	LASSERT(atomic_read(&lp->lp_refcount) == 0);
	LASSERT(lp->lp_rtr_refcount == 0);
	LASSERT(list_empty(&lp->lp_peer_nets));
	LASSERT(list_empty(&lp->lp_peer_list));
	LASSERT(list_empty(&lp->lp_dc_list));

	if (lp->lp_data)
		lnet_ping_buffer_decref(lp->lp_data);

	/*
	 * If there are messages still on the pending queue, then make
	 * sure to queue them on the ln_msg_resend list so they can be
	 * resent at a later point if the discovery thread is still
	 * running.
	 * If the discovery thread has stopped, then the wakeup will be a
	 * no-op, and it is expected that lnet_shutdown_lndnets() will
	 * eventually be called, which will traverse this list and
	 * finalize the messages on it.
	 * We cannot resend them now because we're holding the cpt lock,
	 * and releasing the lock could cause an inconsistent state.
	 */
	spin_lock(&the_lnet.ln_msg_resend_lock);
	spin_lock(&lp->lp_lock);
	list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
	spin_unlock(&lp->lp_lock);
	spin_unlock(&the_lnet.ln_msg_resend_lock);
	wake_up(&the_lnet.ln_dc_waitq);

	LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer_net *lpn;
	struct lnet_peer *lp;

	/*
	 * Belts and suspenders: gracefully handle teardown of a
	 * partially connected peer_ni.
	 */
	lpn = lpni->lpni_peer_net;

	list_del_init(&lpni->lpni_peer_nis);
	/*
	 * If there are no lpni's left, we detach lpn from
	 * lp_peer_nets, so it cannot be found anymore.
	 */
	if (list_empty(&lpn->lpn_peer_nis))
		list_del_init(&lpn->lpn_peer_nets);

	/* Update peer NID count. */
	lp = lpn->lpn_peer;
	lp->lp_nnis--;

	/*
	 * If there are no more peer nets, make the peer unfindable
	 * via the peer_tables.
	 *
	 * Otherwise, if the peer is DISCOVERED, tell discovery to
	 * take another look at it. This is a no-op if discovery for
	 * this peer did the detaching.
	 */
	if (list_empty(&lp->lp_peer_nets)) {
		list_del_init(&lp->lp_peer_list);
		ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
		ptable->pt_peers--;
	} else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
		/* Discovery isn't running, nothing to do here. */
	} else if (lp->lp_state & LNET_PEER_DISCOVERED) {
		lnet_peer_queue_for_discovery(lp);
		wake_up(&the_lnet.ln_dc_waitq);
	}
	CDEBUG(D_NET, "peer %s NID %s\n",
		libcfs_nidstr(&lp->lp_primary_nid),
		libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
	struct lnet_peer_table *ptable = NULL;

	/* don't remove a peer_ni if it's also a gateway */
	if (lnet_isrouter(lpni) && !force) {
		CERROR("Peer NI %s is a gateway. Cannot delete it\n",
		       libcfs_nidstr(&lpni->lpni_nid));
		return -EBUSY;
	}

	lnet_peer_remove_from_remote_list(lpni);

	/* remove peer ni from the hash list. */
	list_del_init(&lpni->lpni_hashlist);

	/*
	 * indicate the peer is being deleted so the monitor thread can
	 * remove it from the recovery queue.
	 */
	spin_lock(&lpni->lpni_lock);
	lpni->lpni_state |= LNET_PEER_NI_DELETING;
	spin_unlock(&lpni->lpni_lock);

	/* decrement the ref count on the peer table */
	ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

	/*
	 * The peer_ni can no longer be found with a lookup. But there
	 * can be current users, so keep track of it on the zombie
	 * list until the reference count has gone to zero.
	 *
	 * The last reference may be lost in a place where the
	 * lnet_net_lock locks only a single cpt, and that cpt may not
	 * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
	 * has its own lock.
	 */
	spin_lock(&ptable->pt_zombie_lock);
	list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
	ptable->pt_zombies++;
	spin_unlock(&ptable->pt_zombie_lock);

	/* no need to keep this peer_ni on the hierarchy anymore */
	lnet_peer_detach_peer_ni_locked(lpni);

	/* remove hashlist reference on peer_ni */
	lnet_peer_ni_decref_locked(lpni);

	return 0;
}

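/*
 * Illustrative sketch (an assumption, not a verbatim excerpt): the
 * zombie accounting above pairs with a release side, where dropping
 * the final reference takes the lpni off pt_zombie_list under
 * pt_zombie_lock and decrements the counter, conceptually:
 *
 *	spin_lock(&ptable->pt_zombie_lock);
 *	list_del_init(&lpni->lpni_hashlist);
 *	ptable->pt_zombies--;
 *	spin_unlock(&ptable->pt_zombie_lock);
 *
 * Shutdown then waits for pt_zombies to reach zero in
 * lnet_peer_ni_finalize_wait() below. A per-table zombie lock is used
 * because the last reference can be dropped while holding the
 * lnet_net_lock of a different cpt than lpni_cpt.
 */
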
void lnet_peer_uninit(void)
{
	struct lnet_peer_ni *lpni, *tmp;

	lnet_net_lock(LNET_LOCK_EX);

	/* remove all peer_nis from the remote peer and the hash list */
	list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
				 lpni_on_remote_peer_ni_list)
		lnet_peer_ni_del_locked(lpni, false);

	lnet_peer_tables_destroy();

	lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
	struct lnet_peer_ni *lpni = NULL, *lpni2;
	int rc = 0, rc2 = 0;

	CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

	spin_lock(&peer->lp_lock);
	peer->lp_state |= LNET_PEER_MARK_DELETED;
	spin_unlock(&peer->lp_lock);

	lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
	while (lpni != NULL) {
		lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
		rc = lnet_peer_ni_del_locked(lpni, false);
		if (rc != 0)
			rc2 = rc;
		lpni = lpni2;
	}

	return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
	struct lnet_handle_md ping_mdh;
	struct lnet_handle_md push_mdh;

	LNetInvalidateMDHandle(&ping_mdh);
	LNetInvalidateMDHandle(&push_mdh);

	spin_lock(&lp->lp_lock);
	if (lp->lp_state & LNET_PEER_PING_SENT) {
		ping_mdh = lp->lp_ping_mdh;
		LNetInvalidateMDHandle(&lp->lp_ping_mdh);
	}
	if (lp->lp_state & LNET_PEER_PUSH_SENT) {
		push_mdh = lp->lp_push_mdh;
		LNetInvalidateMDHandle(&lp->lp_push_mdh);
	}
	spin_unlock(&lp->lp_lock);

	if (!LNetMDHandleIsInvalid(ping_mdh))
		LNetMDUnlink(ping_mdh);
	if (!LNetMDHandleIsInvalid(push_mdh))
		LNetMDUnlink(push_mdh);
}

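/*
 * The function above shows a recurring idiom in this file: snapshot the
 * MD handles under lp_lock, then call LNetMDUnlink() only after the
 * spinlock is dropped, since unlinking can fire event callbacks that
 * must not run under lp_lock. A minimal sketch of the same pattern
 * (SOME_SENT_FLAG and lp_some_mdh are hypothetical names):
 *
 *	struct lnet_handle_md mdh;
 *
 *	LNetInvalidateMDHandle(&mdh);
 *	spin_lock(&lp->lp_lock);
 *	if (lp->lp_state & SOME_SENT_FLAG) {
 *		mdh = lp->lp_some_mdh;
 *		LNetInvalidateMDHandle(&lp->lp_some_mdh);
 *	}
 *	spin_unlock(&lp->lp_lock);
 *	if (!LNetMDHandleIsInvalid(mdh))
 *		LNetMDUnlink(mdh);
 */
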
static int
lnet_peer_del(struct lnet_peer *peer)
{
	int rc;

	lnet_peer_cancel_discovery(peer);
	lnet_net_lock(LNET_LOCK_EX);
	rc = lnet_peer_del_locked(peer);
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
		  unsigned int flags)
{
	struct lnet_peer_ni *lpni;
	struct lnet_nid primary_nid = lp->lp_primary_nid;
	int rc = 0;
	bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

	if (!(flags & LNET_PEER_CONFIGURED)) {
		if (lp->lp_state & LNET_PEER_CONFIGURED) {
			rc = -EPERM;
			goto out;
		}
	}

	/* If we're asked to lock down the primary NID we shouldn't be
	 * deleting it
	 */
	if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
	    nid_same(&primary_nid, nid)) {
		rc = -EPERM;
		goto out;
	}

	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out;
	}
	lnet_peer_ni_decref_locked(lpni);
	if (lp != lpni->lpni_peer_net->lpn_peer) {
		rc = -ECHILD;
		goto out;
	}

	/*
	 * This function only allows deletion of the primary NID if it
	 * is the only NID.
	 */
	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
		rc = -EBUSY;
		goto out;
	}

	lnet_net_lock(LNET_LOCK_EX);

	if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
		struct lnet_peer_ni *lpni2;
		/* assign the next peer_ni to be the primary */
		lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
		LASSERT(lpni2);
		lp->lp_primary_nid = lpni2->lpni_nid;
	}
	rc = lnet_peer_ni_del_locked(lpni, force);

	lnet_net_unlock(LNET_LOCK_EX);

out:
	CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
	       libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
	       flags, rc);

	return rc;
}

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
			       struct lnet_peer_table *ptable)
{
	int			 i;
	struct lnet_peer_ni	*next;
	struct lnet_peer_ni	*lpni;
	struct lnet_peer	*peer;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != NULL && net != lpni->lpni_net)
				continue;

			peer = lpni->lpni_peer_net->lpn_peer;
			if (!nid_same(&peer->lp_primary_nid,
				       &lpni->lpni_nid)) {
				lnet_peer_ni_del_locked(lpni, false);
				continue;
			}
			/*
			 * Removing the primary NID implies removing
			 * the entire peer. Advance next beyond any
			 * peer_ni that belongs to the same peer.
			 */
			list_for_each_entry_from(next, &ptable->pt_hash[i],
						 lpni_hashlist) {
				if (next->lpni_peer_net->lpn_peer != peer)
					break;
			}
			lnet_peer_del_locked(peer);
		}
	}
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
	wait_var_event_warning(&ptable->pt_zombies,
			       ptable->pt_zombies == 0,
			       "Waiting for %d zombies on peer table\n",
			       ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
				struct lnet_peer_table *ptable)
{
	struct lnet_peer_ni	*lp;
	struct lnet_peer_ni	*tmp;
	struct lnet_nid		gw_nid;
	int			i;

	for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
		list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
					 lpni_hashlist) {
			if (net != lp->lpni_net)
				continue;

			if (!lnet_isrouter(lp))
				continue;

			gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

			lnet_net_unlock(LNET_LOCK_EX);
			lnet_del_route(LNET_NET_ANY, &gw_nid);
			lnet_net_lock(LNET_LOCK_EX);
		}
	}
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
	int i;
	struct lnet_peer_table *ptable;

	LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
	/* If just deleting the peers for a NI, get rid of any routes these
	 * peers are gateways for. */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_del_rtrs_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	/* Start the cleanup process */
	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
		lnet_net_lock(LNET_LOCK_EX);
		lnet_peer_table_cleanup_locked(net, ptable);
		lnet_net_unlock(LNET_LOCK_EX);
	}

	cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
		lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
	struct list_head	*peers;
	struct lnet_peer_ni	*lp;

	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		return NULL;

	peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
	list_for_each_entry(lp, peers, lpni_hashlist) {
		if (nid_same(&lp->lpni_nid, nid)) {
			lnet_peer_ni_addref_locked(lp);
			return lp;
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_table *ptable;
	int cpt;

	cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

	ptable = the_lnet.ln_peer_tables[cpt];
	lpni = lnet_get_peer_ni_locked(ptable, nid);

	return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;

	lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
	if (!lpn)
		return NULL;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (nid_same(&lpni->lpni_nid, nid))
			return lpni;
	}

	return NULL;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer *lp = NULL;
	int cpt;

	cpt = lnet_net_lock_current();
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		lnet_peer_addref_locked(lp);
		lnet_peer_ni_decref_locked(lpni);
	}
	lnet_net_unlock(cpt);

	return lp;
}

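/*
 * Usage sketch for lnet_find_peer() (hypothetical caller, shown for
 * illustration): the lookup takes a reference on the returned peer
 * which the caller must drop, as LNetAddPeer() later in this file does:
 *
 *	struct lnet_peer *lp = lnet_find_peer(&nid);
 *
 *	if (lp) {
 *		pnid = lp->lp_primary_nid;
 *		lnet_peer_decref_locked(lp);
 *	}
 */
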
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
	struct lnet_peer_net *net;

	if (!prev_lpn_id) {
		/* no net id provided; return the first net */
		net = list_first_entry_or_null(&lp->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);

		return net;
	}

	/* find the net after the one provided */
	list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
		if (net->lpn_net_id == prev_lpn_id) {
			/*
			 * if we reached the end of the list loop to the
			 * beginning.
			 */
			if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
				return list_first_entry_or_null(&lp->lp_peer_nets,
								struct lnet_peer_net,
								lpn_peer_nets);
			else
				return list_next_entry(net, lpn_peer_nets);
		}
	}

	return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
			     struct lnet_peer_net *peer_net,
			     struct lnet_peer_ni *prev)
{
	struct lnet_peer_ni *lpni;
	struct lnet_peer_net *net = peer_net;

	if (!prev) {
		if (!net) {
			if (list_empty(&peer->lp_peer_nets))
				return NULL;

			net = list_first_entry(&peer->lp_peer_nets,
					       struct lnet_peer_net,
					       lpn_peer_nets);
		}
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
		/*
		 * if you reached the end of the peer ni list and the peer
		 * net is specified then there are no more peer nis in that
		 * net.
		 */
		if (net)
			return NULL;

		/*
		 * we reached the end of this net ni list. move to the
		 * next net
		 */
		if (prev->lpni_peer_net->lpn_peer_nets.next ==
		    &peer->lp_peer_nets)
			/* no more nets and no more NIs. */
			return NULL;

		/* get the next net */
		net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
				       struct lnet_peer_net,
				       lpn_peer_nets);
		/* get the ni on it */
		lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
					lpni_peer_nis);

		return lpni;
	}

	/* there are more nis left */
	lpni = list_first_entry(&prev->lpni_peer_nis,
				struct lnet_peer_ni, lpni_peer_nis);

	return lpni;
}

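/*
 * The iterator above is typically driven with a NULL-seeded loop, as
 * lnet_peer_del_locked() and lnet_peer_clr_non_mr_pref_nids() in this
 * file do. Sketch of the idiom for walking every NI of a peer:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)))
 *		visit(lpni);	// hypothetical per-NI operation
 *
 * Passing a non-NULL peer_net instead restricts the walk to the NIs of
 * that single network.
 */
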
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
	struct lnet_process_id id;
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	__u32 count = 0;
	__u32 size = 0;
	int lncpt;
	int cpt;
	__u32 i;
	int rc;

	rc = -ESHUTDOWN;
	if (the_lnet.ln_state != LNET_STATE_RUNNING)
		goto done;

	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

	/*
	 * Count the number of peers, and return E2BIG if the buffer
	 * is too small. We'll also return the desired size.
	 */
	rc = -E2BIG;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		count += ptable->pt_peers;
	}
	size = count * sizeof(*ids);
	if (size > *sizep)
		goto done;

	/*
	 * Walk the peer lists and copy out the primary nids.
	 * This is safe because the peer lists are only modified
	 * while the ln_api_mutex is held. So we don't need to
	 * hold the lnet_net_lock as well, and can therefore
	 * directly call copy_to_user().
	 */
	rc = -EFAULT;
	memset(&id, 0, sizeof(id));
	id.pid = LNET_PID_LUSTRE;
	i = 0;
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (!nid_is_nid4(&lp->lp_primary_nid))
				continue;
			if (i >= count)
				goto done;
			id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
			if (copy_to_user(&ids[i], &id, sizeof(id)))
				goto done;
			i++;
		}
	}
	rc = 0;
done:
	*countp = count;
	*sizep = size;
	return rc;
}

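/*
 * Conceptual sketch of the expected calling convention (an assumption
 * based on the -E2BIG handling above, not a verbatim excerpt): probe
 * with a zero-sized buffer to learn the required size, then retry:
 *
 *	u32 count = 0, size = 0;
 *
 *	if (lnet_get_peer_list(&count, &size, NULL) == -E2BIG) {
 *		ids = alloc_buffer(size);	// hypothetical allocation
 *		rc = lnet_get_peer_list(&count, &size, ids);
 *	}
 *
 * On -E2BIG the desired size is still returned through *sizep, so the
 * caller can allocate exactly count entries of struct lnet_process_id.
 */
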
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	int lncpt;
	int cpt;

	lnet_net_lock(LNET_LOCK_EX);
	if (lnet_peer_discovery_disabled)
		force = 0;
	lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			if (force) {
				spin_lock(&lp->lp_lock);
				if (lp->lp_state & LNET_PEER_MULTI_RAIL)
					lp->lp_state |= LNET_PEER_FORCE_PUSH;
				spin_unlock(&lp->lp_lock);
			}
			if (lnet_peer_needs_push(lp))
				lnet_peer_queue_for_discovery(lp);
		}
	}
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_dc_waitq);
}

/* Check whether the NID is among the preferred gateways for the remote peer.
 * Return:
 *	false: the list is empty, or the NID is not in it
 *	true: the NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
			     struct lnet_nid *gw_nid)
{
	struct lnet_nid_list *ne;

	CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid),
	       list_empty(&lpni->lpni_rtr_pref_nids));

	if (list_empty(&lpni->lpni_rtr_pref_nids))
		return false;

	/* iterate through all the preferred NIDs and see if any of them
	 * matches the provided gw_nid
	 */
	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
		       libcfs_nidstr(&ne->nl_nid),
		       libcfs_nidstr(gw_nid));
		if (nid_same(&ne->nl_nid, gw_nid))
			return true;
	}

	return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;
	int cpt = lpni->lpni_cpt;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(cpt);
	list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
	lnet_net_unlock(cpt);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
		       struct lnet_nid *gw_nid)
{
	int cpt = lpni->lpni_cpt;
	struct lnet_nid_list *ne = NULL;

	/* This function is called with the api_mutex held. While the
	 * api_mutex is held the list cannot be modified, as it is only
	 * modified as a result of applying a UDSP, and that happens
	 * under the api_mutex lock.
	 */
	__must_hold(&the_lnet.ln_api_mutex);

	list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
		if (nid_same(&ne->nl_nid, gw_nid))
			return -EEXIST;
	}

	LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
	if (!ne)
		return -ENOMEM;

	ne->nl_nid = *gw_nid;

	/* Lock the cpt to protect against addition and checks in the
	 * selection algorithm
	 */
	lnet_net_lock(cpt);
	list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
	lnet_net_unlock(cpt);

	return 0;
}

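/*
 * Summary of the locking model above (illustrative, inferred from the
 * comments in this file): readers in the selection path check the list
 * under the cpt-scoped lnet_net_lock via lnet_peer_is_pref_rtr_locked(),
 * while all modifications happen under ln_api_mutex (UDSP application).
 * A hypothetical configuration-path caller would therefore pair the
 * calls like:
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_peer_add_pref_rtr(lpni, &gw_nid);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */
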
/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g. whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_nid_list *ne;

	if (lpni->lpni_pref_nnids == 0)
		return false;
	if (lpni->lpni_pref_nnids == 1)
		return nid_same(&lpni->lpni_pref.nid, nid);
	list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
		if (nid_same(&ne->nl_nid, nid))
			return true;
	}
	return false;
}

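/*
 * The nnids checks above rely on lpni_pref being a union; a sketch of
 * the assumed layout (see the lnet_peer_ni definition in the headers
 * for the authoritative version):
 *
 *	union {
 *		struct lnet_nid  nid;	// used when lpni_pref_nnids == 1
 *		struct list_head nids;	// used when lpni_pref_nnids > 1
 *	} lpni_pref;
 *
 * A single preferred NID is stored inline; only when a second NID is
 * added does lnet_peer_add_pref_nid() below migrate the inline NID
 * onto the list.
 */
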
/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
				  struct lnet_nid *nid)
{
	int rc = 0;

	if (!nid)
		return -EINVAL;
	spin_lock(&lpni->lpni_lock);
	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
	} else if (lpni->lpni_pref_nnids > 0) {
		rc = -EPERM;
	} else if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
		lpni->lpni_pref_nnids = 1;
		lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
	return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
	int rc = 0;

	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
		lpni->lpni_pref_nnids = 0;
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	} else if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
	} else {
		rc = -EPERM;
	}
	spin_unlock(&lpni->lpni_lock);

	CDEBUG(D_NET, "peer %s: %d\n",
	       libcfs_nidstr(&lpni->lpni_nid), rc);
	return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
	lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne1 = NULL;
	struct lnet_nid_list *ne2 = NULL;
	struct lnet_nid *tmp_nid = NULL;
	int rc = 0;

	if (LNET_NID_IS_ANY(nid)) {
		rc = -EINVAL;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1 &&
	    nid_same(&lpni->lpni_pref.nid, nid)) {
		rc = -EEXIST;
		goto out;
	}

	/* A non-MR node may have only one preferred NI per peer_ni */
	if (lpni->lpni_pref_nnids > 0 &&
	    !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
		rc = -EPERM;
		goto out;
	}

	/* add the new preferred nid to the list of preferred nids */
	if (lpni->lpni_pref_nnids != 0) {
		size_t alloc_size = sizeof(*ne1);

		if (lpni->lpni_pref_nnids == 1) {
			tmp_nid = &lpni->lpni_pref.nid;
			INIT_LIST_HEAD(&lpni->lpni_pref.nids);
		}

		list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne1->nl_nid, nid)) {
				rc = -EEXIST;
				goto out;
			}
		}

		LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
				 alloc_size);
		if (!ne1) {
			rc = -ENOMEM;
			goto out;
		}

		/* move the originally stored nid to the list */
		if (lpni->lpni_pref_nnids == 1) {
			LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
				lpni->lpni_cpt, alloc_size);
			if (!ne2) {
				rc = -ENOMEM;
				goto out;
			}
			INIT_LIST_HEAD(&ne2->nl_list);
			ne2->nl_nid = *tmp_nid;
		}
		ne1->nl_nid = *nid;
	}

	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 0) {
		lpni->lpni_pref.nid = *nid;
	} else {
		if (ne2)
			list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
		list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
	}
	lpni->lpni_pref_nnids++;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

out:
	if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
		spin_lock(&lpni->lpni_lock);
		lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
		spin_unlock(&lpni->lpni_lock);
	}
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
	return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
	struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
	struct lnet_nid_list *ne = NULL;
	int rc = 0;

	if (lpni->lpni_pref_nnids == 0) {
		rc = -ENOENT;
		goto out;
	}

	if (lpni->lpni_pref_nnids == 1) {
		if (!nid_same(&lpni->lpni_pref.nid, nid)) {
			rc = -ENOENT;
			goto out;
		}
	} else {
		list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
			if (nid_same(&ne->nl_nid, nid))
				goto remove_nid_entry;
		}
		rc = -ENOENT;
		ne = NULL;
		goto out;
	}

remove_nid_entry:
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lpni->lpni_lock);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else {
		list_del_init(&ne->nl_list);
		if (lpni->lpni_pref_nnids == 2) {
			struct lnet_nid_list *ne, *tmp;

			list_for_each_entry_safe(ne, tmp,
						 &lpni->lpni_pref.nids,
						 nl_list) {
				lpni->lpni_pref.nid = ne->nl_nid;
				list_del_init(&ne->nl_list);
				LIBCFS_FREE(ne, sizeof(*ne));
			}
		}
	}
	lpni->lpni_pref_nnids--;
	lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
	spin_unlock(&lpni->lpni_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	if (ne)
		LIBCFS_FREE(ne, sizeof(*ne));
out:
	CDEBUG(D_NET, "peer %s nid %s: %d\n",
	       libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
	return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
	struct list_head zombies;
	struct lnet_nid_list *ne;
	struct lnet_nid_list *tmp;

	INIT_LIST_HEAD(&zombies);

	lnet_net_lock(LNET_LOCK_EX);
	if (lpni->lpni_pref_nnids == 1)
		lpni->lpni_pref.nid = LNET_ANY_NID;
	else if (lpni->lpni_pref_nnids > 1)
		list_splice_init(&lpni->lpni_pref.nids, &zombies);
	lpni->lpni_pref_nnids = 0;
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
		list_del_init(&ne->nl_list);
		LIBCFS_FREE(ne, sizeof(*ne));
	}
}

void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
	struct lnet_peer_ni *lpni;

	*result = *nid;
	lpni = lnet_peer_ni_find_locked(nid);
	if (lpni) {
		*result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
	if (lnet_peer_discovery_disabled)
		return true;

	if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
	    (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
		return true;
	}

	return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
	bool rc = false;

	spin_lock(&lp->lp_lock);
	rc = lnet_is_discovery_disabled_locked(lp);
	spin_unlock(&lp->lp_lock);

	return rc;
}

int
LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
{
	struct lnet_nid pnid = LNET_ANY_NID;
	bool mr;
	int i, rc;
	int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;

	if (!nids || num_nids < 1)
		return -EINVAL;

	rc = LNetNIInit(LNET_PID_ANY);
	if (rc < 0)
		return rc;

	mutex_lock(&the_lnet.ln_api_mutex);

	mr = lnet_peer_discovery_disabled == 0;

	rc = 0;
	for (i = 0; i < num_nids; i++) {
		if (nid_is_lo0(&nids[i]))
			continue;

		if (LNET_NID_IS_ANY(&pnid)) {
			pnid = nids[i];
			rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
			if (rc == -EALREADY) {
				struct lnet_peer *lp;

				CDEBUG(D_NET, "A peer exists for NID %s\n",
				       libcfs_nidstr(&pnid));
				rc = 0;
				/* Adds a refcount */
				lp = lnet_find_peer(&pnid);
				LASSERT(lp);
				pnid = lp->lp_primary_nid;
				/* Drop refcount from lookup */
				lnet_peer_decref_locked(lp);
			}
		} else if (lnet_peer_discovery_disabled) {
			rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
					      flags);
		} else {
			rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
					      flags);
		}

		if (rc && rc != -EEXIST)
			goto unlock;
	}

unlock:
	mutex_unlock(&the_lnet.ln_api_mutex);

	LNetNIFini();

	return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

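/*
 * Usage sketch for LNetAddPeer() (hypothetical caller, e.g. a module
 * configuring a multi-NID server peer; the NID strings and the use of
 * libcfs_strnid() are illustrative assumptions):
 *
 *	struct lnet_nid nids[2];
 *
 *	libcfs_strnid(&nids[0], "10.0.0.1@tcp");
 *	libcfs_strnid(&nids[1], "10.0.1.1@tcp");
 *	rc = LNetAddPeer(nids, 2);
 *
 * The first non-loopback NID becomes (or resolves to) the primary NID;
 * subsequent NIDs are added to that peer, except when discovery is
 * disabled, in which case each NID becomes its own peer.
 */
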
void LNetPrimaryNID(struct lnet_nid *nid)
{
	struct lnet_peer *lp;
	struct lnet_peer_ni *lpni;
	struct lnet_nid orig;
	int rc = 0;
	int cpt;

	if (!nid || nid_is_lo0(nid))
		return;
	orig = *nid;

	cpt = lnet_net_lock_current();
	lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
	if (IS_ERR(lpni)) {
		rc = PTR_ERR(lpni);
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	/* If discovery is disabled locally then we needn't bother running
	 * discovery here because discovery will not modify whatever
	 * primary NID is currently set for this peer. If the specified peer is
	 * down then this discovery can introduce long delays into the mount
	 * process, so skip it if it isn't necessary.
	 */
again:
	spin_lock(&lp->lp_lock);
	if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}

	/* DD disabled, nothing to do */
	if (lnet_peer_discovery_disabled) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}

	/* Peer already up to date, nothing to do */
	if (lnet_peer_is_uptodate_locked(lp)) {
		*nid = lp->lp_primary_nid;
		spin_unlock(&lp->lp_lock);
		goto out_decref;
	}
	spin_unlock(&lp->lp_lock);

	/* If primary nid locking is enabled, discovery is performed
	 * in the background.
	 * If primary nid locking is disabled, discovery blocks here.
	 * Messages to the peer will not go through until the discovery is
	 * complete.
	 */
	if (lock_prim_nid)
		rc = lnet_discover_peer_locked(lpni, cpt, false);
	else
		rc = lnet_discover_peer_locked(lpni, cpt, true);
	if (rc)
		goto out_decref;

	/* The lpni (or lp) for this NID may have changed and our ref is
	 * the only thing keeping the old one around. Release the ref
	 * and lookup the lpni again
	 */
	lnet_peer_ni_decref_locked(lpni);
	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		rc = -ENOENT;
		goto out_unlock;
	}
	lp = lpni->lpni_peer_net->lpn_peer;

	if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
		goto again;
	*nid = lp->lp_primary_nid;
out_decref:
	lnet_peer_ni_decref_locked(lpni);
out_unlock:
	lnet_net_unlock(cpt);

	CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
	       libcfs_nidstr(nid), rc);
}
EXPORT_SYMBOL(LNetPrimaryNID);

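/*
 * Usage sketch for LNetPrimaryNID() (illustrative; this mirrors how a
 * client might resolve a configured NID to a peer's primary NID, and
 * the NID string and libcfs_strnid() call are assumptions):
 *
 *	struct lnet_nid nid;
 *
 *	libcfs_strnid(&nid, "10.0.0.2@tcp");	// any NID of the peer
 *	LNetPrimaryNID(&nid);			// may block on discovery
 *	// nid now holds the peer's primary NID; it is left unchanged
 *	// on failure, or when discovery cannot improve on it
 */
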
struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
	struct lnet_peer_net *peer_net;

	list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
		if (peer_net->lpn_net_id == net_id)
			return peer_net;
	}
	return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
			 struct lnet_peer_net *lpn,
			 struct lnet_peer_ni *lpni,
			 unsigned flags)
{
	struct lnet_peer_table *ptable;
	bool new_lpn = false;
	int rc;

	/* Install the new peer_ni */
	lnet_net_lock(LNET_LOCK_EX);
	/* Add peer_ni to global peer table hash, if necessary. */
	if (list_empty(&lpni->lpni_hashlist)) {
		int hash = lnet_nid2peerhash(&lpni->lpni_nid);

		ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
		list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
		ptable->pt_version++;
		lnet_peer_ni_addref_locked(lpni);
	}

	/* Detach the peer_ni from an existing peer, if necessary. */
	if (lpni->lpni_peer_net) {
		LASSERT(lpni->lpni_peer_net != lpn);
		LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
		lnet_peer_detach_peer_ni_locked(lpni);
		lnet_peer_net_decref_locked(lpni->lpni_peer_net);
		lpni->lpni_peer_net = NULL;
	}

	/* Add peer_ni to peer_net */
	lpni->lpni_peer_net = lpn;
	if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
		list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	else
		list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
	lnet_update_peer_net_healthv(lpni);
	lnet_peer_net_addref_locked(lpn);

	/* Add peer_net to peer */
	if (!lpn->lpn_peer) {
		new_lpn = true;
		lpn->lpn_peer = lp;
		if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
			list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		else
			list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
		lnet_peer_addref_locked(lp);
	}

	/* Add peer to global peer list, if necessary */
	ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
	if (list_empty(&lp->lp_peer_list)) {
		list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
		ptable->pt_peers++;
	}

	/* Update peer state */
	spin_lock(&lp->lp_lock);
	if (flags & LNET_PEER_CONFIGURED) {
		if (!(lp->lp_state & LNET_PEER_CONFIGURED))
			lp->lp_state |= LNET_PEER_CONFIGURED;
	}
	if (flags & LNET_PEER_MULTI_RAIL) {
		if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
			lp->lp_state |= LNET_PEER_MULTI_RAIL;
			lnet_peer_clr_non_mr_pref_nids(lp);
		}
	}
	if (flags & LNET_PEER_LOCK_PRIMARY) {
		lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
		lp->lp_prim_lock_ts = ktime_get_ns();
	}
	spin_unlock(&lp->lp_lock);

	lp->lp_nnis++;

	/* apply UDSPs */
	if (new_lpn) {
		rc = lnet_udsp_apply_policies_on_lpn(lpn);
		if (rc)
			CERROR("Failed to apply UDSPs on lpn %s\n",
			       libcfs_net2str(lpn->lpn_net_id));
	}
	rc = lnet_udsp_apply_policies_on_lpni(lpni);
	if (rc)
		CERROR("Failed to apply UDSPs on lpni %s\n",
		       libcfs_nidstr(&lpni->lpni_nid));

	CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
	       libcfs_nidstr(&lp->lp_primary_nid),
	       libcfs_nidstr(&lpni->lpni_nid), flags);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	return 0;
}

1619 /*
1620  * Create a new peer, with nid as its primary nid.
1621  *
1622  * Call with the lnet_api_mutex held.
1623  */
1624 static int
1625 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1626 {
1627         struct lnet_peer *lp;
1628         struct lnet_peer_net *lpn;
1629         struct lnet_peer_ni *lpni;
1630         int rc = 0;
1631
1632         LASSERT(nid);
1633
1634         /*
1635          * No need for the lnet_net_lock here, because the
1636          * lnet_api_mutex is held.
1637          */
1638         lpni = lnet_peer_ni_find_locked(nid);
1639         if (lpni) {
1640                 /* A peer with this NID already exists. */
1641                 lp = lpni->lpni_peer_net->lpn_peer;
1642                 lnet_peer_ni_decref_locked(lpni);
1643                 /*
1644                  * This is an error if the peer was configured and the
1645                  * primary NID differs or an attempt is made to change
1646                  * the Multi-Rail flag. Otherwise the assumption is
1647                  * that an existing peer is being modified.
1648                  */
1649                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1650                         if (!nid_same(&lp->lp_primary_nid, nid))
1651                                 rc = -EEXIST;
1652                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1653                                 rc = -EPERM;
1654                         goto out;
1655                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1656                         if (nid_same(&lp->lp_primary_nid, nid))
1657                                 rc = -EEXIST;
1658                         /* We're trying to recreate an existing peer
1659                          * whose primary NID has already been locked.
1660                          * This is likely due to two servers existing
1661                          * on the same node, so we'll just refer to
1662                          * that node with the primary NID which was
1663                          * first added by Lustre.
1664                          */
1665                         else
1666                                 rc = -EALREADY;
1667                         goto out;
1668                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1669                         /* if not recreating peer as configured and
1670                          * not locking primary nid, no need to
1671                          * do anything if primary nid is not being changed
1672                          */
1673                         if (nid_same(&lp->lp_primary_nid, nid)) {
1674                                 rc = -EEXIST;
1675                                 goto out;
1676                         }
1677                 }
1678                 /* Delete and recreate the peer.
1679                  * We can get here:
1680                  * 1. If the peer is being recreated as a configured NID;
1681                  * 2. If there already exists a peer which
1682                  *    was discovered manually, but is recreated via Lustre
1683                  *    with LNET_PEER_LOCK_PRIMARY.
1684                  */
1685                 rc = lnet_peer_del(lp);
1686                 if (rc)
1687                         goto out;
1688         }
1689
1690         /* Create peer, peer_net, and peer_ni. */
1691         rc = -ENOMEM;
1692         lp = lnet_peer_alloc(nid);
1693         if (!lp)
1694                 goto out;
1695         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1696         if (!lpn)
1697                 goto out_free_lp;
1698         lpni = lnet_peer_ni_alloc(nid);
1699         if (!lpni)
1700                 goto out_free_lpn;
1701
1702         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1703
1704 out_free_lpn:
1705         LIBCFS_FREE(lpn, sizeof(*lpn));
1706 out_free_lp:
1707         LIBCFS_FREE(lp, sizeof(*lp));
1708 out:
1709         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1710                libcfs_nidstr(nid), flags, rc);
1711         return rc;
1712 }
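
/*
 * Editorial sketch (hypothetical flag combination, not part of the
 * original file): creating a locked-primary Multi-Rail peer through
 * lnet_peer_add() would look like
 *
 *	rc = lnet_peer_add(&nid, LNET_PEER_CONFIGURED |
 *				 LNET_PEER_MULTI_RAIL |
 *				 LNET_PEER_LOCK_PRIMARY);
 *
 * which is the path taken from lnet_add_peer_ni() below when no
 * secondary NID is given.
 */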
1713
1714 /*
1715  * Add a NID to a peer. Call with ln_api_mutex held.
1716  *
1717  * Error codes:
1718  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1719  *  -EEXIST:   The NID was configured by DLC for a different peer.
1720  *  -ENOMEM:   Out of memory.
1721  *  -ENOTUNIQ: Adding a second peer NID on a single network of a
1722  *             non-multi-rail peer.
1723  */
1724 static int
1725 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1726                   unsigned int flags)
1727 {
1728         struct lnet_peer_net *lpn;
1729         struct lnet_peer_ni *lpni;
1730         int rc = 0;
1731
1732         LASSERT(lp);
1733         LASSERT(nid);
1734
1735         /* A configured peer can only be updated through configuration. */
1736         if (!(flags & LNET_PEER_CONFIGURED)) {
1737                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1738                         rc = -EPERM;
1739                         goto out;
1740                 }
1741         }
1742
1743         /*
1744          * The MULTI_RAIL flag can be set but not cleared, because
1745          * that would leave the peer struct in an invalid state.
1746          */
1747         if (flags & LNET_PEER_MULTI_RAIL) {
1748                 spin_lock(&lp->lp_lock);
1749                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1750                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1751                         lnet_peer_clr_non_mr_pref_nids(lp);
1752                 }
1753                 spin_unlock(&lp->lp_lock);
1754         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1755                 rc = -EPERM;
1756                 goto out;
1757         }
1758
1759         lpni = lnet_peer_ni_find_locked(nid);
1760         if (lpni) {
1761                 /*
1762                  * A peer_ni already exists. This is only a problem if
1763                  * it is not connected to this peer and was configured
1764                  * by DLC.
1765                  */
1766                 if (lpni->lpni_peer_net->lpn_peer == lp)
1767                         goto out_free_lpni;
1768                 if (lnet_peer_ni_is_configured(lpni)) {
1769                         rc = -EEXIST;
1770                         goto out_free_lpni;
1771                 }
1772                 /* If this is the primary NID, destroy the peer. */
1773                 if (lnet_peer_ni_is_primary(lpni)) {
1774                         struct lnet_peer *lp2 =
1775                                 lpni->lpni_peer_net->lpn_peer;
1776                         int rtr_refcount = lp2->lp_rtr_refcount;
1777                         unsigned int peer2_state;
1778                         __u64 peer2_prim_lock_ts;
1779
1780                         /* If there's another peer that this NID belongs to
1781                          * and the primary NID for that peer is locked,
1782                          * then, unless it is the only NID, we don't want
1783                          * to mess with it.
1784                          * The configuration is wrong at this point, though,
1785                          * so we should flag both of these peers as being
1786                          * in a bad state.
1787                          */
1788                         spin_lock(&lp2->lp_lock);
1789                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1790                             lp2->lp_nnis > 1) {
1791                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1792                                 spin_unlock(&lp2->lp_lock);
1793                                 spin_lock(&lp->lp_lock);
1794                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1795                                 spin_unlock(&lp->lp_lock);
1796                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1797                                         libcfs_nidstr(&lp->lp_primary_nid),
1798                                         libcfs_nidstr(nid),
1799                                         libcfs_nidstr(&lp2->lp_primary_nid));
1800                                 goto out_free_lpni;
1801                         }
1802                         peer2_state = lp2->lp_state;
1803                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1804                         spin_unlock(&lp2->lp_lock);
1805
1806                         /* The NID which got locked the earliest should be
1807                          * kept as primary. If the peers were
1808                          * created by Lustre, this allows the
1809                          * first listed NID to stay primary as intended
1810                          * for the purpose of communicating with Lustre,
1811                          * even if peer discovery succeeded using
1812                          * a different NID of the MR peer.
1813                          */
1814                         spin_lock(&lp->lp_lock);
1815                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1816                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1817                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1818                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1819                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1820                                 lp->lp_primary_nid = *nid;
1821                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1822                         }
1823                         spin_unlock(&lp->lp_lock);
1824                         /*
1825                          * if we're trying to delete a router it means
1826                          * we're moving this peer NI to a new peer so must
1827                          * transfer router properties to the new peer
1828                          */
1829                         if (rtr_refcount > 0) {
1830                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1831                                 lnet_rtr_transfer_to_peer(lp2, lp);
1832                         }
1833                         lnet_peer_del(lp2);
1834                         lnet_peer_ni_decref_locked(lpni);
1835                         lpni = lnet_peer_ni_alloc(nid);
1836                         if (!lpni) {
1837                                 rc = -ENOMEM;
1838                                 goto out_free_lpni;
1839                         }
1840                 }
1841         } else {
1842                 lpni = lnet_peer_ni_alloc(nid);
1843                 if (!lpni) {
1844                         rc = -ENOMEM;
1845                         goto out_free_lpni;
1846                 }
1847         }
1848
1849         /*
1850          * Get the peer_net. Check that we're not adding a second
1851          * peer_ni on a peer_net of a non-multi-rail peer.
1852          */
1853         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1854         if (!lpn) {
1855                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1856                 if (!lpn) {
1857                         rc = -ENOMEM;
1858                         goto out_free_lpni;
1859                 }
1860         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1861                 rc = -ENOTUNIQ;
1862                 goto out_free_lpni;
1863         }
1864
1865         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1866
1867 out_free_lpni:
1868         lnet_peer_ni_decref_locked(lpni);
1869 out:
1870         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1871                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1872                flags, rc);
1873         return rc;
1874 }
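
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * one way a caller could map the error codes documented above
 * lnet_peer_add_nid() to short diagnostics. Only the codes named in that
 * comment block are handled.
 */
static inline const char *
lnet_peer_add_nid_errstr(int rc)
{
	switch (rc) {
	case 0:
		return "NID added";
	case -EPERM:
		return "non-DLC change to a DLC-configured peer";
	case -EEXIST:
		return "NID configured by DLC for a different peer";
	case -ENOMEM:
		return "out of memory";
	case -ENOTUNIQ:
		return "second NID on one network of a non-MR peer";
	default:
		return "unexpected error";
	}
}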
1875
1876 /*
1877  * Update the primary NID of a peer, if possible.
1878  *
1879  * Call with the lnet_api_mutex held.
1880  */
1881 static int
1882 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1883                           unsigned int flags)
1884 {
1885         struct lnet_nid old = lp->lp_primary_nid;
1886         int rc = 0;
1887
1888         if (nid_same(&lp->lp_primary_nid, nid))
1889                 goto out;
1890
1891         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1892                 lp->lp_primary_nid = *nid;
1893
1894         rc = lnet_peer_add_nid(lp, nid, flags);
1895         if (rc) {
1896                 lp->lp_primary_nid = old;
1897                 goto out;
1898         }
1899 out:
1900         /* If this is a configured peer, or its primary NID has
1901          * been locked, then we don't want to flag this scenario as
1902          * a failure.
1903          */
1904         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1905             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1906                 return 0;
1907
1908         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1909                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1910
1911         return rc;
1912 }
1913
1914 /*
1915  * lpni creation initiated due to traffic, either sending or receiving.
1916  * Callers must hold ln_api_mutex.
1917  * A ref is taken on the lnet_peer_ni returned by this function.
1918  */
1919 static struct lnet_peer_ni *
1920 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1921 __must_hold(&the_lnet.ln_api_mutex)
1922 {
1923         struct lnet_peer *lp = NULL;
1924         struct lnet_peer_net *lpn = NULL;
1925         struct lnet_peer_ni *lpni;
1926         unsigned flags = 0;
1927         int rc = 0;
1928
1929         if (LNET_NID_IS_ANY(nid)) {
1930                 rc = -EINVAL;
1931                 goto out_err;
1932         }
1933
1934         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1935         lpni = lnet_peer_ni_find_locked(nid);
1936         if (lpni) {
1937                 /*
1938                  * We must have raced with another thread. Since we
1939                  * know next to nothing about a peer_ni created by
1940                  * traffic, we just assume everything is ok and
1941                  * return.
1942                  */
1943                 goto out;
1944         }
1945
1946         /* Create peer, peer_net, and peer_ni. */
1947         rc = -ENOMEM;
1948         lp = lnet_peer_alloc(nid);
1949         if (!lp)
1950                 goto out_err;
1951         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1952         if (!lpn)
1953                 goto out_err;
1954         lpni = lnet_peer_ni_alloc(nid);
1955         if (!lpni)
1956                 goto out_err;
1957         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1958
1959         /* lnet_peer_attach_peer_ni() always returns 0 */
1960         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1961
1962         lnet_peer_ni_addref_locked(lpni);
1963
1964 out_err:
1965         if (rc) {
1966                 if (lpn)
1967                         LIBCFS_FREE(lpn, sizeof(*lpn));
1968                 if (lp)
1969                         LIBCFS_FREE(lp, sizeof(*lp));
1970                 lpni = ERR_PTR(rc);
1971         }
1972 out:
1973         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1974         return lpni;
1975 }
1976
1977 /*
1978  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1979  *
1980  * This API handles the following combinations:
1981  *   Create a peer with its primary NI if only the prim_nid is provided.
1982  *   Add a NID to a peer identified by the prim_nid. The peer identified
1983  *   by the prim_nid must already exist.
1984  *   The peer being created may be non-MR.
1985  *
1986  * The caller must hold ln_api_mutex. This prevents the peer from
1987  * being created/modified/deleted by a different thread.
1988  */
1989 static int
1990 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
1991                  unsigned int flags)
1992 __must_hold(&the_lnet.ln_api_mutex)
1993 {
1994         struct lnet_peer *lp = NULL;
1995         struct lnet_peer_ni *lpni;
1996
1997         /* The prim_nid must always be specified */
1998         if (LNET_NID_IS_ANY(prim_nid))
1999                 return -EINVAL;
2000
2001         if (mr)
2002                 flags |= LNET_PEER_MULTI_RAIL;
2003
2004         /*
2005          * If nid isn't specified, we must create a new peer with
2006          * prim_nid as its primary nid.
2007          */
2008         if (LNET_NID_IS_ANY(nid))
2009                 return lnet_peer_add(prim_nid, flags);
2010
2011         /* Look up the prim_nid, which must exist. */
2012         lpni = lnet_peer_ni_find_locked(prim_nid);
2013         if (!lpni)
2014                 return -ENOENT;
2015         lnet_peer_ni_decref_locked(lpni);
2016         lp = lpni->lpni_peer_net->lpn_peer;
2017
2018         /* Peer must have been configured. */
2019         if ((flags & LNET_PEER_CONFIGURED) &&
2020             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2021                 CDEBUG(D_NET, "peer %s was not configured\n",
2022                        libcfs_nidstr(prim_nid));
2023                 return -ENOENT;
2024         }
2025
2026         /* Primary NID must match */
2027         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2028                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2029                        libcfs_nidstr(prim_nid),
2030                        libcfs_nidstr(&lp->lp_primary_nid));
2031                 return -ENODEV;
2032         }
2033
2034         /* Multi-Rail flag must match. */
2035         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2036                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2037                        libcfs_nidstr(prim_nid));
2038                 return -EPERM;
2039         }
2040
2041         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2042                 CDEBUG(D_NET,
2043                        "Don't add temporary peer NI for uptodate peer %s\n",
2044                        libcfs_nidstr(&lp->lp_primary_nid));
2045                 return -EINVAL;
2046         }
2047
2048         return lnet_peer_add_nid(lp, nid, flags);
2049 }
2050
2051 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2052                           bool mr, bool lock_prim)
2053 {
2054         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
2055
2056         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2057 }
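
/*
 * Editorial note (sketch, not part of the original file): the flag
 * expression above relies on the bool lock_prim promoting to 0 or 1,
 * so LNET_PEER_LOCK_PRIMARY is either included whole or not at all.
 * An equivalent, more explicit spelling:
 */
static inline unsigned int
lnet_user_peer_flags_sketch(bool lock_prim)
{
	unsigned int fl = LNET_PEER_CONFIGURED;

	if (lock_prim)
		fl |= LNET_PEER_LOCK_PRIMARY;
	return fl;
}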
2058
2059 static int
2060 lnet_reset_peer(struct lnet_peer *lp)
2061 {
2062         struct lnet_peer_net *lpn, *lpntmp;
2063         struct lnet_peer_ni *lpni, *lpnitmp;
2064         unsigned int flags;
2065         int rc;
2066
2067         lnet_peer_cancel_discovery(lp);
2068
2069         flags = LNET_PEER_CONFIGURED;
2070         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2071                 flags |= LNET_PEER_MULTI_RAIL;
2072
2073         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2074                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2075                                          lpni_peer_nis) {
2076                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2077                                 continue;
2078
2079                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2080                         if (rc) {
2081                                 CERROR("Failed to delete %s from peer %s\n",
2082                                        libcfs_nidstr(&lpni->lpni_nid),
2083                                        libcfs_nidstr(&lp->lp_primary_nid));
2084                         }
2085                 }
2086         }
2087
2088         /* mark it for discovery the next time we use it */
2089         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2090         return 0;
2091 }
2092
2093 /*
2094  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2095  *
2096  * This API handles the following combinations:
2097  *   Delete a NI from a peer if both prim_nid and nid are provided.
2098  *   Delete a peer if only prim_nid is provided.
2099  *   Delete a peer if the nid provided is its primary nid.
2100  *
2101  * The caller must hold ln_api_mutex. This prevents the peer from
2102  * being modified/deleted by a different thread.
2103  */
2104 int
2105 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2106                  int force)
2107 {
2108         struct lnet_peer *lp;
2109         struct lnet_peer_ni *lpni;
2110         unsigned int flags;
2111
2112         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2113                 return -EINVAL;
2114
2115         lpni = lnet_peer_ni_find_locked(prim_nid);
2116         if (!lpni)
2117                 return -ENOENT;
2118         lnet_peer_ni_decref_locked(lpni);
2119         lp = lpni->lpni_peer_net->lpn_peer;
2120
2121         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2122                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2123                        libcfs_nidstr(prim_nid),
2124                        libcfs_nidstr(&lp->lp_primary_nid));
2125                 return -ENODEV;
2126         }
2127
2128         lnet_net_lock(LNET_LOCK_EX);
2129         if (lp->lp_rtr_refcount > 0) {
2130                 lnet_net_unlock(LNET_LOCK_EX);
2131                 CERROR("%s is a router. Can not be deleted\n",
2132                        libcfs_nidstr(prim_nid));
2133                 return -EBUSY;
2134         }
2135         lnet_net_unlock(LNET_LOCK_EX);
2136
2137         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2138                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2139                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2140                                libcfs_nidstr(&lp->lp_primary_nid));
2141                         return lnet_reset_peer(lp);
2142                 } else {
2143                         return lnet_peer_del(lp);
2144                 }
2145         }
2146
2147         flags = LNET_PEER_CONFIGURED;
2148         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2149                 flags |= LNET_PEER_MULTI_RAIL;
2150
2151         return lnet_peer_del_nid(lp, nid, flags);
2152 }
2153
2154 void
2155 lnet_destroy_peer_ni_locked(struct kref *ref)
2156 {
2157         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2158                                                  lpni_kref);
2159         struct lnet_peer_table *ptable;
2160         struct lnet_peer_net *lpn;
2161
2162         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2163
2164         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2165         LASSERT(list_empty(&lpni->lpni_txq));
2166         LASSERT(lpni->lpni_txqnob == 0);
2167         LASSERT(list_empty(&lpni->lpni_peer_nis));
2168         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2169
2170         lpn = lpni->lpni_peer_net;
2171         lpni->lpni_peer_net = NULL;
2172         lpni->lpni_net = NULL;
2173
2174         if (!list_empty(&lpni->lpni_hashlist)) {
2175                 /* remove the peer ni from the zombie list */
2176                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2177                 spin_lock(&ptable->pt_zombie_lock);
2178                 list_del_init(&lpni->lpni_hashlist);
2179                 ptable->pt_zombies--;
2180                 spin_unlock(&ptable->pt_zombie_lock);
2181         }
2182
2183         if (lpni->lpni_pref_nnids > 1) {
2184                 struct lnet_nid_list *ne, *tmp;
2185
2186                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2187                                          nl_list) {
2188                         list_del_init(&ne->nl_list);
2189                         LIBCFS_FREE(ne, sizeof(*ne));
2190                 }
2191         }
2192         LIBCFS_FREE(lpni, sizeof(*lpni));
2193
2194         if (lpn)
2195                 lnet_peer_net_decref_locked(lpn);
2196 }
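
/*
 * Editorial note (assumption, not shown in this file):
 * lnet_destroy_peer_ni_locked() is a kref release callback, so the
 * decref side is expected to be the usual
 *
 *	kref_put(&lpni->lpni_kref, lnet_destroy_peer_ni_locked);
 *
 * pairing, inferred from the container_of() at the top of the function.
 */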
2197
2198 struct lnet_peer_ni *
2199 lnet_nid2peerni_ex(struct lnet_nid *nid)
2200 __must_hold(&the_lnet.ln_api_mutex)
2201 {
2202         struct lnet_peer_ni *lpni = NULL;
2203
2204         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2205                 return ERR_PTR(-ESHUTDOWN);
2206
2207         /*
2208          * find if a peer_ni already exists.
2209          * If so then just return that.
2210          */
2211         lpni = lnet_peer_ni_find_locked(nid);
2212         if (lpni)
2213                 return lpni;
2214
2215         lnet_net_unlock(LNET_LOCK_EX);
2216
2217         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2218
2219         lnet_net_lock(LNET_LOCK_EX);
2220
2221         return lpni;
2222 }
2223
2224 /*
2225  * Get a peer_ni for the given nid, create it if necessary. Takes a
2226  * hold on the peer_ni.
2227  */
2228 struct lnet_peer_ni *
2229 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2230                         struct lnet_nid *pref, int cpt)
2231 {
2232         struct lnet_peer_ni *lpni = NULL;
2233
2234         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2235                 return ERR_PTR(-ESHUTDOWN);
2236
2237         /*
2238          * find if a peer_ni already exists.
2239          * If so then just return that.
2240          */
2241         lpni = lnet_peer_ni_find_locked(nid);
2242         if (lpni)
2243                 return lpni;
2244
2245         /*
2246          * Slow path:
2247          * use the lnet_api_mutex to serialize the creation of the peer_ni
2248          * and the creation/deletion of the local ni/net. When a local ni is
2249          * created, if there exists a set of peer_nis on that network,
2250          * they need to be traversed and updated. When a local NI is
2251          * deleted, which could result in a network being deleted, then
2252          * all peer nis on that network need to be removed as well.
2253          *
2254          * Creation through traffic should also be serialized with
2255          * creation through DLC.
2256          */
2257         lnet_net_unlock(cpt);
2258         mutex_lock(&the_lnet.ln_api_mutex);
2259         /*
2260          * the_lnet.ln_state is only modified under the ln_api_mutex, so a
2261          * single check here is sufficient.
2262          */
2263         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2264                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2265
2266         mutex_unlock(&the_lnet.ln_api_mutex);
2267         lnet_net_lock(cpt);
2268
2269         /* Lock has been dropped, check again for shutdown. */
2270         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2271                 if (!IS_ERR_OR_NULL(lpni))
2272                         lnet_peer_ni_decref_locked(lpni);
2273                 lpni = ERR_PTR(-ESHUTDOWN);
2274         }
2275
2276         return lpni;
2277 }
2278
2279 bool
2280 lnet_peer_gw_discovery(struct lnet_peer *lp)
2281 {
2282         bool rc = false;
2283
2284         spin_lock(&lp->lp_lock);
2285         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2286                 rc = true;
2287         spin_unlock(&lp->lp_lock);
2288
2289         return rc;
2290 }
2291
2292 bool
2293 lnet_peer_is_uptodate(struct lnet_peer *lp)
2294 {
2295         bool rc;
2296
2297         spin_lock(&lp->lp_lock);
2298         rc = lnet_peer_is_uptodate_locked(lp);
2299         spin_unlock(&lp->lp_lock);
2300         return rc;
2301 }
2302
2303 /*
2304  * Is a peer uptodate from the point of view of discovery?
2305  *
2306  * If it is currently being processed, obviously not.
2307  * A forced Ping or Push is also handled by the discovery thread.
2308  *
2309  * Otherwise look at whether the peer needs rediscovering.
2310  */
2311 bool
2312 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2313 __must_hold(&lp->lp_lock)
2314 {
2315         bool rc;
2316
2317         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2318                             LNET_PEER_FORCE_PING |
2319                             LNET_PEER_FORCE_PUSH)) {
2320                 rc = false;
2321         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2322                 rc = false;
2323         } else if (lnet_peer_needs_push(lp)) {
2324                 rc = false;
2325         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2326                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2327                         rc = true;
2328                 else
2329                         rc = false;
2330         } else {
2331                 rc = false;
2332         }
2333
2334         return rc;
2335 }
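
/*
 * Editorial note (sketch): the branches above collapse into a single
 * predicate. A peer is up to date iff it is DISCOVERED and
 * NIDS_UPTODATE, is not being discovered or force-pinged/pushed or
 * marked for rediscovery, and does not need a push:
 *
 *	return (lp->lp_state & (LNET_PEER_DISCOVERED |
 *				LNET_PEER_NIDS_UPTODATE)) ==
 *	       (LNET_PEER_DISCOVERED | LNET_PEER_NIDS_UPTODATE) &&
 *	       !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *				 LNET_PEER_FORCE_PING |
 *				 LNET_PEER_FORCE_PUSH |
 *				 LNET_PEER_REDISCOVER)) &&
 *	       !lnet_peer_needs_push(lp);
 */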
2336
2337 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2338 void
2339 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2340 {
2341         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2342          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2343          * when adding to the list and queuing the peer to ensure that we do not
2344          * strand any messages on the lp_dc_pendq. This scheme ensures the
2345          * message will be resent even if the peer is already being discovered.
2346          * Therefore we needn't check the return value of
2347          * lnet_peer_queue_for_discovery(lp).
2348          */
2349         lnet_net_lock(LNET_LOCK_EX);
2350         spin_lock(&lp->lp_lock);
2351         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2352         spin_unlock(&lp->lp_lock);
2353         lnet_peer_queue_for_discovery(lp);
2354         lnet_net_unlock(LNET_LOCK_EX);
2355 }
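
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * the comment above describes the discovery thread splicing lp_dc_pendq
 * onto a local list under the same net_lock/EX plus lp_lock pairing.
 * That consumer-side pattern looks roughly like this:
 */
static inline void
lnet_peer_splice_pendq_sketch(struct lnet_peer *lp,
			      struct list_head *local)
{
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, local);
	spin_unlock(&lp->lp_lock);
	lnet_net_unlock(LNET_LOCK_EX);
}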
2356
2357 /*
2358  * Queue a peer for the attention of the discovery thread.  Call with
2359  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2360  * -EALREADY if the peer was already queued.
2361  */
2362 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2363 {
2364         int rc;
2365
2366         spin_lock(&lp->lp_lock);
2367         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2368                 lp->lp_state |= LNET_PEER_DISCOVERING;
2369         spin_unlock(&lp->lp_lock);
2370         if (list_empty(&lp->lp_dc_list)) {
2371                 lnet_peer_addref_locked(lp);
2372                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2373                 wake_up(&the_lnet.ln_dc_waitq);
2374                 rc = 0;
2375         } else {
2376                 rc = -EALREADY;
2377         }
2378
2379         CDEBUG(D_NET, "Queue peer %s: %d\n",
2380                libcfs_nidstr(&lp->lp_primary_nid), rc);
2381
2382         return rc;
2383 }
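
/*
 * Editorial note (sketch): the -EALREADY return lets a caller detect
 * that the peer was already queued, as lnet_peer_push_event() does
 * further down when it re-prioritizes an already-queued peer:
 *
 *	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
 *		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
 *		wake_up(&the_lnet.ln_dc_waitq);
 *	}
 */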
2384
2385 /*
2386  * Discovery of a peer is complete. Wake all waiters on the peer.
2387  * Call with lnet_net_lock/EX held.
2388  */
2389 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2390 {
2391         struct lnet_msg *msg, *tmp;
2392         int rc = 0;
2393         LIST_HEAD(pending_msgs);
2394
2395         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2396                libcfs_nidstr(&lp->lp_primary_nid));
2397
2398         spin_lock(&lp->lp_lock);
2399         /* Our caller dropped lp_lock which may have allowed another thread to
2400          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2401          * Ensure it is cleared.
2402          */
2403         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2404         if (dc_error) {
2405                 lp->lp_dc_error = dc_error;
2406                 lp->lp_state |= LNET_PEER_REDISCOVER;
2407         }
2408         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2409         spin_unlock(&lp->lp_lock);
2410         list_del_init(&lp->lp_dc_list);
2411         wake_up(&lp->lp_dc_waitq);
2412
2413         if (lp->lp_rtr_refcount > 0)
2414                 lnet_router_discovery_complete(lp);
2415
2416         lnet_net_unlock(LNET_LOCK_EX);
2417
2418         /* iterate through all pending messages and send them again */
2419         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2420                 list_del_init(&msg->msg_list);
2421                 if (dc_error) {
2422                         lnet_finalize(msg, dc_error);
2423                         continue;
2424                 }
2425
2426                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2427                        lnet_msgtyp2str(msg->msg_type),
2428                        libcfs_idstr(&msg->msg_target));
2429                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2430                                &msg->msg_rtr_nid_param);
2431                 if (rc < 0) {
2432                         CNETERR("Error sending %s to %s: %d\n",
2433                                lnet_msgtyp2str(msg->msg_type),
2434                                libcfs_idstr(&msg->msg_target), rc);
2435                         lnet_finalize(msg, rc);
2436                 }
2437         }
2438         lnet_net_lock(LNET_LOCK_EX);
2439         lnet_peer_decref_locked(lp);
2440 }
2441
2442 /*
2443  * Handle inbound push.
2444  * Like any event handler, called with lnet_res_lock/CPT held.
2445  */
2446 void lnet_peer_push_event(struct lnet_event *ev)
2447 {
2448         struct lnet_ping_buffer *pbuf;
2449         struct lnet_peer *lp;
2450         int infobytes;
2451
2452         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2453
2454         /* lnet_find_peer() adds a refcount */
2455         lp = lnet_find_peer(&ev->source.nid);
2456         if (!lp) {
2457                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2458                        libcfs_nidstr(&ev->initiator.nid),
2459                        libcfs_nidstr(&ev->source.nid));
2460                 pbuf->pb_needs_post = true;
2461                 return;
2462         }
2463
2464         /* Ensure peer state remains consistent while we modify it. */
2465         spin_lock(&lp->lp_lock);
2466
2467         /*
2468          * If some kind of error happened, the contents of the message
2469          * cannot be used. Clear the NIDS_UPTODATE flag and set the
2470          * FORCE_PING flag to trigger a ping.
2471          */
2472         if (ev->status) {
2473                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2474                 lp->lp_state |= LNET_PEER_FORCE_PING;
2475                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2476                        ev->status,
2477                        libcfs_nidstr(&lp->lp_primary_nid),
2478                        libcfs_nidstr(&ev->source.nid));
2479                 goto out;
2480         }
2481
2482         /*
2483          * A push with invalid or corrupted info. Clear the UPTODATE
2484          * flag to trigger a ping.
2485          */
2486         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2487                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2488                 lp->lp_state |= LNET_PEER_FORCE_PING;
2489                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2490                        libcfs_nidstr(&lp->lp_primary_nid));
2491                 goto out;
2492         }
2493
2494         /* Make sure we'll allocate the correct size ping buffer when
2495          * pinging the peer.
2496          */
2497         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2498         if (lp->lp_data_bytes < infobytes)
2499                 lp->lp_data_bytes = infobytes;
2500
2501         /*
2502          * A non-Multi-Rail peer is not supposed to be capable of
2503          * sending a push.
2504          */
2505         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2506                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2507                        libcfs_nidstr(&lp->lp_primary_nid));
2508                 goto out;
2509         }
2510
2511         /*
2512          * The peer may have discovery disabled at its end. Set
2513          * NO_DISCOVERY as appropriate.
2514          */
2515         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2516                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2517                        libcfs_nidstr(&lp->lp_primary_nid));
2518                 /*
2519                  * Mark the peer for deletion if we already know about it
2520                  * and it's going from discovery set to no discovery set
2521                  */
2522                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2523                                       LNET_PEER_DISCOVERING)) &&
2524                      lp->lp_state & LNET_PEER_DISCOVERED) {
2525                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2526                                libcfs_nidstr(&lp->lp_primary_nid),
2527                                lp->lp_state);
2528                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2529                 }
2530                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2531         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2532                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2533                        libcfs_nidstr(&lp->lp_primary_nid));
2534                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2535         }
2536
2537         /*
2538          * Update the MULTI_RAIL flag based on the push. If the peer
2539          * was configured with DLC then the setting should match what
2540          * DLC put in.
2541          * NB: We verified above that the MR feature bit is set in pi_features
2542          */
2543         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2544                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2545                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2546         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2547                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2548                       libcfs_nidstr(&lp->lp_primary_nid));
2549         } else if (lnet_peer_discovery_disabled) {
2550                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2551                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2552         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2553                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2554                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2555         } else {
2556                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2557                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2558                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2559                 lnet_peer_clr_non_mr_pref_nids(lp);
2560         }
2561
2562         /* Check for truncation of the Put message. Clear the
2563          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2564          * and tell discovery to allocate a bigger buffer.
2565          */
2566         if (ev->mlength < ev->rlength) {
2567                 if (the_lnet.ln_push_target_nbytes < infobytes)
2568                         the_lnet.ln_push_target_nbytes = infobytes;
2569                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2570                 lp->lp_state |= LNET_PEER_FORCE_PING;
2571                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2572                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2573                 goto out;
2574         }
2575
2576         /* always assume new data */
2577         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2578         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2579
2580         /* If there is data present that hasn't been processed yet,
2581          * we'll replace it if the Put contained newer data and it
2582          * fits. We're racing with a Ping or earlier Push in this
2583          * case.
2584          */
2585         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2586                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2587                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2588                     infobytes <= lp->lp_data->pb_nbytes) {
2589                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2590                                infobytes);
2591                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2592                               libcfs_nidstr(&lp->lp_primary_nid),
2593                               LNET_PING_BUFFER_SEQNO(pbuf),
2594                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2595                 }
2596                 goto out;
2597         }
2598
2599         /*
2600          * Allocate a buffer to copy the data. On a failure we drop
2601          * the Push and set FORCE_PING to force the discovery
2602          * thread to fix the problem by pinging the peer.
2603          */
2604         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2605         if (!lp->lp_data) {
2606                 lp->lp_state |= LNET_PEER_FORCE_PING;
2607                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2608                        libcfs_nidstr(&lp->lp_primary_nid),
2609                        LNET_PING_BUFFER_SEQNO(pbuf));
2610                 goto out;
2611         }
2612
2613         /* Success */
2614         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2615                       FLEXIBLE_OBJECT);
2616         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2617         CDEBUG(D_NET, "Received Push %s %u\n",
2618                libcfs_nidstr(&lp->lp_primary_nid),
2619                LNET_PING_BUFFER_SEQNO(pbuf));
2620
2621 out:
2622         /* We've processed this buffer. It can be reposted */
2623         pbuf->pb_needs_post = true;
2624
2625         /*
2626          * Queue the peer for discovery if it is not already queued. If it
2627          * was already queued, force it onto the request queue and wake the
2628          * discovery thread, because its status changed.
2629          */
2630         spin_unlock(&lp->lp_lock);
2631         lnet_net_lock(LNET_LOCK_EX);
2632         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2633                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2634                 wake_up(&the_lnet.ln_dc_waitq);
2635         }
2636         /* Drop refcount from lookup */
2637         lnet_peer_decref_locked(lp);
2638         lnet_net_unlock(LNET_LOCK_EX);
2639 }
2640
2641 /*
2642  * Clear the discovery error state, unless we're already discovering
2643  * this peer, in which case the error is current.
2644  */
2645 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2646 {
2647         spin_lock(&lp->lp_lock);
2648         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2649                 lp->lp_dc_error = 0;
2650         spin_unlock(&lp->lp_lock);
2651 }
2652
2653 /*
2654  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2655  * dropped/retaken within this function. An lnet_peer_ni is passed in
2656  * because discovery could tear down an lnet_peer.
2657  */
2658 int
2659 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2660 {
2661         DEFINE_WAIT(wait);
2662         struct lnet_peer *lp;
2663         int rc = 0;
2664         int count = 0;
2665
2666 again:
2667         lnet_net_unlock(cpt);
2668         lnet_net_lock(LNET_LOCK_EX);
2669         lp = lpni->lpni_peer_net->lpn_peer;
2670         lnet_peer_clear_discovery_error(lp);
2671
2672         /*
2673          * We're willing to be interrupted. The lpni can become a
2674          * zombie if we race with DLC, so we must check for that.
2675          */
2676         for (;;) {
2677                 /* Keep lp alive when the lnet_net_lock is unlocked */
2678                 lnet_peer_addref_locked(lp);
2679                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2680                 if (signal_pending(current))
2681                         break;
2682                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2683                         break;
2684                 /*
2685                  * Don't repeat discovery if discovery is disabled. This is
2686                  * done to ensure we can use discovery as a standard ping as
2687                  * well for backwards compatibility with routers which do not
2688                  * have discovery or have discovery disabled
2689                  */
2690                 if (lnet_is_discovery_disabled(lp) && count > 0)
2691                         break;
2692                 if (lp->lp_dc_error)
2693                         break;
2694                 if (lnet_peer_is_uptodate(lp))
2695                         break;
2696                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2697                         break;
2698                 lnet_peer_queue_for_discovery(lp);
2699                 count++;
2700                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2701
2702                 /*
2703                  * If caller requested a non-blocking operation then
2704                  * return immediately. Once discovery is complete any
2705                  * pending messages that were stopped due to discovery
2706                  * will be transmitted.
2707                  */
2708                 if (!block)
2709                         break;
2710
2711                 lnet_net_unlock(LNET_LOCK_EX);
2712                 schedule();
2713                 finish_wait(&lp->lp_dc_waitq, &wait);
2714                 lnet_net_lock(LNET_LOCK_EX);
2715                 lnet_peer_decref_locked(lp);
2716                 /* Peer may have changed */
2717                 lp = lpni->lpni_peer_net->lpn_peer;
2718         }
2719         finish_wait(&lp->lp_dc_waitq, &wait);
2720
2721         lnet_net_unlock(LNET_LOCK_EX);
2722         lnet_net_lock(cpt);
2723         lnet_peer_decref_locked(lp);
2724         /*
2725          * The peer may have changed, so re-check and rediscover if that turns
2726          * out to have been the case. The reference count on lp ensured that
2727          * even if it was unlinked from lpni the memory could not be recycled.
2728          * Thus the check below is sufficient to determine whether the peer
2729          * changed. If the peer changed, then lp must not be dereferenced.
2730          */
2731         if (lp != lpni->lpni_peer_net->lpn_peer)
2732                 goto again;
2733
2734         if (signal_pending(current))
2735                 rc = -EINTR;
2736         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2737                 rc = -ESHUTDOWN;
2738         else if (lp->lp_dc_error)
2739                 rc = lp->lp_dc_error;
2740         else if (!block)
2741                 CDEBUG(D_NET, "non-blocking discovery\n");
2742         else if (!lnet_peer_is_uptodate(lp) &&
2743                  !(lnet_is_discovery_disabled(lp) ||
2744                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2745                 goto again;
2746
2747         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2748                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2749                libcfs_nidstr(&lpni->lpni_nid), rc,
2750                (!block) ? "pending discovery" : "discovery complete");
2751
2752         return rc;
2753 }
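
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * a minimal blocking use of lnet_discover_peer_locked(), following the
 * calling convention above: ln_api_mutex held on entry, lnet_net_lock
 * for @cpt held around the call, and the lpni ref dropped afterwards.
 */
static inline int
lnet_discover_nid_sketch(struct lnet_nid *nid, int cpt)
__must_hold(&the_lnet.ln_api_mutex)
{
	struct lnet_peer_ni *lpni;
	int rc;

	lnet_net_lock(cpt);
	lpni = lnet_peer_ni_find_locked(nid);
	if (!lpni) {
		lnet_net_unlock(cpt);
		return -ENOENT;
	}
	rc = lnet_discover_peer_locked(lpni, cpt, true);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(cpt);
	return rc;
}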
2754
2755 /* Handle an incoming ack for a push. */
2756 static void
2757 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2758 {
2759         struct lnet_ping_buffer *pbuf;
2760
2761         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2762         spin_lock(&lp->lp_lock);
2763         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2764         lp->lp_push_error = ev->status;
2765         if (ev->status)
2766                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2767         else
2768                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2769         spin_unlock(&lp->lp_lock);
2770
2771         CDEBUG(D_NET, "peer %s ev->status %d\n",
2772                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2773 }
2774
2775 static bool find_primary(struct lnet_nid *nid,
2776                          struct lnet_ping_buffer *pbuf)
2777 {
2778         struct lnet_ping_info *pi = &pbuf->pb_info;
2779         struct lnet_ping_iter piter;
2780         __u32 *stp;
2781
2782         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2783                 /* First large nid is primary */
2784                 for (stp = ping_iter_first(&piter, pbuf, nid);
2785                      stp;
2786                      stp = ping_iter_next(&piter, nid)) {
2787                         if (nid_is_nid4(nid))
2788                                 continue;
2789                         /* nid has already been copied in */
2790                         return true;
2791                 }
2792                 /* no large nids ... weird ... ignore the flag
2793                  * and use first nid.
2794                  */
2795         }
2796         /* pi_ni[1] is primary */
2797         if (pi->pi_nnis < 2)
2798                 return false;
2799         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2800         return true;
2801 }
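
/*
 * Editorial sketch (hypothetical helper, not part of the original file):
 * counting the NIDs advertised in a ping buffer with the same iterator
 * that find_primary() uses above.
 */
static inline int
lnet_ping_buf_count_nids_sketch(struct lnet_ping_buffer *pbuf)
{
	struct lnet_ping_iter piter;
	struct lnet_nid nid;
	__u32 *stp;
	int count = 0;

	for (stp = ping_iter_first(&piter, pbuf, &nid);
	     stp;
	     stp = ping_iter_next(&piter, &nid))
		count++;
	return count;
}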
2802
2803 /* Handle a Reply message. This is the reply to a Ping message. */
2804 static void
2805 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2806 {
2807         struct lnet_ping_buffer *pbuf;
2808         struct lnet_nid primary;
2809         int infobytes;
2810         int rc;
2811         bool ping_feat_disc;
2812
2813         spin_lock(&lp->lp_lock);
2814
2815         lp->lp_disc_src_nid = ev->target.nid;
2816         lp->lp_disc_dst_nid = ev->source.nid;
2817
2818         /*
2819          * If some kind of error happened, the contents of the message
2820          * cannot be used. Set PING_FAILED to trigger a retry.
2821          */
2822         if (ev->status) {
2823                 lp->lp_state |= LNET_PEER_PING_FAILED;
2824                 lp->lp_ping_error = ev->status;
2825                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2826                        ev->status,
2827                        libcfs_nidstr(&lp->lp_primary_nid),
2828                        libcfs_nidstr(&ev->source.nid));
2829                 goto out;
2830         }
2831
2832         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2833         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2834                 lnet_swap_pinginfo(pbuf);
2835
2836         /*
2837          * A reply with invalid or corrupted info. Set PING_FAILED to
2838          * trigger a retry.
2839          */
2840         rc = lnet_ping_info_validate(&pbuf->pb_info);
2841         if (rc) {
2842                 lp->lp_state |= LNET_PEER_PING_FAILED;
2843                 lp->lp_ping_error = 0;
2844                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2845                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2846                 goto out;
2847         }
2848
2849         /*
2850          * The peer may have discovery disabled at its end. Set
2851          * NO_DISCOVERY as appropriate.
2852          */
2853         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2854         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2855                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2856                        libcfs_nidstr(&lp->lp_primary_nid),
2857                        ping_feat_disc ? "enabled" : "disabled",
2858                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2859
2860                 /* Detect whether this peer has toggled discovery from on to
2861                  * off and whether we can delete and re-create the peer. Peers
2862                  * that were manually configured cannot be deleted by discovery.
2863                  * We need to delete this peer and re-create it if the peer was
2864                  * not configured manually, is currently considered DD capable,
2865                  * and either:
2866                  * 1. We've already discovered the peer (the peer has toggled
2867                  *    the discovery feature from on to off), or
2868                  * 2. The peer is considered MR, but it was not user configured
2869                  *    (this was a "temporary" peer created via the kernel APIs
2870                  *     that we're discovering for the first time)
2871                  */
2872                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2873                                       LNET_PEER_NO_DISCOVERY)) &&
2874                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2875                                      LNET_PEER_MULTI_RAIL))) {
2876                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2877                                libcfs_nidstr(&lp->lp_primary_nid),
2878                                lp->lp_state);
2879                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2880                 }
2881                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2882         } else {
2883                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2884                        libcfs_nidstr(&lp->lp_primary_nid));
2885                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2886         }
2887
2888         /*
2889          * Update the MULTI_RAIL flag based on the reply. If the peer
2890          * was configured with DLC then the setting should match what
2891          * DLC put in.
2892          */
2893         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2894                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2895                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2896                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2897                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2898                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2899                               libcfs_nidstr(&lp->lp_primary_nid));
2900                 } else if (lnet_peer_discovery_disabled) {
2901                         CDEBUG(D_NET,
2902                                "peer %s(%p) not MR: DD disabled locally\n",
2903                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2904                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2905                         CDEBUG(D_NET,
2906                                "peer %s(%p) not MR: DD disabled remotely\n",
2907                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2908                 } else {
2909                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2910                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2911                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2912                         lnet_peer_clr_non_mr_pref_nids(lp);
2913                 }
2914         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2915                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2916                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2917                               libcfs_nidstr(&lp->lp_primary_nid));
2918                 } else {
2919                         CERROR("Multi-Rail state vanished from %s\n",
2920                                libcfs_nidstr(&lp->lp_primary_nid));
2921                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2922                 }
2923         }
2924
2925         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2926         /*
2927          * Make sure we'll allocate the correct size ping buffer when
2928          * pinging the peer.
2929          */
2930         if (lp->lp_data_bytes < infobytes)
2931                 lp->lp_data_bytes = infobytes;
2932
2933         /* Check for truncation of the Reply. Clear PING_SENT and set
2934          * PING_FAILED to trigger a retry.
2935          */
2936         if (pbuf->pb_nbytes < infobytes) {
2937                 if (the_lnet.ln_push_target_nbytes < infobytes)
2938                         the_lnet.ln_push_target_nbytes = infobytes;
2939                 lp->lp_state |= LNET_PEER_PING_FAILED;
2940                 lp->lp_ping_error = 0;
2941                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2942                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2943                 goto out;
2944         }
2945
2946         /*
2947          * Check the sequence numbers in the reply. These are only
2948          * available if the reply came from a Multi-Rail peer.
2949          */
2950         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2951             find_primary(&primary, pbuf) &&
2952             nid_same(&lp->lp_primary_nid, &primary)) {
2953                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2954                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2955                                 libcfs_nidstr(&lp->lp_primary_nid),
2956                                 LNET_PING_BUFFER_SEQNO(pbuf),
2957                                 lp->lp_peer_seqno);
2958
2959                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2960         }
2961
2962         /* We're happy with the state of the data in the buffer. */
2963         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2964                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2965                lp->lp_state);
2966         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2967                 lnet_ping_buffer_decref(lp->lp_data);
2968         else
2969                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2970         lnet_ping_buffer_addref(pbuf);
2971         lp->lp_data = pbuf;
2972 out:
2973         lp->lp_state &= ~LNET_PEER_PING_SENT;
2974         spin_unlock(&lp->lp_lock);
2975 }
2976
2977 /*
2978  * Send event handling. Only matters for error cases, where we clean
2979  * up state on the peer and peer_ni that would otherwise be updated in
2980  * the REPLY event handler for a successful Ping, and the ACK event
2981  * handler for a successful Push.
2982  */
2983 static int
2984 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2985 {
2986         int rc = 0;
2987
2988         if (!ev->status)
2989                 goto out;
2990
2991         spin_lock(&lp->lp_lock);
2992         if (ev->msg_type == LNET_MSG_GET) {
2993                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2994                 lp->lp_state |= LNET_PEER_PING_FAILED;
2995                 lp->lp_ping_error = ev->status;
2996         } else { /* ev->msg_type == LNET_MSG_PUT */
2997                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2998                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2999                 lp->lp_push_error = ev->status;
3000         }
3001         spin_unlock(&lp->lp_lock);
3002         rc = LNET_REDISCOVER_PEER;
3003 out:
3004         CDEBUG(D_NET, "%s Send to %s: %d\n",
3005                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3006                 libcfs_nidstr(&ev->target.nid), rc);
3007         return rc;
3008 }
3009
3010 /*
3011  * Unlink event handling. This event is only seen if a call to
3012  * LNetMDUnlink() caused the MD to be unlinked. If this call was
3013  * made after the event was set up in LNetGet() or LNetPut() then we
3014  * assume the Ping or Push timed out.
3015  */
3016 static void
3017 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3018 {
3019         spin_lock(&lp->lp_lock);
3020         /* We've passed through LNetGet() */
3021         if (lp->lp_state & LNET_PEER_PING_SENT) {
3022                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3023                 lp->lp_state |= LNET_PEER_PING_FAILED;
3024                 lp->lp_ping_error = -ETIMEDOUT;
3025                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3026                         libcfs_nidstr(&lp->lp_primary_nid));
3027         }
3028         /* We've passed through LNetPut() */
3029         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3030                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3031                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3032                 lp->lp_push_error = -ETIMEDOUT;
3033                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3034                         libcfs_nidstr(&lp->lp_primary_nid));
3035         }
3036         spin_unlock(&lp->lp_lock);
3037 }
3038
3039 /*
3040  * Event handler for the discovery EQ.
3041  *
3042  * Called with lnet_res_lock(cpt) held. The cpt is the
3043  * lnet_cpt_of_cookie() of the md handle cookie.
3044  */
3045 static void lnet_discovery_event_handler(struct lnet_event *event)
3046 {
3047         struct lnet_peer *lp = event->md_user_ptr;
3048         struct lnet_ping_buffer *pbuf;
3049         int rc;
3050
3051         /* discovery needs to take another look */
3052         rc = LNET_REDISCOVER_PEER;
3053
3054         CDEBUG(D_NET, "Received event: %d\n", event->type);
3055
3056         switch (event->type) {
3057         case LNET_EVENT_ACK:
3058                 lnet_discovery_event_ack(lp, event);
3059                 break;
3060         case LNET_EVENT_REPLY:
3061                 lnet_discovery_event_reply(lp, event);
3062                 break;
3063         case LNET_EVENT_SEND:
3064                 /* Only send failure triggers a retry. */
3065                 rc = lnet_discovery_event_send(lp, event);
3066                 break;
3067         case LNET_EVENT_UNLINK:
3068                 /* LNetMDUnlink() was called */
3069                 lnet_discovery_event_unlink(lp, event);
3070                 break;
3071         default:
3072                 /* Invalid events. */
3073                 LBUG();
3074         }
3075         lnet_net_lock(LNET_LOCK_EX);
3076         if (event->unlinked) {
3077                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3078                 lnet_ping_buffer_decref(pbuf);
3079                 lnet_peer_decref_locked(lp);
3080         }
3081
3082         /* put peer back at end of request queue, if discovery not already
3083          * done */
3084         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3085             lnet_peer_queue_for_discovery(lp)) {
3086                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3087                 wake_up(&the_lnet.ln_dc_waitq);
3088         }
3089         lnet_net_unlock(LNET_LOCK_EX);
3090 }
3091
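/*
 * Iterator over the NI status entries in a ping buffer.
 *
 * ping_iter_first() primes the iterator and returns a pointer to the
 * ns_status of the first entry (the loopback NI), optionally copying
 * its NID into @nid; ping_iter_next() then walks the remaining
 * entries. The usual idiom, as in ping_info_count_entries() and
 * lnet_is_nid_in_ping_info() below, is:
 *
 *	for (st = ping_iter_first(&pi, pbuf, &nid); st;
 *	     st = ping_iter_next(&pi, &nid))
 *		(examine *st and nid)
 */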
3092 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3093                      struct lnet_ping_buffer *pbuf,
3094                      struct lnet_nid *nid)
3095 {
3096         pi->pinfo = &pbuf->pb_info;
3097         pi->pos = &pbuf->pb_info.pi_ni;
3098         pi->end = (void *)pi->pinfo +
3099                   min_t(int, pbuf->pb_nbytes,
3100                         lnet_ping_info_size(pi->pinfo));
3101         /* lnet_ping_info_validate ensures there will be one
3102          * lnet_ni_status at the start
3103          */
3104         if (nid)
3105                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3106
3107         pi->pos += sizeof(struct lnet_ni_status);
3108         return &pbuf->pb_info.pi_ni[0].ns_status;
3109 }
3110
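/*
 * Return a pointer to the ns_status of the next entry, or NULL when the
 * buffer is exhausted. The fixed-size pi_ni[] array is consumed first;
 * if the peer set LNET_PING_FEAT_LARGE_ADDR, the variable-length
 * lnet_ni_large_status entries that follow it are walked next, skipping
 * any NID too large to fit in a struct lnet_nid.
 */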
3111 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3112 {
3113         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3114
3115         if (pi->pos < ((void *)pi->pinfo + off)) {
3116                 struct lnet_ni_status *ns = pi->pos;
3117
3118                 pi->pos = ns + 1;
3119                 if (pi->pos > pi->end)
3120                         return NULL;
3121                 if (nid)
3122                         lnet_nid4_to_nid(ns->ns_nid, nid);
3123                 return &ns->ns_status;
3124         }
3125
3126         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3127                 struct lnet_ni_large_status *lns = pi->pos;
3128
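                /* The 8-byte bound is assumed to cover ns_status plus
                 * the leading bytes of ns_nid that lnet_ping_sts_next()
                 * needs to size this entry
                 */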
3129                 if (pi->pos + 8 > pi->end)
3130                         /* Not safe to examine next */
3131                         return NULL;
3132                 pi->pos = lnet_ping_sts_next(lns);
3133                 if (pi->pos > pi->end)
3134                         return NULL;
3135                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3136                         continue;
3137                 if (nid)
3138                         *nid = lns->ns_nid;
3139                 return &lns->ns_status;
3140         }
3141         return NULL;
3142 }
3143
3144 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3145 {
3146         struct lnet_ping_iter pi;
3147         u32 *st;
3148         int nnis = 0;
3149
3150         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3151              st = ping_iter_next(&pi, NULL))
3152                 nnis += 1;
3153
3154         return nnis;
3155 }
3156
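/*
 * Fold a discovered NI status into the peer NI's health: a DOWN report
 * triggers the remote failure handling, while an UP report for a peer
 * NI we have never heard from resets its health to the maximum value.
 */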
3157 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3158 {
3159         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN)
3160                 lnet_handle_remote_failure_locked(lpni);
3161         else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3162                  !lpni->lpni_last_alive)
3163                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3164 }
3165
3166 /*
3167  * Build a peer from incoming data.
3168  *
3169  * The NIDs in the incoming data are supposed to be structured as follows:
3170  *  - loopback
3171  *  - primary NID
3172  *  - other NIDs in same net
3173  *  - NIDs in second net
3174  *  - NIDs in third net
3175  *  - ...
3176  * This is due to the way the list of NIDs in the data is created.
3177  *
3178  * Note that this function will mark the peer uptodate unless an
3179  * ENOMEM is encountered. All other errors are due to a conflict
3180  * between the DLC configuration and what discovery sees. We treat DLC
3181  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3182  * peer from becoming stuck in discovery.
3183  */
3184 static int lnet_peer_merge_data(struct lnet_peer *lp,
3185                                 struct lnet_ping_buffer *pbuf)
3186 {
3187         struct lnet_peer_net *lpn;
3188         struct lnet_peer_ni *lpni;
3189         struct lnet_nid *curnis = NULL;
3190         struct lnet_ni_large_status *addnis = NULL;
3191         struct lnet_nid *delnis = NULL;
3192         struct lnet_ping_iter pi;
3193         struct lnet_nid nid;
3194         u32 *stp;
3195         struct lnet_nid primary = {};
3196         bool want_large_primary;
3197         unsigned int flags;
3198         int ncurnis;
3199         int naddnis;
3200         int ndelnis;
3201         int nnis = 0;
3202         int i;
3203         int j;
3204         int rc;
3205         __u32 old_st;
3206
3207         flags = LNET_PEER_DISCOVERED;
3208         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3209                 flags |= LNET_PEER_MULTI_RAIL;
3210
3211         /*
3212          * Cache the routing feature for the peer; whether it is enabled
3213          * or disabled, as reported by the remote peer.
3214          */
3215         spin_lock(&lp->lp_lock);
3216         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3217                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3218         else
3219                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3220         spin_unlock(&lp->lp_lock);
3221
3222         nnis = ping_info_count_entries(pbuf);
3223         nnis = max_t(int, lp->lp_nnis, nnis);
3224         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3225         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3226         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3227         if (!curnis || !addnis || !delnis) {
3228                 rc = -ENOMEM;
3229                 goto out;
3230         }
3231         ncurnis = 0;
3232         naddnis = 0;
3233         ndelnis = 0;
3234
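        /*
         * Compute a three-way diff between the NIDs we have recorded
         * for the peer and the NIDs reported in the ping buffer:
         *   curnis[] - NIDs currently attached to the peer
         *   addnis[] - NIDs in pbuf but not in curnis[] (to be added)
         *   delnis[] - NIDs in curnis[] but not in pbuf (to be deleted)
         */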
3235         /* Construct the list of NIDs present in peer. */
3236         lpni = NULL;
3237         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3238                 curnis[ncurnis++] = lpni->lpni_nid;
3239
3240         /* Check for NIDs in pbuf not present in curnis[].
3241          * Skip the first, which is loop-back.  Take second as
3242          * primary, unless a large primary is found.
3243          */
3244         ping_iter_first(&pi, pbuf, NULL);
3245         stp = ping_iter_next(&pi, &nid);
3246         if (stp)
3247                 primary = nid;
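        /* If the peer advertises a large-address primary NID
         * (LNET_PING_FEAT_PRIMARY_LARGE), prefer the first large NID
         * found in the loop below over the nid4 entry taken above.
         */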
3248         want_large_primary = (pbuf->pb_info.pi_features &
3249                               LNET_PING_FEAT_PRIMARY_LARGE);
3250         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3251                 for (j = 0; j < ncurnis; j++)
3252                         if (nid_same(&nid, &curnis[j]))
3253                                 break;
3254                 if (j == ncurnis) {
3255                         addnis[naddnis].ns_nid = nid;
3256                         addnis[naddnis].ns_status = *stp;
3257                         naddnis += 1;
3258                 }
3259                 if (want_large_primary && nid.nid_size) {
3260                         primary = nid;
3261                         want_large_primary = false;
3262                 }
3263         }
3264         /*
3265          * Check for NIDs in curnis[] not present in pbuf.
3266          * The nested loop starts at 1 to skip the loopback NID.
3267          *
3268          * But never add the loopback NID to delnis[]: if it is
3269          * present in curnis[] then this peer is for this node.
3270          */
3271         for (i = 0; i < ncurnis; i++) {
3272                 if (nid_is_lo0(&curnis[i]))
3273                         continue;
3274                 ping_iter_first(&pi, pbuf, NULL);
3275                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3276                         if (nid_same(&curnis[i], &nid)) {
3277                                 /*
3278                                  * update the information we cache for the
3279                                  * peer with the latest information we
3280                                  * received
3281                                  */
3282                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3283                                 if (lpni) {
3284                                         old_st = lpni->lpni_ns_status;
3285                                         lpni->lpni_ns_status = *stp;
3286                                         if (old_st != lpni->lpni_ns_status)
3287                                                 handle_disc_lpni_health(lpni);
3288                                         lnet_peer_ni_decref_locked(lpni);
3289                                 }
3290                                 break;
3291                         }
3292                 }
3293                 if (!stp)
3294                         delnis[ndelnis++] = curnis[i];
3295         }
3296
3297         /*
3298          * If we get here and discovery is disabled then we don't want
3299          * to add or delete any NIs. We have already updated the status
3300          * of the NIs we had information on, so we're done.
3301          */
3302         rc = 0;
3303         if (lnet_is_discovery_disabled(lp))
3304                 goto out;
3305
3306         for (i = 0; i < naddnis; i++) {
3307                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3308                 if (rc) {
3309                         CERROR("Error adding NID %s to peer %s: %d\n",
3310                                libcfs_nidstr(&addnis[i].ns_nid),
3311                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3312                         if (rc == -ENOMEM)
3313                                 goto out;
3314                 }
3315                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3316                 if (lpni) {
3317                         lpni->lpni_ns_status = addnis[i].ns_status;
3318                         handle_disc_lpni_health(lpni);
3319                         lnet_peer_ni_decref_locked(lpni);
3320                 }
3321         }
3322
3323         for (i = 0; i < ndelnis; i++) {
3324                 /*
3325                  * for routers it's okay to delete the primary_nid because
3326                  * the upper layers don't really rely on it. So if we're
3327                  * being told that the router changed its primary_nid
3328                  * then it's okay to delete it.
3329                  */
3330                 if (lp->lp_rtr_refcount > 0)
3331                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3332                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3333                 if (rc) {
3334                         CERROR("Error deleting NID %s from peer %s: %d\n",
3335                                libcfs_nidstr(&delnis[i]),
3336                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3337                         if (rc == -ENOMEM)
3338                                 goto out;
3339                 }
3340         }
3341
3342         /* The peer net for the primary NID should be the first entry in the
3343          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3344          * be the first entry in its peer net's lpn_peer_nis list.
3345          */
3346         find_primary(&nid, pbuf);
3347         lpni = lnet_peer_ni_find_locked(&nid);
3348         if (!lpni) {
3349                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3350                        libcfs_nidstr(&nid));
3351                 goto out;
3352         }
3353
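        /* Dropping the lookup reference here is safe: lpni is still
         * attached to lp, and we hold the api mutex, so it cannot be
         * freed before the list manipulation below completes.
         */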
3354         lnet_peer_ni_decref_locked(lpni);
3355
3356         lpn = lpni->lpni_peer_net;
3357         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3358                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3359
3360         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3361                 list_move(&lpni->lpni_peer_nis,
3362                           &lpni->lpni_peer_net->lpn_peer_nis);
3363
3364         /*
3365          * Errors other than -ENOMEM are due to peers having been
3366          * configured with DLC. Ignore these because DLC overrides
3367          * Discovery.
3368          */
3369         rc = 0;
3370 out:
3371         /* If this peer is a gateway, invoke the routing callback to update
3372          * the associated route status
3373          */
3374         if (lp->lp_rtr_refcount > 0)
3375                 lnet_router_discovery_ping_reply(lp, pbuf);
3376
3377         CFS_FREE_PTR_ARRAY(curnis, nnis);
3378         CFS_FREE_PTR_ARRAY(addnis, nnis);
3379         CFS_FREE_PTR_ARRAY(delnis, nnis);
3380         lnet_ping_buffer_decref(pbuf);
3381         CDEBUG(D_NET, "peer %s (%p): %d\n",
3382                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3383
3384         if (rc) {
3385                 spin_lock(&lp->lp_lock);
3386                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3387                 lp->lp_state |= LNET_PEER_FORCE_PING;
3388                 spin_unlock(&lp->lp_lock);
3389         }
3390         return rc;
3391 }
3392
3393 /*
3394  * The data in pbuf says lp is its primary peer, but the data was
3395  * received by a different peer. Try to update lp with the data.
3396  */
3397 static int
3398 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3399 {
3400         struct lnet_handle_md mdh;
3401
3402         /* Queue lp for discovery, and force it on the request queue. */
3403         lnet_net_lock(LNET_LOCK_EX);
3404         if (lnet_peer_queue_for_discovery(lp))
3405                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3406         lnet_net_unlock(LNET_LOCK_EX);
3407
3408         LNetInvalidateMDHandle(&mdh);
3409
3410         /*
3411          * Decide whether we can move the peer to the DATA_PRESENT state.
3412          *
3413          * We replace stale data for a multi-rail peer, repair PING_FAILED
3414          * status, and preempt FORCE_PING.
3415          *
3416          * If after that we have DATA_PRESENT, we merge it into this peer.
3417          */
3418         spin_lock(&lp->lp_lock);
3419         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3420                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3421                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3422                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3423                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3424                         lnet_ping_buffer_decref(pbuf);
3425                         pbuf = lp->lp_data;
3426                         lp->lp_data = NULL;
3427                 }
3428         }
3429         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3430                 lnet_ping_buffer_decref(lp->lp_data);
3431                 lp->lp_data = NULL;
3432                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3433         }
3434         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3435                 mdh = lp->lp_ping_mdh;
3436                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3437                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3438                 lp->lp_ping_error = 0;
3439         }
3440         if (lp->lp_state & LNET_PEER_FORCE_PING)
3441                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3442         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3443         spin_unlock(&lp->lp_lock);
3444
3445         if (!LNetMDHandleIsInvalid(mdh))
3446                 LNetMDUnlink(mdh);
3447
3448         if (pbuf)
3449                 return lnet_peer_merge_data(lp, pbuf);
3450
3451         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3452         return 0;
3453 }
3454
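/* Return true if @nid appears among the NI status entries of @pbuf. */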
3455 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3456                                      struct lnet_ping_buffer *pbuf)
3457 {
3458         struct lnet_ping_iter pi;
3459         struct lnet_nid pnid;
3460         u32 *st;
3461
3462         for (st = ping_iter_first(&pi, pbuf, &pnid);
3463              st;
3464              st = ping_iter_next(&pi, &pnid))
3465                 if (nid_same(nid, &pnid))
3466                         return true;
3467         return false;
3468 }
3469
3470 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3471  * to the discovery queue a reference was taken that will prevent the peer from
3472  * actually being freed by this function. After this function exits, the
3473  * discovery thread should call lnet_peer_discovery_complete() which will
3474  * drop that reference as well as wake any waiters that may also be holding a
3475  * ref on the peer.
3476  */
3477 static int lnet_peer_deletion(struct lnet_peer *lp)
3478 __must_hold(&lp->lp_lock)
3479 {
3480         struct list_head rlist;
3481         struct lnet_route *route, *tmp;
3482         int sensitivity = lp->lp_health_sensitivity;
3483         int rc = 0;
3484
3485         INIT_LIST_HEAD(&rlist);
3486
3487         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3488                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3489
3490         /* no-op if lnet_peer_del() has already been called on this peer */
3491         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3492                 goto clear_discovering;
3493
3494         spin_unlock(&lp->lp_lock);
3495
3496         mutex_lock(&the_lnet.ln_api_mutex);
3497         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3498             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3499                 mutex_unlock(&the_lnet.ln_api_mutex);
3500                 spin_lock(&lp->lp_lock);
3501                 rc = -ESHUTDOWN;
3502                 goto clear_discovering;
3503         }
3504
3505         lnet_peer_cancel_discovery(lp);
3506         lnet_net_lock(LNET_LOCK_EX);
3507         list_for_each_entry_safe(route, tmp,
3508                                  &lp->lp_routes,
3509                                  lr_gwlist)
3510                 lnet_move_route(route, NULL, &rlist);
3511
3512         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3513         rc = lnet_peer_del_locked(lp);
3514         if (rc)
3515                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3516                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3517
3518         lnet_net_unlock(LNET_LOCK_EX);
3519
3520         list_for_each_entry_safe(route, tmp,
3521                                  &rlist, lr_list) {
3522                 /* re-add these routes */
3523                 lnet_add_route(route->lr_net,
3524                                route->lr_hops,
3525                                &route->lr_nid,
3526                                route->lr_priority,
3527                                sensitivity);
3528                 LIBCFS_FREE(route, sizeof(*route));
3529         }
3530
3531         mutex_unlock(&the_lnet.ln_api_mutex);
3532
3533         spin_lock(&lp->lp_lock);
3534
3535         rc = 0;
3536
3537 clear_discovering:
3538         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3539                           LNET_PEER_FORCE_PUSH);
3540
3541         return rc;
3542 }
3543
3544 /*
3545  * Update a peer using the data received.
3546  */
3547 static int lnet_peer_data_present(struct lnet_peer *lp)
3548 __must_hold(&lp->lp_lock)
3549 {
3550         struct lnet_ping_buffer *pbuf;
3551         struct lnet_peer_ni *lpni;
3552         struct lnet_nid nid;
3553         unsigned int flags;
3554         int rc = 0;
3555
3556         pbuf = lp->lp_data;
3557         lp->lp_data = NULL;
3558         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3559         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3560         spin_unlock(&lp->lp_lock);
3561
3562         /*
3563          * Modifications of peer structures are done while holding the
3564          * ln_api_mutex. A global lock is required because we may be
3565          * modifying multiple peer structures, and a mutex greatly
3566          * simplifies memory management.
3567          *
3568          * The actual changes to the data structures must also protect
3569          * against concurrent lookups, for which the lnet_net_lock in
3570          * LNET_LOCK_EX mode is used.
3571          */
3572         mutex_lock(&the_lnet.ln_api_mutex);
3573         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3574                 rc = -ESHUTDOWN;
3575                 goto out;
3576         }
3577
3578         /*
3579          * If this peer is not on the peer list then it is being torn
3580          * down, and our reference count may be all that is keeping it
3581          * alive. Don't do any work on it.
3582          */
3583         if (list_empty(&lp->lp_peer_list)) {
3584                 lnet_ping_buffer_decref(pbuf);
3585                 goto out;
3586         }
3587
3588         flags = LNET_PEER_DISCOVERED;
3589         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3590                 flags |= LNET_PEER_MULTI_RAIL;
3591
3592         /*
3593          * Check whether the primary NID in the message matches the
3594          * primary NID of the peer. If it does, update the peer; if
3595          * it does not, check whether there is already a peer with
3596          * that primary NID. If no such peer exists, try to update
3597          * the primary NID of the current peer (allowed if it was
3598          * created due to message traffic) and complete the update.
3599          * If the peer did exist, hand off the data to it.
3600          *
3601          * The peer for the loopback interface is a special case: this
3602          * is the peer for the local node, and we want to set its
3603          * primary NID to the correct value here. Moreover, this peer
3604          * can show up with only the loopback NID in the ping buffer.
3605          */
3606         if (!find_primary(&nid, pbuf)) {
3607                 lnet_ping_buffer_decref(pbuf);
3608                 goto out;
3609         }
3610         if (nid_is_lo0(&lp->lp_primary_nid)) {
3611                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3612                 if (rc)
3613                         lnet_ping_buffer_decref(pbuf);
3614                 else
3615                         rc = lnet_peer_merge_data(lp, pbuf);
3616         /*
3617          * If the primary NID we have cached for the peer is present
3618          * in the ping info returned from the peer, but it is not the
3619          * primary NID reported there, and discovery is disabled, then
3620          * we don't want to modify our local peer info by adding or
3621          * removing NIDs; we just want to update the status of the
3622          * NIDs we currently have recorded for that peer.
3623          */
3624         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3625                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3626                     lnet_is_discovery_disabled(lp))) {
3627                 rc = lnet_peer_merge_data(lp, pbuf);
3628         } else {
3629                 lpni = lnet_peer_ni_find_locked(&nid);
3630                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3631                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3632                         if (rc) {
3633                                 CERROR("Primary NID error %s versus %s: %d\n",
3634                                        libcfs_nidstr(&lp->lp_primary_nid),
3635                                        libcfs_nidstr(&nid), rc);
3636                                 lnet_ping_buffer_decref(pbuf);
3637                         } else {
3638                                 rc = lnet_peer_merge_data(lp, pbuf);
3639                         }
3640                         if (lpni)
3641                                 lnet_peer_ni_decref_locked(lpni);
3642                 } else {
3643                         struct lnet_peer *new_lp;
3644                         new_lp = lpni->lpni_peer_net->lpn_peer;
3645                         /*
3646                          * if lp has discovery/MR enabled that means new_lp
3647                          * should have discovery/MR enabled as well, since
3648                          * it's the same peer, which we're about to merge
3649                          */
3650                         spin_lock(&lp->lp_lock);
3651                         spin_lock(&new_lp->lp_lock);
3652                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3653                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3654                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3655                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3656                         /* If we're processing a ping reply then we may be
3657                          * about to send a push to the peer that we ping'd.
3658                          * Since the ping reply that we're processing was
3659                          * received by lp, we need to set the discovery source
3660                          * NID for new_lp to the NID stored in lp.
3661                          */
3662                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3663                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3664                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3665                         }
3666                         spin_unlock(&new_lp->lp_lock);
3667                         spin_unlock(&lp->lp_lock);
3668
3669                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3670                         lnet_consolidate_routes_locked(lp, new_lp);
3671                         lnet_peer_ni_decref_locked(lpni);
3672                 }
3673         }
3674 out:
3675         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3676                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3677                lp->lp_state);
3678         mutex_unlock(&the_lnet.ln_api_mutex);
3679
3680         spin_lock(&lp->lp_lock);
3681         /* Tell discovery to re-check the peer immediately. */
3682         if (!rc)
3683                 rc = LNET_REDISCOVER_PEER;
3684         return rc;
3685 }
3686
3687 /*
3688  * A ping failed. Clear the PING_FAILED state and set the
3689  * FORCE_PING state, to ensure a retry even if discovery is
3690  * disabled. This avoids being left with incorrect state.
3691  */
3692 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3693 __must_hold(&lp->lp_lock)
3694 {
3695         struct lnet_handle_md mdh;
3696         int rc;
3697
3698         mdh = lp->lp_ping_mdh;
3699         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3700         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3701         lp->lp_state |= LNET_PEER_FORCE_PING;
3702         rc = lp->lp_ping_error;
3703         lp->lp_ping_error = 0;
3704         spin_unlock(&lp->lp_lock);
3705
3706         if (!LNetMDHandleIsInvalid(mdh))
3707                 LNetMDUnlink(mdh);
3708
3709         CDEBUG(D_NET, "peer %s:%d\n",
3710                libcfs_nidstr(&lp->lp_primary_nid), rc);
3711
3712         spin_lock(&lp->lp_lock);
3713         return rc ? rc : LNET_REDISCOVER_PEER;
3714 }
3715
3716 /* Active side of ping. */
3717 static int lnet_peer_send_ping(struct lnet_peer *lp)
3718 __must_hold(&lp->lp_lock)
3719 {
3720         int bytes;
3721         int rc;
3722         int cpt;
3723
3724         lp->lp_state |= LNET_PEER_PING_SENT;
3725         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3726         spin_unlock(&lp->lp_lock);
3727
3728         cpt = lnet_net_lock_current();
3729         /* Refcount for MD. */
3730         lnet_peer_addref_locked(lp);
3731         lnet_net_unlock(cpt);
3732
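        /* Size the ping from the largest ping info previously received
         * from this peer (lp_data_bytes, maintained by the Reply
         * handler), but never below the protocol minimum.
         */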
3733         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3734
3735         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3736                             the_lnet.ln_dc_handler, false);
3737         /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3738          * refcount on the peer, otherwise LNetMDUnlink will be called
3739          * which will eventually do that.
3740          */
3741         if (rc > 0) {
3742                 lnet_net_lock(cpt);
3743                 lnet_peer_decref_locked(lp);
3744                 lnet_net_unlock(cpt);
3745                 rc = -rc; /* change rc to a negative value */
3746                 goto fail_error;
3747         } else if (rc < 0) {
3748                 goto fail_error;
3749         }
3750
3751         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3752
3753         spin_lock(&lp->lp_lock);
3754         return 0;
3755
3756 fail_error:
3757         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3758         /*
3759          * The errors that get us here are considered hard errors and
3760          * cause Discovery to terminate. So we clear PING_SENT, but do
3761          * not set either PING_FAILED or FORCE_PING. In fact we need
3762          * to clear PING_FAILED, because the unlink event handler will
3763          * have set it if we called LNetMDUnlink() above.
3764          */
3765         spin_lock(&lp->lp_lock);
3766         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3767         return rc;
3768 }
3769
3770 /*
3771  * This function exists because you cannot call LNetMDUnlink() from an
3772  * event handler.
3773  */
3774 static int lnet_peer_push_failed(struct lnet_peer *lp)
3775 __must_hold(&lp->lp_lock)
3776 {
3777         struct lnet_handle_md mdh;
3778         int rc;
3779
3780         mdh = lp->lp_push_mdh;
3781         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3782         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3783         rc = lp->lp_push_error;
3784         lp->lp_push_error = 0;
3785         spin_unlock(&lp->lp_lock);
3786
3787         if (!LNetMDHandleIsInvalid(mdh))
3788                 LNetMDUnlink(mdh);
3789
3790         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3791         spin_lock(&lp->lp_lock);
3792         return rc ? rc : LNET_REDISCOVER_PEER;
3793 }
3794
3795 /*
3796  * Mark the peer as discovered.
3797  */
3798 static int lnet_peer_discovered(struct lnet_peer *lp)
3799 __must_hold(&lp->lp_lock)
3800 {
3801         lp->lp_state |= LNET_PEER_DISCOVERED;
3802         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3803                           LNET_PEER_REDISCOVER);
3804
3805         lp->lp_dc_error = 0;
3806
3807         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3808
3809         return 0;
3810 }
3811
3812 /* Active side of push. */
3813 static int lnet_peer_send_push(struct lnet_peer *lp)
3814 __must_hold(&lp->lp_lock)
3815 {
3816         struct lnet_ping_buffer *pbuf;
3817         struct lnet_processid id;
3818         struct lnet_md md;
3819         int cpt;
3820         int rc;
3821
3822         /* Don't push to a non-multi-rail peer. */
3823         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3824                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3825                 /* if peer's NIDs are uptodate then peer is discovered */
3826                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3827                         rc = lnet_peer_discovered(lp);
3828                         return rc;
3829                 }
3830
3831                 return 0;
3832         }
3833
3834         lp->lp_state |= LNET_PEER_PUSH_SENT;
3835         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3836         spin_unlock(&lp->lp_lock);
3837
3838         cpt = lnet_net_lock_current();
3839         pbuf = the_lnet.ln_ping_target;
3840         lnet_ping_buffer_addref(pbuf);
3841         lnet_net_unlock(cpt);
3842
3843         /* Push source MD */
3844         md.start     = &pbuf->pb_info;
3845         md.length    = pbuf->pb_nbytes;
3846         md.threshold = 2; /* Put/Ack */
3847         md.max_size  = 0;
3848         md.options   = LNET_MD_TRACK_RESPONSE;
3849         md.handler   = the_lnet.ln_dc_handler;
3850         md.user_ptr  = lp;
3851
3852         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3853         if (rc) {
3854                 lnet_ping_buffer_decref(pbuf);
3855                 CERROR("Can't bind push source MD: %d\n", rc);
3856                 goto fail_error;
3857         }
3858
3859         cpt = lnet_net_lock_current();
3860         /* Refcount for MD. */
3861         lnet_peer_addref_locked(lp);
3862         id.pid = LNET_PID_LUSTRE;
3863         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3864                 id.nid = lp->lp_disc_dst_nid;
3865         else
3866                 id.nid = lp->lp_primary_nid;
3867         lnet_net_unlock(cpt);
3868
3869         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3870                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3871                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3872
3873         /*
3874          * Reset the discovery NIDs. There is no need to restrict
3875          * sending from that source if we call
3876          * lnet_push_update_to_peers(). They will be set to specific
3877          * NIDs if we initiate discovery from scratch.
3878          */
3879         lp->lp_disc_src_nid = LNET_ANY_NID;
3880         lp->lp_disc_dst_nid = LNET_ANY_NID;
3881
3882         if (rc)
3883                 goto fail_unlink;
3884
3885         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3886
3887         spin_lock(&lp->lp_lock);
3888         return 0;
3889
3890 fail_unlink:
3891         LNetMDUnlink(lp->lp_push_mdh);
3892         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3893 fail_error:
3894         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3895                lp, rc);
3896         /*
3897          * The errors that get us here are considered hard errors and
3898          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3899          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3900          * because the unlink event handler will have set it if we
3901          * called LNetMDUnlink() above.
3902          */
3903         spin_lock(&lp->lp_lock);
3904         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3905         return rc;
3906 }
3907
3908 /*
3909  * Wait for work to be queued or some other change that must be
3910  * attended to. Returns non-zero if the discovery thread should shut
3911  * down.
3912  */
3913 static int lnet_peer_discovery_wait_for_work(void)
3914 {
3915         int cpt;
3916         int rc = 0;
3917
3918         DEFINE_WAIT(wait);
3919
3920         cpt = lnet_net_lock_current();
3921         for (;;) {
3922                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3923                                 TASK_INTERRUPTIBLE);
3924                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3925                         break;
3926                 if (lnet_push_target_resize_needed() ||
3927                     the_lnet.ln_push_target->pb_needs_post)
3928                         break;
3929                 if (!list_empty(&the_lnet.ln_dc_request))
3930                         break;
3931                 if (!list_empty(&the_lnet.ln_msg_resend))
3932                         break;
3933                 lnet_net_unlock(cpt);
3934
3935                 /*
3936                  * Wake up at most once per second to check whether any
3937                  * peers have been stuck on the working queue for longer
3938                  * than the peer timeout.
3939                  */
3940                 schedule_timeout(cfs_time_seconds(1));
3941                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3942                 cpt = lnet_net_lock_current();
3943         }
3944         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3945
3946         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3947                 rc = -ESHUTDOWN;
3948
3949         lnet_net_unlock(cpt);
3950
3951         CDEBUG(D_NET, "woken: %d\n", rc);
3952
3953         return rc;
3954 }
3955
3956 /*
3957  * Messages that were pending on a destroyed peer will be put on a global
3958  * resend list. The message resend list will be checked by
3959  * the discovery thread when it wakes up, and will resend messages. These
3960  * messages may still be sendable if the lpni which was the initial
3961  * cause of the message re-queue was transferred to another peer.
3962  *
3963  * It is possible that LNet could be shut down while we're iterating
3964  * through the list. lnet_shutdown_lndnets() will attempt to access the
3965  * resend list, but will have to wait until the spinlock is released, by
3966  * which time there shouldn't be any more messages on the resend list.
3967  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3968  * for the messages so they can be released. The other case is that
3969  * lnet_shutdown_lndnets() can finalize all the messages before this
3970  * function can visit the resend list, in which case this function will be
3971  * a no-op.
3972  */
3973 static void lnet_resend_msgs(void)
3974 {
3975         struct lnet_msg *msg, *tmp;
3976         LIST_HEAD(resend);
3977         int rc;
3978
3979         spin_lock(&the_lnet.ln_msg_resend_lock);
3980         list_splice(&the_lnet.ln_msg_resend, &resend);
3981         spin_unlock(&the_lnet.ln_msg_resend_lock);
3982
3983         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3984                 list_del_init(&msg->msg_list);
3985                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3986                                &msg->msg_rtr_nid_param);
3987                 if (rc < 0) {
3988                         CNETERR("Error sending %s to %s: %d\n",
3989                                lnet_msgtyp2str(msg->msg_type),
3990                                libcfs_idstr(&msg->msg_target), rc);
3991                         lnet_finalize(msg, rc);
3992                 }
3993         }
3994 }
3995
3996 /* The discovery thread. */
3997 static int lnet_peer_discovery(void *arg)
3998 {
3999         struct lnet_peer *lp;
4000         int rc;
4001
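        /* Block until LNet startup has finished; lnet_peer_discovery_stop()
         * completes ln_started if discovery is stopped before that happens.
         */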
4002         wait_for_completion(&the_lnet.ln_started);
4003
4004         CDEBUG(D_NET, "started\n");
4005
4006         for (;;) {
4007                 if (lnet_peer_discovery_wait_for_work())
4008                         break;
4009
4010                 if (lnet_push_target_resize_needed())
4011                         lnet_push_target_resize();
4012                 else if (the_lnet.ln_push_target->pb_needs_post)
4013                         lnet_push_target_post(the_lnet.ln_push_target,
4014                                               &the_lnet.ln_push_target_md);
4015
4016                 lnet_resend_msgs();
4017
4018                 lnet_net_lock(LNET_LOCK_EX);
4019                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4020                         lnet_net_unlock(LNET_LOCK_EX);
4021                         break;
4022                 }
4023
4024                 /*
4025                  * Process all incoming discovery work requests.  When
4026                  * discovery must wait on a peer to change state, it
4027                  * is added to the tail of the ln_dc_working queue. A
4028                  * timestamp keeps track of when the peer was added,
4029                  * so we can time out discovery requests that take too
4030                  * long.
4031                  */
4032                 while (!list_empty(&the_lnet.ln_dc_request)) {
4033                         lp = list_first_entry(&the_lnet.ln_dc_request,
4034                                               struct lnet_peer, lp_dc_list);
4035                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4036                         /*
4037                          * set the time the peer was put on the dc_working
4038                          * queue. It shouldn't remain on the queue
4039                          * forever, in case the GET message (for ping)
4040                          * doesn't get a REPLY or the PUT message (for
4041                          * push) doesn't get an ACK.
4042                          */
4043                         lp->lp_last_queued = ktime_get_real_seconds();
4044                         lnet_net_unlock(LNET_LOCK_EX);
4045
4046                         if (lnet_push_target_resize_needed())
4047                                 lnet_push_target_resize();
4048                         else if (the_lnet.ln_push_target->pb_needs_post)
4049                                 lnet_push_target_post(the_lnet.ln_push_target,
4050                                                       &the_lnet.ln_push_target_md);
4051
4052                         /*
4053                          * Select an action depending on the state of
4054                          * the peer and whether discovery is disabled.
4055                          * The check whether discovery is disabled is
4056                          * done after the code that handles processing
4057                          * for arrived data, cleanup for failures, and
4058                          * forcing a Ping or Push.
4059                          */
4060                         spin_lock(&lp->lp_lock);
4061                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4062                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4063                                 lp->lp_state);
4064                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4065                                             LNET_PEER_MARK_DELETED))
4066                                 rc = lnet_peer_deletion(lp);
4067                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4068                                 rc = lnet_peer_data_present(lp);
4069                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4070                                 rc = lnet_peer_ping_failed(lp);
4071                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4072                                 rc = lnet_peer_push_failed(lp);
4073                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4074                                 rc = lnet_peer_send_ping(lp);
4075                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4076                                 rc = lnet_peer_send_push(lp);
4077                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4078                                 rc = lnet_peer_send_ping(lp);
4079                         else if (lnet_peer_needs_push(lp))
4080                                 rc = lnet_peer_send_push(lp);
4081                         else
4082                                 rc = lnet_peer_discovered(lp);
4083                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4084                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4085                                 lp->lp_state, rc);
4086
4087                         if (rc == LNET_REDISCOVER_PEER) {
4088                                 spin_unlock(&lp->lp_lock);
4089                                 lnet_net_lock(LNET_LOCK_EX);
4090                                 list_move(&lp->lp_dc_list,
4091                                           &the_lnet.ln_dc_request);
4092                         } else if (rc ||
4093                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4094                                 spin_unlock(&lp->lp_lock);
4095                                 lnet_net_lock(LNET_LOCK_EX);
4096                                 lnet_peer_discovery_complete(lp, rc);
4097                         } else {
4098                                 spin_unlock(&lp->lp_lock);
4099                                 lnet_net_lock(LNET_LOCK_EX);
4100                         }
4101
4102                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4103                                 break;
4104
4105                 }
4106
4107                 lnet_net_unlock(LNET_LOCK_EX);
4108         }
4109
4110         CDEBUG(D_NET, "stopping\n");
4111         /*
4112          * Clean up before telling lnet_peer_discovery_stop() that
4113          * we're done. Use wake_up() below to somewhat reduce the
4114          * size of the thundering herd if there are multiple threads
4115          * waiting on discovery of a single peer.
4116          */
4117
4118         /* Queue cleanup 1: stop all pending pings and pushes. */
4119         lnet_net_lock(LNET_LOCK_EX);
4120         while (!list_empty(&the_lnet.ln_dc_working)) {
4121                 lp = list_first_entry(&the_lnet.ln_dc_working,
4122                                       struct lnet_peer, lp_dc_list);
4123                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4124                 lnet_net_unlock(LNET_LOCK_EX);
4125                 lnet_peer_cancel_discovery(lp);
4126                 lnet_net_lock(LNET_LOCK_EX);
4127         }
4128         lnet_net_unlock(LNET_LOCK_EX);
4129
4130         /* Queue cleanup 2: wait for the expired queue to clear. */
4131         while (!list_empty(&the_lnet.ln_dc_expired))
4132                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4133
4134         /* Queue cleanup 3: clear the request queue. */
4135         lnet_net_lock(LNET_LOCK_EX);
4136         while (!list_empty(&the_lnet.ln_dc_request)) {
4137                 lp = list_first_entry(&the_lnet.ln_dc_request,
4138                                       struct lnet_peer, lp_dc_list);
4139                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4140         }
4141         lnet_net_unlock(LNET_LOCK_EX);
4142
4143         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
4144         the_lnet.ln_dc_handler = NULL;
4145
4146         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4147         wake_up(&the_lnet.ln_dc_waitq);
4148
4149         CDEBUG(D_NET, "stopped\n");
4150
4151         return 0;
4152 }
4153
4154 /* ln_api_mutex is held on entry. */
4155 int lnet_peer_discovery_start(void)
4156 {
4157         struct task_struct *task;
4158         int rc = 0;
4159
4160         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4161                 return -EALREADY;
4162
4163         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4164         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4165         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4166         if (IS_ERR(task)) {
4167                 rc = PTR_ERR(task);
4168                 CERROR("Can't start peer discovery thread: %d\n", rc);
4169
4170                 the_lnet.ln_dc_handler = NULL;
4171
4172                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4173         }
4174
4175         CDEBUG(D_NET, "discovery start: %d\n", rc);
4176
4177         return rc;
4178 }
4179
4180 /* ln_api_mutex is held on entry. */
4181 void lnet_peer_discovery_stop(void)
4182 {
4183         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4184                 return;
4185
4186         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4187         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4188
4189         /* In the LNetNIInit() path we may be stopping discovery before it
4190          * entered its work loop
4191          */
4192         if (!completion_done(&the_lnet.ln_started))
4193                 complete(&the_lnet.ln_started);
4194         else
4195                 wake_up(&the_lnet.ln_dc_waitq);
4196
4197         mutex_unlock(&the_lnet.ln_api_mutex);
4198         wait_event(the_lnet.ln_dc_waitq,
4199                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4200         mutex_lock(&the_lnet.ln_api_mutex);
4201
4202         LASSERT(list_empty(&the_lnet.ln_dc_request));
4203         LASSERT(list_empty(&the_lnet.ln_dc_working));
4204         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4205
4206         CDEBUG(D_NET, "discovery stopped\n");
4207 }
4208
4209 /* Debugging */
4210
4211 void
4212 lnet_debug_peer(struct lnet_nid *nid)
4213 {
4214         char                    *aliveness = "NA";
4215         struct lnet_peer_ni     *lp;
4216         int                     cpt;
4217
4218         cpt = lnet_nid2cpt(nid, NULL);
4219         lnet_net_lock(cpt);
4220
4221         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4222         if (IS_ERR(lp)) {
4223                 lnet_net_unlock(cpt);
4224                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4225                 return;
4226         }
4227
4228         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4229                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4230
4231         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4232                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4233                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4234                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4235                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4236
4237         lnet_peer_ni_decref_locked(lp);
4238
4239         lnet_net_unlock(cpt);
4240 }
4241
4242 /* Gathering information for userspace. */
4243
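/*
 * Legacy (nid4-only) peer listing for the old ioctl interface: scan the
 * hash table of the CPT in *cpt_iter, skip @peer_index entries, and copy
 * out the counters of the first remaining NID4 peer NI.  *cpt_iter is
 * advanced past the last CPT once the scan completes.
 */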
4244 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4245                           char aliveness[LNET_MAX_STR_LEN],
4246                           __u32 *cpt_iter, __u32 *refcount,
4247                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4248                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4249                           __u32 *peer_tx_qnob)
4250 {
4251         struct lnet_peer_table          *peer_table;
4252         struct lnet_peer_ni             *lp;
4253         int                             j;
4254         int                             lncpt;
4255         bool                            found = false;
4256
4257         /* get the number of CPTs */
4258         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4259
4260         /* if the cpt number to be examined is >= the number of cpts in
4261          * the system then indicate that there are no more cpts to examine
4262          */
4263         if (*cpt_iter >= lncpt)
4264                 return -ENOENT;
4265
4266         /* get the current table */
4267         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4268         /* if the ptable is NULL then there are no more cpts to examine */
4269         if (peer_table == NULL)
4270                 return -ENOENT;
4271
4272         lnet_net_lock(*cpt_iter);
4273
4274         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4275                 struct list_head *peers = &peer_table->pt_hash[j];
4276
4277                 list_for_each_entry(lp, peers, lpni_hashlist) {
4278                         if (!nid_is_nid4(&lp->lpni_nid))
4279                                 continue;
4280                         if (peer_index-- > 0)
4281                                 continue;
4282
4283                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4284                         if (lnet_isrouter(lp) ||
4285                             lnet_peer_aliveness_enabled(lp))
4286                                 snprintf(aliveness, LNET_MAX_STR_LEN, "%s",
4287                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4288
4289                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4290                         *refcount = kref_read(&lp->lpni_kref);
4291                         *ni_peer_tx_credits =
4292                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4293                         *peer_tx_credits = lp->lpni_txcredits;
4294                         *peer_rtr_credits = lp->lpni_rtrcredits;
4295                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4296                         *peer_tx_qnob = lp->lpni_txqnob;
4297
4298                         found = true;
4299                 }
4300
4301         }
4302         lnet_net_unlock(*cpt_iter);
4303
4304         *cpt_iter = lncpt;
4305
4306         return found ? 0 : -ENOENT;
4307 }
4308
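/*
 * Each peer NI contributes one fixed-size record to the user buffer.
 * Purely as an illustration -- no such struct exists in the source, the
 * record is emitted field by field below, back to back and without
 * padding -- the layout is equivalent to:
 *
 *	struct peer_ni_record {
 *		lnet_nid_t				nid;
 *		struct lnet_peer_ni_credit_info		info;
 *		struct lnet_ioctl_element_stats		stats;
 *		struct lnet_ioctl_element_msg_stats	msg_stats;
 *		struct lnet_ioctl_peer_ni_hstats	hstats;
 *	};
 *
 * and the size check below allows for lp_nnis such records.
 */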
4309 /* ln_api_mutex is held, which keeps the peer list stable */
4310 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4311 {
4312         struct lnet_ioctl_element_stats *lpni_stats;
4313         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4314         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4315         struct lnet_peer_ni_credit_info *lpni_info;
4316         struct lnet_peer_ni *lpni;
4317         struct lnet_peer *lp;
4318         lnet_nid_t nid4;
4319         struct lnet_nid nid;
4320         __u32 size;
4321         int rc;
4322
4323         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4324         lp = lnet_find_peer(&nid);
4325
4326         if (!lp) {
4327                 rc = -ENOENT;
4328                 goto out;
4329         }
4330
4331         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4332                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4333         size *= lp->lp_nnis;
4334         if (size > cfg->prcfg_size) {
4335                 cfg->prcfg_size = size;
4336                 rc = -E2BIG;
4337                 goto out_lp_decref;
4338         }
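        /* On -E2BIG, the prcfg_size just stored tells userspace how large
         * a buffer to retry with.
         */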
4339
4340         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4341         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4342         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4343         cfg->prcfg_count = lp->lp_nnis;
4344         cfg->prcfg_size = size;
4345         cfg->prcfg_state = lp->lp_state;
4346
4347         /* Allocate helper buffers. */
4348         rc = -ENOMEM;
4349         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4350         if (!lpni_info)
4351                 goto out_lp_decref;
4352         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4353         if (!lpni_stats)
4354                 goto out_free_info;
4355         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4356         if (!lpni_msg_stats)
4357                 goto out_free_stats;
4358         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4359         if (!lpni_hstats)
4360                 goto out_free_msg_stats;
4361
4363         lpni = NULL;
4364         rc = -EFAULT;
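        /* Walk every peer NI on every net of this peer; rc remains -EFAULT
         * if any of the copy_to_user() calls below fails.
         */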
4365         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4366                 if (!nid_is_nid4(&lpni->lpni_nid))
4367                         continue;
4368                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4369                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4370                         goto out_free_hstats;
4371                 bulk += sizeof(nid4);
4372
4373                 memset(lpni_info, 0, sizeof(*lpni_info));
4374                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4375                 if (lnet_isrouter(lpni) ||
4376                     lnet_peer_aliveness_enabled(lpni))
4377                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "%s",
4378                                  lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4379
4380                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4381                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4382                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4383                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4384                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4385                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4386                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4387                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4388                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4389                         goto out_free_hstats;
4390                 bulk += sizeof(*lpni_info);
4391
4392                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4393                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4394                                                             LNET_STATS_TYPE_SEND);
4395                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4396                                                             LNET_STATS_TYPE_RECV);
4397                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4398                                                             LNET_STATS_TYPE_DROP);
4399                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4400                         goto out_free_hstats;
4401                 bulk += sizeof(*lpni_stats);
4402                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4403                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4404                         goto out_free_hstats;
4405                 bulk += sizeof(*lpni_msg_stats);
4406                 lpni_hstats->hlpni_network_timeout =
4407                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4408                 lpni_hstats->hlpni_remote_dropped =
4409                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4410                 lpni_hstats->hlpni_remote_timeout =
4411                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4412                 lpni_hstats->hlpni_remote_error =
4413                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4414                 lpni_hstats->hlpni_health_value =
4415                   atomic_read(&lpni->lpni_healthv);
4416                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4417                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4418                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4419                         goto out_free_hstats;
4420                 bulk += sizeof(*lpni_hstats);
4421         }
4422         rc = 0;
4423
4424 out_free_hstats:
4425         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4426 out_free_msg_stats:
4427         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4428 out_free_stats:
4429         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4430 out_free_info:
4431         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4432 out_lp_decref:
4433         lnet_peer_decref_locked(lp);
4434 out:
4435         return rc;
4436 }
4437
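/*
 * Queue an unhealthy peer NI for recovery pings.  The NI is skipped if
 * the monitor thread is not running, it is already queued, it is at full
 * health, it was never known to be alive, or lnet_recovery_limit seconds
 * have passed since it was last alive.
 */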
4438 /* must hold net_lock/0 */
4439 void
4440 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4441                                      struct list_head *recovery_queue,
4442                                      time64_t now)
4443 {
4444         /* the monitor thread could have shut down and cleaned up the queues */
4445         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4446                 return;
4447
4448         if (!list_empty(&lpni->lpni_recovery))
4449                 return;
4450
4451         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4452                 return;
4453
4454         if (!lpni->lpni_last_alive) {
4455                 CDEBUG(D_NET,
4456                        "lpni %s(%p) not eligible for recovery, last alive %lld\n",
4457                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4458                        lpni->lpni_last_alive);
4459                 return;
4460         }
4461
4462         if (lnet_recovery_limit &&
4463             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4464                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4465                        libcfs_nidstr(&lpni->lpni_nid),
4466                        lpni->lpni_last_alive);
4467                 /* Reset the ping count so that if this peer NI is added back to
4468                  * the recovery queue we will send the first ping right away.
4469                  */
4470                 lpni->lpni_ping_count = 0;
4471                 return;
4472         }
4473
4474         /* This peer NI is going on the recovery queue, so take a ref on it */
4475         lnet_peer_ni_addref_locked(lpni);
4476
4477         lnet_peer_ni_set_next_ping(lpni, now);
4478
4479         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4480                libcfs_nidstr(&lpni->lpni_nid),
4481                lpni->lpni_ping_count,
4482                lpni->lpni_next_ping,
4483                lpni->lpni_last_alive,
4484                atomic_read(&lpni->lpni_healthv));
4485
4486         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4487 }
4488
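/*
 * Set the health value of a single peer NI (matching @nid4) or, when
 * @all is set, of every peer NI in the system, queueing each one for
 * recovery if it ends up below full health.
 */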
4489 /* Call with the ln_api_mutex held */
4490 void
4491 lnet_peer_ni_set_healthv(lnet_nid_t nid4, int value, bool all)
4492 {
4493         struct lnet_peer_table *ptable;
4494         struct lnet_peer *lp;
4495         struct lnet_peer_net *lpn;
4496         struct lnet_peer_ni *lpni;
4497         struct lnet_nid nid;
4498         int lncpt;
4499         int cpt;
4500         time64_t now;
4501
4502         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4503                 return;
4504
4505         lnet_nid4_to_nid(nid4, &nid);
4506         now = ktime_get_seconds();
4507
4508         if (!all) {
4509                 lnet_net_lock(LNET_LOCK_EX);
4510                 lpni = lnet_peer_ni_find_locked(&nid);
4511                 if (!lpni) {
4512                         lnet_net_unlock(LNET_LOCK_EX);
4513                         return;
4514                 }
4515                 lnet_set_lpni_healthv_locked(lpni, value);
4516                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4517                                              &the_lnet.ln_mt_peerNIRecovq, now);
4518                 lnet_peer_ni_decref_locked(lpni);
4519                 lnet_net_unlock(LNET_LOCK_EX);
4520                 return;
4521         }
4522
4523         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4524
4525         /*
4526          * Walk all the peers and reset the health value for each one to the
4527          * specified value.
4528          */
4529         lnet_net_lock(LNET_LOCK_EX);
4530         for (cpt = 0; cpt < lncpt; cpt++) {
4531                 ptable = the_lnet.ln_peer_tables[cpt];
4532                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4533                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4534                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4535                                                     lpni_peer_nis) {
4536                                         lnet_set_lpni_healthv_locked(lpni,
4537                                                                      value);
4538                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4539                                              &the_lnet.ln_mt_peerNIRecovq, now);
4540                                 }
4541                         }
4542                 }
4543         }
4544         lnet_net_unlock(LNET_LOCK_EX);
4545 }
4546
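/*
 * Illustrative only -- not part of this file: a hypothetical caller is
 * expected to hold ln_api_mutex around the call, e.g.:
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	lnet_peer_ni_set_healthv(nid4, LNET_MAX_HEALTH_VALUE, false);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 *
 * mirroring the pattern the ioctl handlers use before calling in.
 */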