Whamcloud - gitweb
LU-17379 lnet: parallelize peer discovery via LNetAddPeer
[fs/lustre-release.git] / lnet / lnet / peer.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4  * Use is subject to license terms.
5  *
6  * Copyright (c) 2012, 2017, Intel Corporation.
7  */
8
9 /* This file is part of Lustre, http://www.lustre.org/ */
10
11 #define DEBUG_SUBSYSTEM S_LNET
12
13 #include <linux/sched.h>
14 #ifdef HAVE_SCHED_HEADERS
15 #include <linux/sched/signal.h>
16 #endif
17 #include <linux/uaccess.h>
18
19 #include <lnet/udsp.h>
20 #include <lnet/lib-lnet.h>
21 #include <uapi/linux/lnet/lnet-dlc.h>
22
23 /* Value indicating that recovery needs to re-check a peer immediately. */
24 #define LNET_REDISCOVER_PEER    (1)
25
26 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
27 static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
28                             unsigned int flags);
29
30 static void
31 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
32 {
33         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
34                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
35                 lnet_peer_ni_decref_locked(lpni);
36         }
37 }
38
39 void
40 lnet_peer_net_added(struct lnet_net *net)
41 {
42         struct lnet_peer_ni *lpni, *tmp;
43
44         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
45                                  lpni_on_remote_peer_ni_list) {
46
47                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
48                         lpni->lpni_net = net;
49
50                         spin_lock(&lpni->lpni_lock);
51                         lpni->lpni_txcredits =
52                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
53                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
54                         lpni->lpni_rtrcredits =
55                                 lnet_peer_buffer_credits(lpni->lpni_net);
56                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
57                         spin_unlock(&lpni->lpni_lock);
58
59                         lnet_peer_remove_from_remote_list(lpni);
60                 }
61         }
62 }
63
64 static void
65 lnet_peer_tables_destroy(void)
66 {
67         struct lnet_peer_table  *ptable;
68         struct list_head        *hash;
69         int                     i;
70         int                     j;
71
72         if (!the_lnet.ln_peer_tables)
73                 return;
74
75         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
76                 hash = ptable->pt_hash;
77                 if (!hash) /* not intialized */
78                         break;
79
80                 LASSERT(list_empty(&ptable->pt_zombie_list));
81
82                 ptable->pt_hash = NULL;
83                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
84                         LASSERT(list_empty(&hash[j]));
85
86                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
87         }
88
89         cfs_percpt_free(the_lnet.ln_peer_tables);
90         the_lnet.ln_peer_tables = NULL;
91 }
92
93 int
94 lnet_peer_tables_create(void)
95 {
96         struct lnet_peer_table  *ptable;
97         struct list_head        *hash;
98         int                     i;
99         int                     j;
100
101         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
102                                                    sizeof(*ptable));
103         if (the_lnet.ln_peer_tables == NULL) {
104                 CERROR("Failed to allocate cpu-partition peer tables\n");
105                 return -ENOMEM;
106         }
107
108         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
109                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
110                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
111                 if (hash == NULL) {
112                         CERROR("Failed to create peer hash table\n");
113                         lnet_peer_tables_destroy();
114                         return -ENOMEM;
115                 }
116
117                 spin_lock_init(&ptable->pt_zombie_lock);
118                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
119
120                 INIT_LIST_HEAD(&ptable->pt_peer_list);
121
122                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
123                         INIT_LIST_HEAD(&hash[j]);
124                 ptable->pt_hash = hash; /* sign of initialization */
125         }
126
127         return 0;
128 }
129
130 static struct lnet_peer_ni *
131 lnet_peer_ni_alloc(struct lnet_nid *nid)
132 {
133         struct lnet_peer_ni *lpni;
134         struct lnet_net *net;
135         int cpt;
136
137         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
138
139         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
140         if (!lpni)
141                 return NULL;
142
143         INIT_LIST_HEAD(&lpni->lpni_txq);
144         INIT_LIST_HEAD(&lpni->lpni_hashlist);
145         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
146         INIT_LIST_HEAD(&lpni->lpni_recovery);
147         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
148         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
149         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
150         kref_init(&lpni->lpni_kref);
151         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
152
153         spin_lock_init(&lpni->lpni_lock);
154
155         if (lnet_peers_start_down())
156                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
157         else
158                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
159         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
160         lpni->lpni_nid = *nid;
161         lpni->lpni_cpt = cpt;
162         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
163
164         net = lnet_get_net_locked(LNET_NID_NET(nid));
165         lpni->lpni_net = net;
166         if (net) {
167                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
168                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
169                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
170                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
171         } else {
172                 /*
173                  * This peer_ni is not on a local network, so we
174                  * cannot add the credits here. In case the net is
175                  * added later, add the peer_ni to the remote peer ni
176                  * list so it can be easily found and revisited.
177                  */
178                 /* FIXME: per-net implementation instead? */
179                 lnet_peer_ni_addref_locked(lpni);
180                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
181                               &the_lnet.ln_remote_peer_ni_list);
182         }
183
184         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
185
186         return lpni;
187 }
188
189 static struct lnet_peer_net *
190 lnet_peer_net_alloc(__u32 net_id)
191 {
192         struct lnet_peer_net *lpn;
193
194         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
195         if (!lpn)
196                 return NULL;
197
198         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
199         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
200         lpn->lpn_net_id = net_id;
201         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
202
203         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
204
205         return lpn;
206 }
207
208 void
209 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
210 {
211         struct lnet_peer *lp;
212
213         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
214
215         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
216         LASSERT(list_empty(&lpn->lpn_peer_nis));
217         LASSERT(list_empty(&lpn->lpn_peer_nets));
218         lp = lpn->lpn_peer;
219         lpn->lpn_peer = NULL;
220         LIBCFS_FREE(lpn, sizeof(*lpn));
221
222         lnet_peer_decref_locked(lp);
223 }
224
225 static struct lnet_peer *
226 lnet_peer_alloc(struct lnet_nid *nid)
227 {
228         struct lnet_peer *lp;
229
230         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
231         if (!lp)
232                 return NULL;
233
234         INIT_LIST_HEAD(&lp->lp_rtrq);
235         INIT_LIST_HEAD(&lp->lp_routes);
236         INIT_LIST_HEAD(&lp->lp_peer_list);
237         INIT_LIST_HEAD(&lp->lp_peer_nets);
238         INIT_LIST_HEAD(&lp->lp_dc_list);
239         INIT_LIST_HEAD(&lp->lp_dc_pendq);
240         INIT_LIST_HEAD(&lp->lp_rtr_list);
241         init_waitqueue_head(&lp->lp_dc_waitq);
242         spin_lock_init(&lp->lp_lock);
243         lp->lp_primary_nid = *nid;
244         lp->lp_disc_src_nid = LNET_ANY_NID;
245         lp->lp_disc_dst_nid = LNET_ANY_NID;
246         lp->lp_merge_primary_nid = LNET_ANY_NID;
247         if (lnet_peers_start_down())
248                 lp->lp_alive = false;
249         else
250                 lp->lp_alive = true;
251
252         /*
253          * all peers created on a router should have health on
254          * if it's not already on.
255          */
256         if (the_lnet.ln_routing && !lnet_health_sensitivity)
257                 lp->lp_health_sensitivity = 1;
258
259         /*
260          * Turn off discovery for loopback peer. If you're creating a peer
261          * for the loopback interface then that was initiated when we
262          * attempted to send a message over the loopback. There is no need
263          * to ever use a different interface when sending messages to
264          * myself.
265          */
266         if (nid_is_lo0(nid))
267                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
268         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
269
270         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
271
272         return lp;
273 }
274
275 void
276 lnet_destroy_peer_locked(struct lnet_peer *lp)
277 {
278         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
279
280         LASSERT(atomic_read(&lp->lp_refcount) == 0);
281         LASSERT(lp->lp_rtr_refcount == 0);
282         LASSERT(list_empty(&lp->lp_peer_nets));
283         LASSERT(list_empty(&lp->lp_peer_list));
284         LASSERT(list_empty(&lp->lp_dc_list));
285
286         if (lp->lp_data)
287                 lnet_ping_buffer_decref(lp->lp_data);
288
289         /*
290          * if there are messages still on the pending queue, then make
291          * sure to queue them on the ln_msg_resend list so they can be
292          * resent at a later point if the discovery thread is still
293          * running.
294          * If the discovery thread has stopped, then the wakeup will be a
295          * no-op, and it is expected the lnet_shutdown_lndnets() will
296          * eventually be called, which will traverse this list and
297          * finalize the messages on the list.
298          * We can not resend them now because we're holding the cpt lock.
299          * Releasing the lock can cause an inconsistent state
300          */
301         spin_lock(&the_lnet.ln_msg_resend_lock);
302         spin_lock(&lp->lp_lock);
303         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
304         spin_unlock(&lp->lp_lock);
305         spin_unlock(&the_lnet.ln_msg_resend_lock);
306         wake_up(&the_lnet.ln_dc_waitq);
307
308         LIBCFS_FREE(lp, sizeof(*lp));
309 }
310
311 /*
312  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
313  * that peer_net, detach the peer_net from the peer.
314  *
315  * Call with lnet_net_lock/EX held
316  */
317 static void
318 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
319 {
320         struct lnet_peer_table *ptable;
321         struct lnet_peer_net *lpn;
322         struct lnet_peer *lp;
323
324         /*
325          * Belts and suspenders: gracefully handle teardown of a
326          * partially connected peer_ni.
327          */
328         lpn = lpni->lpni_peer_net;
329
330         list_del_init(&lpni->lpni_peer_nis);
331         /*
332          * If there are no lpni's left, we detach lpn from
333          * lp_peer_nets, so it cannot be found anymore.
334          */
335         if (list_empty(&lpn->lpn_peer_nis))
336                 list_del_init(&lpn->lpn_peer_nets);
337
338         /* Update peer NID count. */
339         lp = lpn->lpn_peer;
340         lp->lp_nnis--;
341
342         /*
343          * If there are no more peer nets, make the peer unfindable
344          * via the peer_tables.
345          *
346          * Otherwise, if the peer is DISCOVERED, tell discovery to
347          * take another look at it. This is a no-op if discovery for
348          * this peer did the detaching.
349          */
350         if (list_empty(&lp->lp_peer_nets)) {
351                 list_del_init(&lp->lp_peer_list);
352                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
353                 ptable->pt_peers--;
354         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
355                 /* Discovery isn't running, nothing to do here. */
356         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
357                 lnet_peer_queue_for_discovery(lp);
358                 wake_up(&the_lnet.ln_dc_waitq);
359         }
360         CDEBUG(D_NET, "peer %s NID %s\n",
361                 libcfs_nidstr(&lp->lp_primary_nid),
362                 libcfs_nidstr(&lpni->lpni_nid));
363 }
364
365 /* called with lnet_net_lock LNET_LOCK_EX held */
366 static int
367 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
368 {
369         struct lnet_peer_table *ptable = NULL;
370
371         /* don't remove a peer_ni if it's also a gateway */
372         if (lnet_isrouter(lpni) && !force) {
373                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
374                        libcfs_nidstr(&lpni->lpni_nid));
375                 return -EBUSY;
376         }
377
378         lnet_peer_remove_from_remote_list(lpni);
379
380         /* remove peer ni from the hash list. */
381         list_del_init(&lpni->lpni_hashlist);
382
383         /*
384          * indicate the peer is being deleted so the monitor thread can
385          * remove it from the recovery queue.
386          */
387         spin_lock(&lpni->lpni_lock);
388         lpni->lpni_state |= LNET_PEER_NI_DELETING;
389         spin_unlock(&lpni->lpni_lock);
390
391         /* decrement the ref count on the peer table */
392         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
393
394         /*
395          * The peer_ni can no longer be found with a lookup. But there
396          * can be current users, so keep track of it on the zombie
397          * list until the reference count has gone to zero.
398          *
399          * The last reference may be lost in a place where the
400          * lnet_net_lock locks only a single cpt, and that cpt may not
401          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
402          * has its own lock.
403          */
404         spin_lock(&ptable->pt_zombie_lock);
405         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
406         ptable->pt_zombies++;
407         spin_unlock(&ptable->pt_zombie_lock);
408
409         /* no need to keep this peer_ni on the hierarchy anymore */
410         lnet_peer_detach_peer_ni_locked(lpni);
411
412         /* remove hashlist reference on peer_ni */
413         lnet_peer_ni_decref_locked(lpni);
414
415         return 0;
416 }
417
418 void lnet_peer_uninit(void)
419 {
420         struct lnet_peer_ni *lpni, *tmp;
421
422         lnet_net_lock(LNET_LOCK_EX);
423
424         /* remove all peer_nis from the remote peer and the hash list */
425         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
426                                  lpni_on_remote_peer_ni_list)
427                 lnet_peer_ni_del_locked(lpni, false);
428
429         lnet_peer_tables_destroy();
430
431         lnet_net_unlock(LNET_LOCK_EX);
432 }
433
434 static int
435 lnet_peer_del_locked(struct lnet_peer *peer)
436 {
437         struct lnet_peer_ni *lpni = NULL, *lpni2;
438         int rc = 0, rc2 = 0;
439
440         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
441
442         spin_lock(&peer->lp_lock);
443         peer->lp_state |= LNET_PEER_MARK_DELETED;
444         spin_unlock(&peer->lp_lock);
445
446         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
447         while (lpni != NULL) {
448                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
449                 rc = lnet_peer_ni_del_locked(lpni, false);
450                 if (rc != 0)
451                         rc2 = rc;
452                 lpni = lpni2;
453         }
454
455         return rc2;
456 }
457
458 /*
459  * Discovering this peer is taking too long. Cancel any Ping or Push
460  * that discovery is waiting on by unlinking the relevant MDs. The
461  * lnet_discovery_event_handler() will proceed from here and complete
462  * the cleanup.
463  */
464 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
465 {
466         struct lnet_handle_md ping_mdh;
467         struct lnet_handle_md push_mdh;
468
469         LNetInvalidateMDHandle(&ping_mdh);
470         LNetInvalidateMDHandle(&push_mdh);
471
472         spin_lock(&lp->lp_lock);
473         if (lp->lp_state & LNET_PEER_PING_SENT) {
474                 ping_mdh = lp->lp_ping_mdh;
475                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
476         }
477         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
478                 push_mdh = lp->lp_push_mdh;
479                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
480         }
481         spin_unlock(&lp->lp_lock);
482
483         if (!LNetMDHandleIsInvalid(ping_mdh))
484                 LNetMDUnlink(ping_mdh);
485         if (!LNetMDHandleIsInvalid(push_mdh))
486                 LNetMDUnlink(push_mdh);
487 }
488
489 static int
490 lnet_peer_del(struct lnet_peer *peer)
491 {
492         int rc;
493
494         lnet_peer_cancel_discovery(peer);
495         lnet_net_lock(LNET_LOCK_EX);
496         rc = lnet_peer_del_locked(peer);
497         lnet_net_unlock(LNET_LOCK_EX);
498
499         return rc;
500 }
501
502 /*
503  * Delete a NID from a peer. Call with ln_api_mutex held.
504  *
505  * Error codes:
506  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
507  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
508  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
509  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
510  */
511 static int
512 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
513                   unsigned int flags)
514 {
515         struct lnet_peer_ni *lpni;
516         struct lnet_nid primary_nid = lp->lp_primary_nid;
517         int rc = 0;
518         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
519
520         if (!(flags & LNET_PEER_CONFIGURED)) {
521                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
522                         rc = -EPERM;
523                         goto out;
524                 }
525         }
526
527         /* If we're asked to lock down the primary NID we shouldn't be
528          * deleting it
529          */
530         if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
531             nid_same(&primary_nid, nid)) {
532                 rc = -EPERM;
533                 goto out;
534         }
535
536         lpni = lnet_peer_ni_find_locked(nid);
537         if (!lpni) {
538                 rc = -ENOENT;
539                 goto out;
540         }
541         if (lp != lpni->lpni_peer_net->lpn_peer) {
542                 rc = -ECHILD;
543                 lnet_peer_ni_decref_locked(lpni);
544                 goto out;
545         }
546
547         /*
548          * This function only allows deletion of the primary NID if it
549          * is the only NID.
550          */
551         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
552                 rc = -EBUSY;
553                 lnet_peer_ni_decref_locked(lpni);
554                 goto out;
555         }
556
557         lnet_net_lock(LNET_LOCK_EX);
558
559         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
560                 struct lnet_peer_ni *lpni2;
561                 /* assign the next peer_ni to be the primary */
562                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
563                 LASSERT(lpni2);
564                 lp->lp_primary_nid = lpni2->lpni_nid;
565         }
566         rc = lnet_peer_ni_del_locked(lpni, force);
567         lnet_peer_ni_decref_locked(lpni);
568
569         lnet_net_unlock(LNET_LOCK_EX);
570
571 out:
572         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
573                libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
574                flags, rc);
575
576         return rc;
577 }
578
579 static void
580 lnet_peer_table_cleanup_locked(struct lnet_net *net,
581                                struct lnet_peer_table *ptable)
582 {
583         int                      i;
584         struct lnet_peer_ni     *next;
585         struct lnet_peer_ni     *lpni;
586         struct lnet_peer        *peer;
587
588         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
589                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
590                                          lpni_hashlist) {
591                         if (net != NULL && net != lpni->lpni_net)
592                                 continue;
593
594                         peer = lpni->lpni_peer_net->lpn_peer;
595                         if (!nid_same(&peer->lp_primary_nid,
596                                        &lpni->lpni_nid)) {
597                                 lnet_peer_ni_del_locked(lpni, false);
598                                 continue;
599                         }
600                         /*
601                          * Removing the primary NID implies removing
602                          * the entire peer. Advance next beyond any
603                          * peer_ni that belongs to the same peer.
604                          */
605                         list_for_each_entry_from(next, &ptable->pt_hash[i],
606                                                  lpni_hashlist) {
607                                 if (next->lpni_peer_net->lpn_peer != peer)
608                                         break;
609                         }
610                         lnet_peer_del_locked(peer);
611                 }
612         }
613 }
614
615 static void
616 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
617 {
618         wait_var_event_warning(&ptable->pt_zombies,
619                                ptable->pt_zombies == 0,
620                                "Waiting for %d zombies on peer table\n",
621                                ptable->pt_zombies);
622 }
623
624 static void
625 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
626                                 struct lnet_peer_table *ptable)
627 {
628         struct lnet_peer_ni     *lp;
629         struct lnet_peer_ni     *tmp;
630         struct lnet_nid         gw_nid;
631         int                     i;
632
633         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
634                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
635                                          lpni_hashlist) {
636                         if (net != lp->lpni_net)
637                                 continue;
638
639                         if (!lnet_isrouter(lp))
640                                 continue;
641
642                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
643
644                         lnet_net_unlock(LNET_LOCK_EX);
645                         lnet_del_route(LNET_NET_ANY, &gw_nid);
646                         lnet_net_lock(LNET_LOCK_EX);
647                 }
648         }
649 }
650
651 void
652 lnet_peer_tables_cleanup(struct lnet_net *net)
653 {
654         int i;
655         struct lnet_peer_table *ptable;
656
657         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
658         /* If just deleting the peers for a NI, get rid of any routes these
659          * peers are gateways for. */
660         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
661                 lnet_net_lock(LNET_LOCK_EX);
662                 lnet_peer_table_del_rtrs_locked(net, ptable);
663                 lnet_net_unlock(LNET_LOCK_EX);
664         }
665
666         /* Start the cleanup process */
667         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
668                 lnet_net_lock(LNET_LOCK_EX);
669                 lnet_peer_table_cleanup_locked(net, ptable);
670                 lnet_net_unlock(LNET_LOCK_EX);
671         }
672
673         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
674                 lnet_peer_ni_finalize_wait(ptable);
675 }
676
677 static struct lnet_peer_ni *
678 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
679 {
680         struct list_head        *peers;
681         struct lnet_peer_ni     *lp;
682
683         if (the_lnet.ln_state != LNET_STATE_RUNNING)
684                 return NULL;
685
686         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
687         list_for_each_entry(lp, peers, lpni_hashlist) {
688                 if (nid_same(&lp->lpni_nid, nid)) {
689                         lnet_peer_ni_addref_locked(lp);
690                         return lp;
691                 }
692         }
693
694         return NULL;
695 }
696
697 struct lnet_peer_ni *
698 lnet_peer_ni_find_locked(struct lnet_nid *nid)
699 {
700         struct lnet_peer_ni *lpni;
701         struct lnet_peer_table *ptable;
702         int cpt;
703
704         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
705
706         ptable = the_lnet.ln_peer_tables[cpt];
707         lpni = lnet_get_peer_ni_locked(ptable, nid);
708
709         return lpni;
710 }
711
712 struct lnet_peer_ni *
713 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
714 {
715         struct lnet_peer_net *lpn;
716         struct lnet_peer_ni *lpni;
717
718         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
719         if (!lpn)
720                 return NULL;
721
722         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
723                 if (nid_same(&lpni->lpni_nid, nid))
724                         return lpni;
725         }
726
727         return NULL;
728 }
729
730 struct lnet_peer *
731 lnet_find_peer(struct lnet_nid *nid)
732 {
733         struct lnet_peer_ni *lpni;
734         struct lnet_peer *lp = NULL;
735         int cpt;
736
737         cpt = lnet_net_lock_current();
738         lpni = lnet_peer_ni_find_locked(nid);
739         if (lpni) {
740                 lp = lpni->lpni_peer_net->lpn_peer;
741                 lnet_peer_addref_locked(lp);
742                 lnet_peer_ni_decref_locked(lpni);
743         }
744         lnet_net_unlock(cpt);
745
746         return lp;
747 }
748
749 struct lnet_peer_net *
750 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
751 {
752         struct lnet_peer_net *net;
753
754         if (!prev_lpn_id) {
755                 /* no net id provided return the first net */
756                 net = list_first_entry_or_null(&lp->lp_peer_nets,
757                                                struct lnet_peer_net,
758                                                lpn_peer_nets);
759
760                 return net;
761         }
762
763         /* find the net after the one provided */
764         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
765                 if (net->lpn_net_id == prev_lpn_id) {
766                         /*
767                          * if we reached the end of the list loop to the
768                          * beginning.
769                          */
770                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
771                                 return list_first_entry_or_null(&lp->lp_peer_nets,
772                                                                 struct lnet_peer_net,
773                                                                 lpn_peer_nets);
774                         else
775                                 return list_next_entry(net, lpn_peer_nets);
776                 }
777         }
778
779         return NULL;
780 }
781
782 struct lnet_peer_ni *
783 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
784                              struct lnet_peer_net *peer_net,
785                              struct lnet_peer_ni *prev)
786 {
787         struct lnet_peer_ni *lpni;
788         struct lnet_peer_net *net = peer_net;
789
790         if (!prev) {
791                 if (!net) {
792                         if (list_empty(&peer->lp_peer_nets))
793                                 return NULL;
794
795                         net = list_first_entry(&peer->lp_peer_nets,
796                                                struct lnet_peer_net,
797                                                lpn_peer_nets);
798                 }
799                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
800                                         lpni_peer_nis);
801
802                 return lpni;
803         }
804
805         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
806                 /*
807                  * if you reached the end of the peer ni list and the peer
808                  * net is specified then there are no more peer nis in that
809                  * net.
810                  */
811                 if (net)
812                         return NULL;
813
814                 /*
815                  * we reached the end of this net ni list. move to the
816                  * next net
817                  */
818                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
819                     &peer->lp_peer_nets)
820                         /* no more nets and no more NIs. */
821                         return NULL;
822
823                 /* get the next net */
824                 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
825                                        struct lnet_peer_net,
826                                        lpn_peer_nets);
827                 /* get the ni on it */
828                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
829                                         lpni_peer_nis);
830
831                 return lpni;
832         }
833
834         /* there are more nis left */
835         lpni = list_first_entry(&prev->lpni_peer_nis,
836                                 struct lnet_peer_ni, lpni_peer_nis);
837
838         return lpni;
839 }
840
841 /* Call with the ln_api_mutex held */
842 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
843 {
844         struct lnet_process_id id;
845         struct lnet_peer_table *ptable;
846         struct lnet_peer *lp;
847         __u32 count = 0;
848         __u32 size = 0;
849         int lncpt;
850         int cpt;
851         __u32 i;
852         int rc;
853
854         rc = -ESHUTDOWN;
855         if (the_lnet.ln_state != LNET_STATE_RUNNING)
856                 goto done;
857
858         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
859
860         /*
861          * Count the number of peers, and return E2BIG if the buffer
862          * is too small. We'll also return the desired size.
863          */
864         rc = -E2BIG;
865         for (cpt = 0; cpt < lncpt; cpt++) {
866                 ptable = the_lnet.ln_peer_tables[cpt];
867                 count += ptable->pt_peers;
868         }
869         size = count * sizeof(*ids);
870         if (size > *sizep)
871                 goto done;
872
873         /*
874          * Walk the peer lists and copy out the primary nids.
875          * This is safe because the peer lists are only modified
876          * while the ln_api_mutex is held. So we don't need to
877          * hold the lnet_net_lock as well, and can therefore
878          * directly call copy_to_user().
879          */
880         rc = -EFAULT;
881         memset(&id, 0, sizeof(id));
882         id.pid = LNET_PID_LUSTRE;
883         i = 0;
884         for (cpt = 0; cpt < lncpt; cpt++) {
885                 ptable = the_lnet.ln_peer_tables[cpt];
886                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
887                         if (!nid_is_nid4(&lp->lp_primary_nid))
888                                 continue;
889                         if (i >= count)
890                                 goto done;
891                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
892                         if (copy_to_user(&ids[i], &id, sizeof(id)))
893                                 goto done;
894                         i++;
895                 }
896         }
897         rc = 0;
898 done:
899         *countp = count;
900         *sizep = size;
901         return rc;
902 }
903
904 /*
905  * Start pushes to peers that need to be updated for a configuration
906  * change on this node.
907  */
908 void
909 lnet_push_update_to_peers(int force)
910 {
911         struct lnet_peer_table *ptable;
912         struct lnet_peer *lp;
913         int lncpt;
914         int cpt;
915
916         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
917                 return;
918
919         lnet_net_lock(LNET_LOCK_EX);
920         if (lnet_peer_discovery_disabled)
921                 force = 0;
922         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
923         for (cpt = 0; cpt < lncpt; cpt++) {
924                 ptable = the_lnet.ln_peer_tables[cpt];
925                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
926                         if (force) {
927                                 spin_lock(&lp->lp_lock);
928                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
929                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
930                                 spin_unlock(&lp->lp_lock);
931                         }
932                         if (lnet_peer_needs_push(lp))
933                                 lnet_peer_queue_for_discovery(lp);
934                 }
935         }
936         lnet_net_unlock(LNET_LOCK_EX);
937         wake_up(&the_lnet.ln_dc_waitq);
938 }
939
940 /* find the NID in the preferred gateways for the remote peer
941  * return:
942  *      false: list is not empty and NID is not preferred
943  *      false: list is empty
944  *      true: nid is found in the list
945  */
946 bool
947 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
948                              struct lnet_nid *gw_nid)
949 {
950         struct lnet_nid_list *ne;
951
952         CDEBUG(D_NET, "%s: rtr pref emtpy: %d\n",
953                libcfs_nidstr(&lpni->lpni_nid),
954                list_empty(&lpni->lpni_rtr_pref_nids));
955
956         if (list_empty(&lpni->lpni_rtr_pref_nids))
957                 return false;
958
959         /* iterate through all the preferred NIDs and see if any of them
960          * matches the provided gw_nid
961          */
962         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
963                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
964                        libcfs_nidstr(&ne->nl_nid),
965                        libcfs_nidstr(gw_nid));
966                 if (nid_same(&ne->nl_nid, gw_nid))
967                         return true;
968         }
969
970         return false;
971 }
972
973 void
974 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
975 {
976         struct list_head zombies;
977         struct lnet_nid_list *ne;
978         struct lnet_nid_list *tmp;
979         int cpt = lpni->lpni_cpt;
980
981         INIT_LIST_HEAD(&zombies);
982
983         lnet_net_lock(cpt);
984         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
985         lnet_net_unlock(cpt);
986
987         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
988                 list_del(&ne->nl_list);
989                 LIBCFS_FREE(ne, sizeof(*ne));
990         }
991 }
992
993 int
994 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
995                        struct lnet_nid *gw_nid)
996 {
997         int cpt = lpni->lpni_cpt;
998         struct lnet_nid_list *ne = NULL;
999
1000         /* This function is called with api_mutex held. When the api_mutex
1001          * is held the list can not be modified, as it is only modified as
1002          * a result of applying a UDSP and that happens under api_mutex
1003          * lock.
1004          */
1005         __must_hold(&the_lnet.ln_api_mutex);
1006
1007         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1008                 if (nid_same(&ne->nl_nid, gw_nid))
1009                         return -EEXIST;
1010         }
1011
1012         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1013         if (!ne)
1014                 return -ENOMEM;
1015
1016         ne->nl_nid = *gw_nid;
1017
1018         /* Lock the cpt to protect against addition and checks in the
1019          * selection algorithm
1020          */
1021         lnet_net_lock(cpt);
1022         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1023         lnet_net_unlock(cpt);
1024
1025         return 0;
1026 }
1027
1028 /*
1029  * Test whether a ni is a preferred ni for this peer_ni, e.g, whether
1030  * this is a preferred point-to-point path. Call with lnet_net_lock in
1031  * shared mmode.
1032  */
1033 bool
1034 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1035 {
1036         struct lnet_nid_list *ne;
1037
1038         if (lpni->lpni_pref_nnids == 0)
1039                 return false;
1040         if (lpni->lpni_pref_nnids == 1)
1041                 return nid_same(&lpni->lpni_pref.nid, nid);
1042         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1043                 if (nid_same(&ne->nl_nid, nid))
1044                         return true;
1045         }
1046         return false;
1047 }
1048
1049 /*
1050  * Set a single ni as preferred, provided no preferred ni is already
1051  * defined. Only to be used for non-multi-rail peer_ni.
1052  */
1053 int
1054 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1055                                   struct lnet_nid *nid)
1056 {
1057         int rc = 0;
1058
1059         if (!nid)
1060                 return -EINVAL;
1061         spin_lock(&lpni->lpni_lock);
1062         if (LNET_NID_IS_ANY(nid)) {
1063                 rc = -EINVAL;
1064         } else if (lpni->lpni_pref_nnids > 0) {
1065                 rc = -EPERM;
1066         } else if (lpni->lpni_pref_nnids == 0) {
1067                 lpni->lpni_pref.nid = *nid;
1068                 lpni->lpni_pref_nnids = 1;
1069                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1070         }
1071         spin_unlock(&lpni->lpni_lock);
1072
1073         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1074                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1075         return rc;
1076 }
1077
1078 /*
1079  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1080  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1081  */
1082 static int
1083 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1084 {
1085         int rc = 0;
1086
1087         spin_lock(&lpni->lpni_lock);
1088         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1089                 lpni->lpni_pref_nnids = 0;
1090                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1091         } else if (lpni->lpni_pref_nnids == 0) {
1092                 rc = -ENOENT;
1093         } else {
1094                 rc = -EPERM;
1095         }
1096         spin_unlock(&lpni->lpni_lock);
1097
1098         CDEBUG(D_NET, "peer %s: %d\n",
1099                libcfs_nidstr(&lpni->lpni_nid), rc);
1100         return rc;
1101 }
1102
1103 void
1104 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1105 {
1106         lpni->lpni_sel_priority = priority;
1107 }
1108
1109 /*
1110  * Clear the preferred NIDs from a non-multi-rail peer.
1111  */
1112 static void
1113 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1114 {
1115         struct lnet_peer_ni *lpni = NULL;
1116
1117         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1118                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1119 }
1120
1121 int
1122 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1123 {
1124         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1125         struct lnet_nid_list *ne1 = NULL;
1126         struct lnet_nid_list *ne2 = NULL;
1127         struct lnet_nid *tmp_nid = NULL;
1128         int rc = 0;
1129
1130         if (LNET_NID_IS_ANY(nid)) {
1131                 rc = -EINVAL;
1132                 goto out;
1133         }
1134
1135         if (lpni->lpni_pref_nnids == 1 &&
1136             nid_same(&lpni->lpni_pref.nid, nid)) {
1137                 rc = -EEXIST;
1138                 goto out;
1139         }
1140
1141         /* A non-MR node may have only one preferred NI per peer_ni */
1142         if (lpni->lpni_pref_nnids > 0 &&
1143             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1144                 rc = -EPERM;
1145                 goto out;
1146         }
1147
1148         /* add the new preferred nid to the list of preferred nids */
1149         if (lpni->lpni_pref_nnids != 0) {
1150                 size_t alloc_size = sizeof(*ne1);
1151
1152                 if (lpni->lpni_pref_nnids == 1) {
1153                         tmp_nid = &lpni->lpni_pref.nid;
1154                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1155                 }
1156
1157                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1158                         if (nid_same(&ne1->nl_nid, nid)) {
1159                                 rc = -EEXIST;
1160                                 goto out;
1161                         }
1162                 }
1163
1164                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1165                                  alloc_size);
1166                 if (!ne1) {
1167                         rc = -ENOMEM;
1168                         goto out;
1169                 }
1170
1171                 /* move the originally stored nid to the list */
1172                 if (lpni->lpni_pref_nnids == 1) {
1173                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1174                                 lpni->lpni_cpt, alloc_size);
1175                         if (!ne2) {
1176                                 rc = -ENOMEM;
1177                                 goto out;
1178                         }
1179                         INIT_LIST_HEAD(&ne2->nl_list);
1180                         ne2->nl_nid = *tmp_nid;
1181                 }
1182                 ne1->nl_nid = *nid;
1183         }
1184
1185         lnet_net_lock(LNET_LOCK_EX);
1186         spin_lock(&lpni->lpni_lock);
1187         if (lpni->lpni_pref_nnids == 0) {
1188                 lpni->lpni_pref.nid = *nid;
1189         } else {
1190                 if (ne2)
1191                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1192                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1193         }
1194         lpni->lpni_pref_nnids++;
1195         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1196         spin_unlock(&lpni->lpni_lock);
1197         lnet_net_unlock(LNET_LOCK_EX);
1198
1199 out:
1200         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1201                 spin_lock(&lpni->lpni_lock);
1202                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1203                 spin_unlock(&lpni->lpni_lock);
1204         }
1205         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1206                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1207         return rc;
1208 }
1209
1210 int
1211 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1212 {
1213         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1214         struct lnet_nid_list *ne = NULL;
1215         int rc = 0;
1216
1217         if (lpni->lpni_pref_nnids == 0) {
1218                 rc = -ENOENT;
1219                 goto out;
1220         }
1221
1222         if (lpni->lpni_pref_nnids == 1) {
1223                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1224                         rc = -ENOENT;
1225                         goto out;
1226                 }
1227         } else {
1228                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1229                         if (nid_same(&ne->nl_nid, nid))
1230                                 goto remove_nid_entry;
1231                 }
1232                 rc = -ENOENT;
1233                 ne = NULL;
1234                 goto out;
1235         }
1236
1237 remove_nid_entry:
1238         lnet_net_lock(LNET_LOCK_EX);
1239         spin_lock(&lpni->lpni_lock);
1240         if (lpni->lpni_pref_nnids == 1)
1241                 lpni->lpni_pref.nid = LNET_ANY_NID;
1242         else {
1243                 list_del_init(&ne->nl_list);
1244                 if (lpni->lpni_pref_nnids == 2) {
1245                         struct lnet_nid_list *ne, *tmp;
1246
1247                         list_for_each_entry_safe(ne, tmp,
1248                                                  &lpni->lpni_pref.nids,
1249                                                  nl_list) {
1250                                 lpni->lpni_pref.nid = ne->nl_nid;
1251                                 list_del_init(&ne->nl_list);
1252                                 LIBCFS_FREE(ne, sizeof(*ne));
1253                         }
1254                 }
1255         }
1256         lpni->lpni_pref_nnids--;
1257         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1258         spin_unlock(&lpni->lpni_lock);
1259         lnet_net_unlock(LNET_LOCK_EX);
1260
1261         if (ne)
1262                 LIBCFS_FREE(ne, sizeof(*ne));
1263 out:
1264         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1265                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1266         return rc;
1267 }
1268
1269 void
1270 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1271 {
1272         struct list_head zombies;
1273         struct lnet_nid_list *ne;
1274         struct lnet_nid_list *tmp;
1275
1276         INIT_LIST_HEAD(&zombies);
1277
1278         lnet_net_lock(LNET_LOCK_EX);
1279         if (lpni->lpni_pref_nnids == 1)
1280                 lpni->lpni_pref.nid = LNET_ANY_NID;
1281         else if (lpni->lpni_pref_nnids > 1)
1282                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1283         lpni->lpni_pref_nnids = 0;
1284         lnet_net_unlock(LNET_LOCK_EX);
1285
1286         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1287                 list_del_init(&ne->nl_list);
1288                 LIBCFS_FREE(ne, sizeof(*ne));
1289         }
1290 }
1291
1292 void
1293 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1294 {
1295         struct lnet_peer_ni *lpni;
1296
1297         *result = *nid;
1298         lpni = lnet_peer_ni_find_locked(nid);
1299         if (lpni) {
1300                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1301                 lnet_peer_ni_decref_locked(lpni);
1302         }
1303 }
1304
1305 bool
1306 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1307 __must_hold(&lp->lp_lock)
1308 {
1309         if (lnet_peer_discovery_disabled)
1310                 return true;
1311
1312         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1313             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1314                 return true;
1315         }
1316
1317         return false;
1318 }
1319
1320 /*
1321  * Peer Discovery
1322  */
1323 bool
1324 lnet_is_discovery_disabled(struct lnet_peer *lp)
1325 {
1326         bool rc = false;
1327
1328         spin_lock(&lp->lp_lock);
1329         rc = lnet_is_discovery_disabled_locked(lp);
1330         spin_unlock(&lp->lp_lock);
1331
1332         return rc;
1333 }
1334
1335 static void
1336 lnet_discover_peer_nid(struct lnet_nid *nid)
1337 {
1338         int cpt = lnet_net_lock_current();
1339         struct lnet_peer_ni *lpni = lnet_peer_ni_find_locked(nid);
1340
1341         if (lpni) {
1342                 lnet_discover_peer_locked(lpni, cpt, false);
1343                 lnet_peer_ni_decref_locked(lpni);
1344         }
1345         lnet_net_unlock(cpt);
1346 }
1347
1348 int
1349 LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
1350 {
1351         struct lnet_nid pnid = LNET_ANY_NID;
1352         bool mr;
1353         int i, rc;
1354         int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;
1355
1356         if (!nids || num_nids < 1)
1357                 return -EINVAL;
1358
1359         rc = LNetNIInit(LNET_PID_ANY);
1360         if (rc < 0)
1361                 return rc;
1362
1363         mutex_lock(&the_lnet.ln_api_mutex);
1364
1365         mr = lnet_peer_discovery_disabled == 0;
1366
1367         rc = 0;
1368         CDEBUG(D_NET, "num_nids %d\n", num_nids);
1369
1370         for (i = 0; i < num_nids; i++) {
1371                 if (nid_is_lo0(&nids[i]))
1372                         continue;
1373
1374                 if (LNET_NID_IS_ANY(&pnid)) {
1375                         pnid = nids[i];
1376                         rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
1377                         if (rc == -EALREADY) {
1378                                 struct lnet_peer *lp;
1379
1380                                 CDEBUG(D_NET, "A peer exists for NID %s\n",
1381                                        libcfs_nidstr(&pnid));
1382                                 rc = 0;
1383                                 /* Adds a refcount */
1384                                 lp = lnet_find_peer(&pnid);
1385                                 LASSERT(lp);
1386                                 pnid = lp->lp_primary_nid;
1387                                 /* Drop refcount from lookup */
1388                                 lnet_peer_decref_locked(lp);
1389                         } else if (mr && !rc) {
1390                                 lnet_discover_peer_nid(&pnid);
1391                         }
1392                 } else if (lnet_peer_discovery_disabled) {
1393                         rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
1394                                               flags);
1395                 } else if (!nid_same(&pnid, &nids[i])) {
1396                         rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID,
1397                                               mr, 0);
1398                         if (!rc) {
1399                                 if (lock_prim_nid) {
1400                                         struct lnet_peer *lp;
1401                                         lp = lnet_find_peer(&nids[i]);
1402                                         if (lp) {
1403                                                 lp->lp_merge_primary_nid = pnid;
1404                                                 lnet_peer_decref_locked(lp);
1405                                         }
1406                                 }
1407                                 lnet_discover_peer_nid(&nids[i]);
1408                         }
1409                 }
1410
1411                 if (rc && rc != -EEXIST)
1412                         goto unlock;
1413         }
1414
1415 unlock:
1416         mutex_unlock(&the_lnet.ln_api_mutex);
1417
1418         LNetNIFini();
1419
1420         return rc == -EEXIST ? 0 : rc;
1421 }
1422 EXPORT_SYMBOL(LNetAddPeer);
1423
1424 void LNetPrimaryNID(struct lnet_nid *nid)
1425 {
1426         struct lnet_peer *lp;
1427         struct lnet_peer_ni *lpni;
1428         struct lnet_nid orig;
1429         int rc = 0;
1430         int cpt;
1431
1432         if (!nid || nid_is_lo0(nid))
1433                 return;
1434         orig = *nid;
1435
1436         cpt = lnet_net_lock_current();
1437         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1438         if (IS_ERR(lpni)) {
1439                 rc = PTR_ERR(lpni);
1440                 goto out_unlock;
1441         }
1442         lp = lpni->lpni_peer_net->lpn_peer;
1443
1444         /* If discovery is disabled locally then we needn't bother running
1445          * discovery here because discovery will not modify whatever
1446          * primary NID is currently set for this peer. If the specified peer is
1447          * down then this discovery can introduce long delays into the mount
1448          * process, so skip it if it isn't necessary.
1449          */
1450 again:
1451         spin_lock(&lp->lp_lock);
1452         /* DD disabled, nothing to do */
1453         if (lnet_peer_discovery_disabled) {
1454                 *nid = lp->lp_primary_nid;
1455                 spin_unlock(&lp->lp_lock);
1456                 goto out_decref;
1457         }
1458
1459         /* Peer already up to date, nothing to do */
1460         if (lnet_peer_is_uptodate_locked(lp)) {
1461                 *nid = lp->lp_primary_nid;
1462                 spin_unlock(&lp->lp_lock);
1463                 goto out_decref;
1464         }
1465         spin_unlock(&lp->lp_lock);
1466
1467         /* If primary nid locking is enabled, discovery is performed
1468          * in the background.
1469          * If primary nid locking is disabled, discovery blocks here.
1470          * Messages to the peer will not go through until the discovery is
1471          * complete.
1472          */
1473         if (lock_prim_nid && lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1474                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1475         else
1476                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1477         if (rc)
1478                 goto out_decref;
1479
1480         /* The lpni (or lp) for this NID may have changed and our ref is
1481          * the only thing keeping the old one around. Release the ref
1482          * and lookup the lpni again
1483          */
1484         lnet_peer_ni_decref_locked(lpni);
1485         lpni = lnet_peer_ni_find_locked(nid);
1486         if (!lpni) {
1487                 rc = -ENOENT;
1488                 goto out_unlock;
1489         }
1490         lp = lpni->lpni_peer_net->lpn_peer;
1491
1492         if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
1493                 goto again;
1494         *nid = lp->lp_primary_nid;
1495 out_decref:
1496         lnet_peer_ni_decref_locked(lpni);
1497 out_unlock:
1498         lnet_net_unlock(cpt);
1499
1500         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1501                libcfs_nidstr(nid), rc);
1502 }
1503 EXPORT_SYMBOL(LNetPrimaryNID);
1504
1505 bool
1506 LNetPeerDiscovered(struct lnet_nid *nid)
1507 {
1508         int cpt, disc = false;
1509         struct lnet_peer *lp;
1510
1511         lp = lnet_find_peer(nid);
1512         if (!lp)
1513                 goto out;
1514
1515         cpt = lnet_net_lock_current();
1516         spin_lock(&lp->lp_lock);
1517         if (((lp->lp_state & LNET_PEER_DISCOVERED) &&
1518             (lp->lp_state & LNET_PEER_NIDS_UPTODATE)) ||
1519             (lp->lp_state & LNET_PEER_NO_DISCOVERY))
1520                 disc = true;
1521         spin_unlock(&lp->lp_lock);
1522
1523         /* Drop refcount from lookup */
1524         lnet_peer_decref_locked(lp);
1525         lnet_net_unlock(cpt);
1526 out:
1527         CDEBUG(D_NET, "Peer NID %s discovered: %d\n", libcfs_nidstr(nid),
1528                disc);
1529         return disc;
1530 }
1531 EXPORT_SYMBOL(LNetPeerDiscovered);
1532
1533 struct lnet_peer_net *
1534 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1535 {
1536         struct lnet_peer_net *peer_net;
1537         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1538                 if (peer_net->lpn_net_id == net_id)
1539                         return peer_net;
1540         }
1541         return NULL;
1542 }
1543
1544 /*
1545  * Attach a peer_ni to a peer_net and peer. This function assumes
1546  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1547  * may be attached to a different peer, in which case it will be
1548  * properly detached first. The whole operation is done atomically.
1549  *
1550  * This function consumes the reference on lpni and Always returns 0.
1551  * This is the last function called from functions that do return an
1552  * int, so returning 0 here allows the compiler to do a tail call.
1553  */
1554 static int
1555 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1556                          struct lnet_peer_net *lpn,
1557                          struct lnet_peer_ni *lpni,
1558                          unsigned flags)
1559 {
1560         struct lnet_peer_table *ptable;
1561         bool new_lpn = false;
1562         int rc;
1563
1564         /* Install the new peer_ni */
1565         lnet_net_lock(LNET_LOCK_EX);
1566         /* Add peer_ni to global peer table hash, if necessary. */
1567         if (list_empty(&lpni->lpni_hashlist)) {
1568                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1569
1570                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1571                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1572                 ptable->pt_version++;
1573                 lnet_peer_ni_addref_locked(lpni);
1574         }
1575
1576         /* Detach the peer_ni from an existing peer, if necessary. */
1577         if (lpni->lpni_peer_net) {
1578                 LASSERT(lpni->lpni_peer_net != lpn);
1579                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1580                 lnet_peer_detach_peer_ni_locked(lpni);
1581                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1582                 lpni->lpni_peer_net = NULL;
1583         }
1584
1585         /* Add peer_ni to peer_net */
1586         lpni->lpni_peer_net = lpn;
1587         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1588                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1589         else
1590                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1591         lnet_update_peer_net_healthv(lpni);
1592         lnet_peer_net_addref_locked(lpn);
1593
1594         /* Add peer_net to peer */
1595         if (!lpn->lpn_peer) {
1596                 new_lpn = true;
1597                 lpn->lpn_peer = lp;
1598                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1599                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1600                 else
1601                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1602                 lnet_peer_addref_locked(lp);
1603         }
1604
1605         /* Add peer to global peer list, if necessary */
1606         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1607         if (list_empty(&lp->lp_peer_list)) {
1608                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1609                 ptable->pt_peers++;
1610         }
1611
1612         /* Update peer state */
1613         spin_lock(&lp->lp_lock);
1614         if (flags & LNET_PEER_CONFIGURED) {
1615                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1616                         lp->lp_state |= LNET_PEER_CONFIGURED;
1617         }
1618         if (flags & LNET_PEER_MULTI_RAIL) {
1619                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1620                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1621                         lnet_peer_clr_non_mr_pref_nids(lp);
1622                 }
1623         }
1624         if (flags & LNET_PEER_LOCK_PRIMARY) {
1625                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1626                 lp->lp_prim_lock_ts = ktime_get_ns();
1627         }
1628         spin_unlock(&lp->lp_lock);
1629
1630         lp->lp_nnis++;
1631
1632         /* apply UDSPs */
1633         if (new_lpn) {
1634                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1635                 if (rc)
1636                         CERROR("Failed to apply UDSPs on lpn %s\n",
1637                                libcfs_net2str(lpn->lpn_net_id));
1638         }
1639         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1640         if (rc)
1641                 CERROR("Failed to apply UDSPs on lpni %s\n",
1642                        libcfs_nidstr(&lpni->lpni_nid));
1643
1644         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1645                libcfs_nidstr(&lp->lp_primary_nid),
1646                libcfs_nidstr(&lpni->lpni_nid), flags);
1647         lnet_peer_ni_decref_locked(lpni);
1648         lnet_net_unlock(LNET_LOCK_EX);
1649
1650         return 0;
1651 }
1652
1653 /*
1654  * Create a new peer, with nid as its primary nid.
1655  *
1656  * Call with the lnet_api_mutex held.
1657  */
1658 static int
1659 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1660 {
1661         struct lnet_peer *lp;
1662         struct lnet_peer_net *lpn;
1663         struct lnet_peer_ni *lpni;
1664         int rc = 0;
1665
1666         LASSERT(nid);
1667
1668         /*
1669          * No need for the lnet_net_lock here, because the
1670          * lnet_api_mutex is held.
1671          */
1672         lpni = lnet_peer_ni_find_locked(nid);
1673         if (lpni) {
1674                 /* A peer with this NID already exists. */
1675                 lp = lpni->lpni_peer_net->lpn_peer;
1676                 lnet_peer_ni_decref_locked(lpni);
1677                 /*
1678                  * This is an error if the peer was configured and the
1679                  * primary NID differs or an attempt is made to change
1680                  * the Multi-Rail flag. Otherwise the assumption is
1681                  * that an existing peer is being modified.
1682                  */
1683                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1684                         if (!nid_same(&lp->lp_primary_nid, nid))
1685                                 rc = -EEXIST;
1686                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1687                                 rc = -EPERM;
1688                         goto out;
1689                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1690                         if (nid_same(&lp->lp_primary_nid, nid))
1691                                 rc = -EEXIST;
1692                         /* we're trying to recreate an existing peer which
1693                          * has already been created and its primary
1694                          * locked. This is likely due to two servers
1695                          * existing on the same node. So we'll just refer
1696                          * to that node with the primary NID which was
1697                          * first added by Lustre
1698                          */
1699                         else
1700                                 rc = -EALREADY;
1701                         goto out;
1702                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1703                         /* if not recreating peer as configured and
1704                          * not locking primary nid, no need to
1705                          * do anything if primary nid is not being changed
1706                          */
1707                         if (nid_same(&lp->lp_primary_nid, nid)) {
1708                                 rc = -EEXIST;
1709                                 goto out;
1710                         }
1711                 }
1712                 /* Delete and recreate the peer.
1713                  * We can get here:
1714                  * 1. If the peer is being recreated as a configured NID
1715                  * 2. if there already exists a peer which
1716                  *    was discovered manually, but is recreated via Lustre
1717                  *    with PRIMARY_lock
1718                  */
1719                 rc = lnet_peer_del(lp);
1720                 if (rc)
1721                         goto out;
1722         }
1723
1724         /* Create peer, peer_net, and peer_ni. */
1725         rc = -ENOMEM;
1726         lp = lnet_peer_alloc(nid);
1727         if (!lp)
1728                 goto out;
1729         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1730         if (!lpn)
1731                 goto out_free_lp;
1732         lpni = lnet_peer_ni_alloc(nid);
1733         if (!lpni)
1734                 goto out_free_lpn;
1735
1736         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1737
1738 out_free_lpn:
1739         LIBCFS_FREE(lpn, sizeof(*lpn));
1740 out_free_lp:
1741         LIBCFS_FREE(lp, sizeof(*lp));
1742 out:
1743         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1744                libcfs_nidstr(nid), flags, rc);
1745         return rc;
1746 }
1747
1748 /*
1749  * Add a NID to a peer. Call with ln_api_mutex held.
1750  *
1751  * Error codes:
1752  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1753  *  -EEXIST:   The NID was configured by DLC for a different peer.
1754  *  -ENOMEM:   Out of memory.
1755  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1756  *             non-multi-rail peer.
1757  */
1758 static int
1759 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1760                   unsigned int flags)
1761 {
1762         struct lnet_peer_net *lpn;
1763         struct lnet_peer_ni *lpni;
1764         int rc = 0;
1765
1766         LASSERT(lp);
1767         LASSERT(nid);
1768
1769         /* A configured peer can only be updated through configuration. */
1770         if (!(flags & LNET_PEER_CONFIGURED)) {
1771                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1772                         rc = -EPERM;
1773                         goto out;
1774                 }
1775         }
1776
1777         /*
1778          * The MULTI_RAIL flag can be set but not cleared, because
1779          * that would leave the peer struct in an invalid state.
1780          */
1781         if (flags & LNET_PEER_MULTI_RAIL) {
1782                 spin_lock(&lp->lp_lock);
1783                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1784                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1785                         lnet_peer_clr_non_mr_pref_nids(lp);
1786                 }
1787                 spin_unlock(&lp->lp_lock);
1788         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1789                 rc = -EPERM;
1790                 goto out;
1791         }
1792
1793         lpni = lnet_peer_ni_find_locked(nid);
1794         if (lpni) {
1795                 /*
1796                  * A peer_ni already exists. This is only a problem if
1797                  * it is not connected to this peer and was configured
1798                  * by DLC.
1799                  */
1800                 if (lpni->lpni_peer_net->lpn_peer == lp)
1801                         goto out_free_lpni;
1802                 if (lnet_peer_ni_is_configured(lpni)) {
1803                         rc = -EEXIST;
1804                         goto out_free_lpni;
1805                 }
1806                 /* If this is the primary NID, destroy the peer. */
1807                 if (lnet_peer_ni_is_primary(lpni)) {
1808                         struct lnet_peer *lp2 =
1809                                 lpni->lpni_peer_net->lpn_peer;
1810                         int rtr_refcount = lp2->lp_rtr_refcount;
1811                         unsigned int peer2_state;
1812                         __u64 peer2_prim_lock_ts;
1813
1814                         /* If there's another peer that this NID belongs to
1815                          * and the primary NID for that peer is locked,
1816                          * then, unless it is the only NID, we don't want
1817                          * to mess with it.
1818                          * But the configuration is wrong at this point,
1819                          * so we should flag both of these peers as in a bad
1820                          * state
1821                          */
1822                         spin_lock(&lp2->lp_lock);
1823                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1824                             lp2->lp_nnis > 1) {
1825                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1826                                 spin_unlock(&lp2->lp_lock);
1827                                 spin_lock(&lp->lp_lock);
1828                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1829                                 spin_unlock(&lp->lp_lock);
1830                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1831                                         libcfs_nidstr(&lp->lp_primary_nid),
1832                                         libcfs_nidstr(nid),
1833                                         libcfs_nidstr(&lp2->lp_primary_nid));
1834                                 goto out_free_lpni;
1835                         }
1836                         peer2_state = lp2->lp_state;
1837                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1838                         spin_unlock(&lp2->lp_lock);
1839
1840                         /* NID which got locked the earliest should be
1841                          * kept as primary. In case if the peers were
1842                          * created by Lustre, this allows the
1843                          * first listed NID to stay primary as intended
1844                          * for the purpose of communicating with Lustre
1845                          * even if peer discovery succeeded using
1846                          * a different NID of MR peer.
1847                          */
1848                         spin_lock(&lp->lp_lock);
1849                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1850                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1851                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1852                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1853                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1854                                 lp->lp_primary_nid = *nid;
1855                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1856                         }
1857                         spin_unlock(&lp->lp_lock);
1858                         /*
1859                          * if we're trying to delete a router it means
1860                          * we're moving this peer NI to a new peer so must
1861                          * transfer router properties to the new peer
1862                          */
1863                         if (rtr_refcount > 0) {
1864                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1865                                 lnet_rtr_transfer_to_peer(lp2, lp);
1866                         }
1867                         lnet_peer_del(lp2);
1868                         lnet_peer_ni_decref_locked(lpni);
1869                         lpni = lnet_peer_ni_alloc(nid);
1870                         if (!lpni) {
1871                                 rc = -ENOMEM;
1872                                 goto out_free_lpni;
1873                         }
1874                 }
1875         } else {
1876                 lpni = lnet_peer_ni_alloc(nid);
1877                 if (!lpni) {
1878                         rc = -ENOMEM;
1879                         goto out_free_lpni;
1880                 }
1881         }
1882
1883         /*
1884          * Get the peer_net. Check that we're not adding a second
1885          * peer_ni on a peer_net of a non-multi-rail peer.
1886          */
1887         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1888         if (!lpn) {
1889                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1890                 if (!lpn) {
1891                         rc = -ENOMEM;
1892                         goto out_free_lpni;
1893                 }
1894         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1895                 rc = -ENOTUNIQ;
1896                 goto out_free_lpni;
1897         }
1898
1899         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1900
1901 out_free_lpni:
1902         lnet_peer_ni_decref_locked(lpni);
1903 out:
1904         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1905                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1906                flags, rc);
1907         return rc;
1908 }
1909
1910 /*
1911  * Update the primary NID of a peer, if possible.
1912  *
1913  * Call with the lnet_api_mutex held.
1914  */
1915 static int
1916 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1917                           unsigned int flags)
1918 {
1919         struct lnet_nid old = lp->lp_primary_nid;
1920         int rc = 0;
1921
1922         if (nid_same(&lp->lp_primary_nid, nid))
1923                 goto out;
1924
1925         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1926                 lp->lp_primary_nid = *nid;
1927
1928         rc = lnet_peer_add_nid(lp, nid, flags);
1929         if (rc) {
1930                 lp->lp_primary_nid = old;
1931                 goto out;
1932         }
1933 out:
1934         /* if this is a configured peer or the primary for that peer has
1935          * been locked, then we don't want to flag this scenario as
1936          * a failure
1937          */
1938         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1939             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1940                 return 0;
1941
1942         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1943                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1944
1945         return rc;
1946 }
1947
1948 /*
1949  * lpni creation initiated due to traffic either sending or receiving.
1950  * Callers must hold ln_api_mutex
1951  * Ref taken on lnet_peer_ni returned by this function
1952  */
1953 static struct lnet_peer_ni *
1954 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1955 __must_hold(&the_lnet.ln_api_mutex)
1956 {
1957         struct lnet_peer *lp = NULL;
1958         struct lnet_peer_net *lpn = NULL;
1959         struct lnet_peer_ni *lpni;
1960         unsigned flags = 0;
1961         int rc = 0;
1962
1963         if (LNET_NID_IS_ANY(nid)) {
1964                 rc = -EINVAL;
1965                 goto out_err;
1966         }
1967
1968         /* lnet_net_lock is not needed here because ln_api_lock is held */
1969         lpni = lnet_peer_ni_find_locked(nid);
1970         if (lpni) {
1971                 /*
1972                  * We must have raced with another thread. Since we
1973                  * know next to nothing about a peer_ni created by
1974                  * traffic, we just assume everything is ok and
1975                  * return.
1976                  */
1977                 goto out;
1978         }
1979
1980         /* Create peer, peer_net, and peer_ni. */
1981         rc = -ENOMEM;
1982         lp = lnet_peer_alloc(nid);
1983         if (!lp)
1984                 goto out_err;
1985         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1986         if (!lpn)
1987                 goto out_err;
1988         lpni = lnet_peer_ni_alloc(nid);
1989         if (!lpni)
1990                 goto out_err;
1991         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1992
1993         /* lnet_peer_attach_peer_ni() always returns 0 */
1994         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1995
1996         lnet_peer_ni_addref_locked(lpni);
1997
1998 out_err:
1999         if (rc) {
2000                 if (lpn)
2001                         LIBCFS_FREE(lpn, sizeof(*lpn));
2002                 if (lp)
2003                         LIBCFS_FREE(lp, sizeof(*lp));
2004                 lpni = ERR_PTR(rc);
2005         }
2006 out:
2007         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
2008         return lpni;
2009 }
2010
2011 /*
2012  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
2013  *
2014  * This API handles the following combinations:
2015  *   Create a peer with its primary NI if only the prim_nid is provided
2016  *   Add a NID to a peer identified by the prim_nid. The peer identified
2017  *   by the prim_nid must already exist.
2018  *   The peer being created may be non-MR.
2019  *
2020  * The caller must hold ln_api_mutex. This prevents the peer from
2021  * being created/modified/deleted by a different thread.
2022  */
2023 static int
2024 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
2025                  unsigned int flags)
2026 __must_hold(&the_lnet.ln_api_mutex)
2027 {
2028         struct lnet_peer *lp = NULL;
2029         struct lnet_peer_ni *lpni;
2030
2031         /* The prim_nid must always be specified */
2032         if (LNET_NID_IS_ANY(prim_nid))
2033                 return -EINVAL;
2034
2035         if (mr)
2036                 flags |= LNET_PEER_MULTI_RAIL;
2037
2038         /*
2039          * If nid isn't specified, we must create a new peer with
2040          * prim_nid as its primary nid.
2041          */
2042         if (LNET_NID_IS_ANY(nid))
2043                 return lnet_peer_add(prim_nid, flags);
2044
2045         /* Look up the prim_nid, which must exist. */
2046         lpni = lnet_peer_ni_find_locked(prim_nid);
2047         if (!lpni)
2048                 return -ENOENT;
2049         lp = lpni->lpni_peer_net->lpn_peer;
2050         lnet_peer_ni_decref_locked(lpni);
2051
2052         /* Peer must have been configured. */
2053         if ((flags & LNET_PEER_CONFIGURED) &&
2054             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2055                 CDEBUG(D_NET, "peer %s was not configured\n",
2056                        libcfs_nidstr(prim_nid));
2057                 return -ENOENT;
2058         }
2059
2060         /* Primary NID must match */
2061         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2062                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2063                        libcfs_nidstr(prim_nid),
2064                        libcfs_nidstr(&lp->lp_primary_nid));
2065                 return -ENODEV;
2066         }
2067
2068         /* Multi-Rail flag must match. */
2069         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2070                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2071                        libcfs_nidstr(prim_nid));
2072                 return -EPERM;
2073         }
2074
2075         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2076                 CDEBUG(D_NET,
2077                        "Don't add temporary peer NI for uptodate peer %s\n",
2078                        libcfs_nidstr(&lp->lp_primary_nid));
2079                 return -EINVAL;
2080         }
2081
2082         return lnet_peer_add_nid(lp, nid, flags);
2083 }
2084
2085 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2086                           bool mr, bool lock_prim)
2087 {
2088         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
2089
2090         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2091 }
2092
2093 static int
2094 lnet_reset_peer(struct lnet_peer *lp)
2095 {
2096         struct lnet_peer_net *lpn, *lpntmp;
2097         struct lnet_peer_ni *lpni, *lpnitmp;
2098         unsigned int flags;
2099         int rc;
2100
2101         lnet_peer_cancel_discovery(lp);
2102
2103         flags = LNET_PEER_CONFIGURED;
2104         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2105                 flags |= LNET_PEER_MULTI_RAIL;
2106
2107         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2108                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2109                                          lpni_peer_nis) {
2110                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2111                                 continue;
2112
2113                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2114                         if (rc) {
2115                                 CERROR("Failed to delete %s from peer %s\n",
2116                                        libcfs_nidstr(&lpni->lpni_nid),
2117                                        libcfs_nidstr(&lp->lp_primary_nid));
2118                         }
2119                 }
2120         }
2121
2122         /* mark it for discovery the next time we use it */
2123         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2124         return 0;
2125 }
2126
2127 /*
2128  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2129  *
2130  * This API handles the following combinations:
2131  *   Delete a NI from a peer if both prim_nid and nid are provided.
2132  *   Delete a peer if only prim_nid is provided.
2133  *   Delete a peer if its primary nid is provided.
2134  *
2135  * The caller must hold ln_api_mutex. This prevents the peer from
2136  * being modified/deleted by a different thread.
2137  */
2138 int
2139 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2140                  int force)
2141 {
2142         struct lnet_peer *lp;
2143         struct lnet_peer_ni *lpni;
2144         unsigned int flags;
2145
2146         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2147                 return -EINVAL;
2148
2149         lpni = lnet_peer_ni_find_locked(prim_nid);
2150         if (!lpni)
2151                 return -ENOENT;
2152         lp = lpni->lpni_peer_net->lpn_peer;
2153         lnet_peer_ni_decref_locked(lpni);
2154
2155         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2156                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2157                        libcfs_nidstr(prim_nid),
2158                        libcfs_nidstr(&lp->lp_primary_nid));
2159                 return -ENODEV;
2160         }
2161
2162         lnet_net_lock(LNET_LOCK_EX);
2163         if (lp->lp_rtr_refcount > 0) {
2164                 lnet_net_unlock(LNET_LOCK_EX);
2165                 CERROR("%s is a router. Can not be deleted\n",
2166                        libcfs_nidstr(prim_nid));
2167                 return -EBUSY;
2168         }
2169         lnet_net_unlock(LNET_LOCK_EX);
2170
2171         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2172                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2173                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2174                                libcfs_nidstr(&lp->lp_primary_nid));
2175                         return lnet_reset_peer(lp);
2176                 } else {
2177                         return lnet_peer_del(lp);
2178                 }
2179         }
2180
2181         flags = LNET_PEER_CONFIGURED;
2182         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2183                 flags |= LNET_PEER_MULTI_RAIL;
2184
2185         return lnet_peer_del_nid(lp, nid, flags);
2186 }
2187
2188 void
2189 lnet_destroy_peer_ni_locked(struct kref *ref)
2190 {
2191         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2192                                                  lpni_kref);
2193         struct lnet_peer_table *ptable;
2194         struct lnet_peer_net *lpn;
2195
2196         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2197
2198         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2199         LASSERT(list_empty(&lpni->lpni_txq));
2200         LASSERT(lpni->lpni_txqnob == 0);
2201         LASSERT(list_empty(&lpni->lpni_peer_nis));
2202         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2203
2204         lpn = lpni->lpni_peer_net;
2205         lpni->lpni_peer_net = NULL;
2206         lpni->lpni_net = NULL;
2207
2208         if (!list_empty(&lpni->lpni_hashlist)) {
2209                 /* remove the peer ni from the zombie list */
2210                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2211                 spin_lock(&ptable->pt_zombie_lock);
2212                 list_del_init(&lpni->lpni_hashlist);
2213                 ptable->pt_zombies--;
2214                 spin_unlock(&ptable->pt_zombie_lock);
2215         }
2216
2217         if (lpni->lpni_pref_nnids > 1) {
2218                 struct lnet_nid_list *ne, *tmp;
2219
2220                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2221                                          nl_list) {
2222                         list_del_init(&ne->nl_list);
2223                         LIBCFS_FREE(ne, sizeof(*ne));
2224                 }
2225         }
2226         LIBCFS_FREE(lpni, sizeof(*lpni));
2227
2228         if (lpn)
2229                 lnet_peer_net_decref_locked(lpn);
2230 }
2231
2232 struct lnet_peer_ni *
2233 lnet_nid2peerni_ex(struct lnet_nid *nid)
2234 __must_hold(&the_lnet.ln_api_mutex)
2235 {
2236         struct lnet_peer_ni *lpni = NULL;
2237
2238         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2239                 return ERR_PTR(-ESHUTDOWN);
2240
2241         /*
2242          * find if a peer_ni already exists.
2243          * If so then just return that.
2244          */
2245         lpni = lnet_peer_ni_find_locked(nid);
2246         if (lpni)
2247                 return lpni;
2248
2249         lnet_net_unlock(LNET_LOCK_EX);
2250
2251         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2252
2253         lnet_net_lock(LNET_LOCK_EX);
2254
2255         return lpni;
2256 }
2257
2258 /*
2259  * Get a peer_ni for the given nid, create it if necessary. Takes a
2260  * hold on the peer_ni.
2261  */
2262 struct lnet_peer_ni *
2263 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2264                         struct lnet_nid *pref, int cpt)
2265 {
2266         struct lnet_peer_ni *lpni = NULL;
2267
2268         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2269                 return ERR_PTR(-ESHUTDOWN);
2270
2271         /*
2272          * find if a peer_ni already exists.
2273          * If so then just return that.
2274          */
2275         lpni = lnet_peer_ni_find_locked(nid);
2276         if (lpni)
2277                 return lpni;
2278
2279         /*
2280          * Slow path:
2281          * use the lnet_api_mutex to serialize the creation of the peer_ni
2282          * and the creation/deletion of the local ni/net. When a local ni is
2283          * created, if there exists a set of peer_nis on that network,
2284          * they need to be traversed and updated. When a local NI is
2285          * deleted, which could result in a network being deleted, then
2286          * all peer nis on that network need to be removed as well.
2287          *
2288          * Creation through traffic should also be serialized with
2289          * creation through DLC.
2290          */
2291         lnet_net_unlock(cpt);
2292         mutex_lock(&the_lnet.ln_api_mutex);
2293         /*
2294          * the_lnet.ln_state is only modified under the ln_api_lock, so a single
2295          * check here is sufficent.
2296          */
2297         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2298                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2299
2300         mutex_unlock(&the_lnet.ln_api_mutex);
2301         lnet_net_lock(cpt);
2302
2303         /* Lock has been dropped, check again for shutdown. */
2304         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2305                 if (!IS_ERR_OR_NULL(lpni))
2306                         lnet_peer_ni_decref_locked(lpni);
2307                 lpni = ERR_PTR(-ESHUTDOWN);
2308         }
2309
2310         return lpni;
2311 }
2312
2313 bool
2314 lnet_peer_gw_discovery(struct lnet_peer *lp)
2315 {
2316         bool rc = false;
2317
2318         spin_lock(&lp->lp_lock);
2319         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2320                 rc = true;
2321         spin_unlock(&lp->lp_lock);
2322
2323         return rc;
2324 }
2325
2326 bool
2327 lnet_peer_is_uptodate(struct lnet_peer *lp)
2328 {
2329         bool rc;
2330
2331         spin_lock(&lp->lp_lock);
2332         rc = lnet_peer_is_uptodate_locked(lp);
2333         spin_unlock(&lp->lp_lock);
2334         return rc;
2335 }
2336
2337 /*
2338  * Is a peer uptodate from the point of view of discovery?
2339  *
2340  * If it is currently being processed, obviously not.
2341  * A forced Ping or Push is also handled by the discovery thread.
2342  *
2343  * Otherwise look at whether the peer needs rediscovering.
2344  */
2345 bool
2346 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2347 __must_hold(&lp->lp_lock)
2348 {
2349         bool rc;
2350
2351         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2352                             LNET_PEER_FORCE_PING |
2353                             LNET_PEER_FORCE_PUSH)) {
2354                 rc = false;
2355         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2356                 rc = false;
2357         } else if (lnet_peer_needs_push(lp)) {
2358                 rc = false;
2359         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2360                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2361                         rc = true;
2362                 else
2363                         rc = false;
2364         } else {
2365                 rc = false;
2366         }
2367
2368         return rc;
2369 }
2370
2371 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2372 void
2373 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2374 {
2375         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2376          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2377          * when adding to the list and queuing the peer to ensure that we do not
2378          * strand any messages on the lp_dc_pendq. This scheme ensures the
2379          * message will be resent even if the peer is already being discovered.
2380          * Therefore we needn't check the return value of
2381          * lnet_peer_queue_for_discovery(lp).
2382          */
2383         lnet_net_lock(LNET_LOCK_EX);
2384         spin_lock(&lp->lp_lock);
2385         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2386         spin_unlock(&lp->lp_lock);
2387         lnet_peer_queue_for_discovery(lp);
2388         lnet_net_unlock(LNET_LOCK_EX);
2389 }
2390
2391 /*
2392  * Queue a peer for the attention of the discovery thread.  Call with
2393  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2394  * -EALREADY if the peer was already queued.
2395  */
2396 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2397 {
2398         int rc;
2399
2400         spin_lock(&lp->lp_lock);
2401         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2402                 lp->lp_state |= LNET_PEER_DISCOVERING;
2403         spin_unlock(&lp->lp_lock);
2404         if (list_empty(&lp->lp_dc_list)) {
2405                 lnet_peer_addref_locked(lp);
2406                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2407                 wake_up(&the_lnet.ln_dc_waitq);
2408                 rc = 0;
2409         } else {
2410                 rc = -EALREADY;
2411         }
2412
2413         CDEBUG(D_NET, "Queue peer %s: %d\n",
2414                libcfs_nidstr(&lp->lp_primary_nid), rc);
2415
2416         return rc;
2417 }
2418
2419 /*
2420  * Discovery of a peer is complete. Wake all waiters on the peer.
2421  * Call with lnet_net_lock/EX held.
2422  */
2423 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2424 {
2425         struct lnet_msg *msg, *tmp;
2426         int rc = 0;
2427         LIST_HEAD(pending_msgs);
2428
2429         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2430                libcfs_nidstr(&lp->lp_primary_nid));
2431
2432         spin_lock(&lp->lp_lock);
2433         /* Our caller dropped lp_lock which may have allowed another thread to
2434          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2435          * Ensure it is cleared.
2436          */
2437         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2438         if (dc_error) {
2439                 lp->lp_dc_error = dc_error;
2440                 lp->lp_state |= LNET_PEER_REDISCOVER;
2441         }
2442         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2443         spin_unlock(&lp->lp_lock);
2444         list_del_init(&lp->lp_dc_list);
2445         wake_up(&lp->lp_dc_waitq);
2446
2447         if (lp->lp_rtr_refcount > 0)
2448                 lnet_router_discovery_complete(lp);
2449
2450         lnet_net_unlock(LNET_LOCK_EX);
2451
2452         /* iterate through all pending messages and send them again */
2453         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2454                 list_del_init(&msg->msg_list);
2455                 if (dc_error) {
2456                         lnet_finalize(msg, dc_error);
2457                         continue;
2458                 }
2459
2460                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2461                        lnet_msgtyp2str(msg->msg_type),
2462                        libcfs_idstr(&msg->msg_target));
2463                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2464                                &msg->msg_rtr_nid_param);
2465                 if (rc < 0) {
2466                         CNETERR("Error sending %s to %s: %d\n",
2467                                lnet_msgtyp2str(msg->msg_type),
2468                                libcfs_idstr(&msg->msg_target), rc);
2469                         lnet_finalize(msg, rc);
2470                 }
2471         }
2472         lnet_net_lock(LNET_LOCK_EX);
2473         lnet_peer_decref_locked(lp);
2474 }
2475
2476 /*
2477  * Handle inbound push.
2478  * Like any event handler, called with lnet_res_lock/CPT held.
2479  */
2480 void lnet_peer_push_event(struct lnet_event *ev)
2481 {
2482         struct lnet_ping_buffer *pbuf;
2483         struct lnet_peer *lp;
2484         int infobytes;
2485
2486         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2487
2488         /* lnet_find_peer() adds a refcount */
2489         lp = lnet_find_peer(&ev->source.nid);
2490         if (!lp) {
2491                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2492                        libcfs_nidstr(&ev->initiator.nid),
2493                        libcfs_nidstr(&ev->source.nid));
2494                 pbuf->pb_needs_post = true;
2495                 return;
2496         }
2497
2498         /* Ensure peer state remains consistent while we modify it. */
2499         spin_lock(&lp->lp_lock);
2500
2501         /*
2502          * If some kind of error happened the contents of the message
2503          * cannot be used. Clear the NIDS_UPTODATE and set the
2504          * FORCE_PING flag to trigger a ping.
2505          */
2506         if (ev->status) {
2507                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2508                 lp->lp_state |= LNET_PEER_FORCE_PING;
2509                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2510                        ev->status,
2511                        libcfs_nidstr(&lp->lp_primary_nid),
2512                        libcfs_nidstr(&ev->source.nid));
2513                 goto out;
2514         }
2515
2516         /*
2517          * A push with invalid or corrupted info. Clear the UPTODATE
2518          * flag to trigger a ping.
2519          */
2520         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2521                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2522                 lp->lp_state |= LNET_PEER_FORCE_PING;
2523                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2524                        libcfs_nidstr(&lp->lp_primary_nid));
2525                 goto out;
2526         }
2527
2528         /* Make sure we'll allocate the correct size ping buffer when
2529          * pinging the peer.
2530          */
2531         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2532         if (lp->lp_data_bytes < infobytes)
2533                 lp->lp_data_bytes = infobytes;
2534
2535         /*
2536          * A non-Multi-Rail peer is not supposed to be capable of
2537          * sending a push.
2538          */
2539         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2540                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2541                        libcfs_nidstr(&lp->lp_primary_nid));
2542                 goto out;
2543         }
2544
2545         /*
2546          * The peer may have discovery disabled at its end. Set
2547          * NO_DISCOVERY as appropriate.
2548          */
2549         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2550                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2551                        libcfs_nidstr(&lp->lp_primary_nid));
2552                 /*
2553                  * Mark the peer for deletion if we already know about it
2554                  * and it's going from discovery set to no discovery set
2555                  */
2556                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2557                                       LNET_PEER_DISCOVERING)) &&
2558                      lp->lp_state & LNET_PEER_DISCOVERED) {
2559                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2560                                libcfs_nidstr(&lp->lp_primary_nid),
2561                                lp->lp_state);
2562                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2563                 }
2564                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2565         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2566                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2567                        libcfs_nidstr(&lp->lp_primary_nid));
2568                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2569         }
2570
2571         /*
2572          * Update the MULTI_RAIL flag based on the push. If the peer
2573          * was configured with DLC then the setting should match what
2574          * DLC put in.
2575          * NB: We verified above that the MR feature bit is set in pi_features
2576          */
2577         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2578                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2579                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2580         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2581                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2582                       libcfs_nidstr(&lp->lp_primary_nid));
2583         } else if (lnet_peer_discovery_disabled) {
2584                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2585                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2586         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2587                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2588                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2589         } else {
2590                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2591                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2592                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2593                 lnet_peer_clr_non_mr_pref_nids(lp);
2594         }
2595
2596         /* Check for truncation of the Put message. Clear the
2597          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2598          * and tell discovery to allocate a bigger buffer.
2599          */
2600         if (ev->mlength < ev->rlength) {
2601                 if (the_lnet.ln_push_target_nbytes < infobytes)
2602                         the_lnet.ln_push_target_nbytes = infobytes;
2603                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2604                 lp->lp_state |= LNET_PEER_FORCE_PING;
2605                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2606                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2607                 goto out;
2608         }
2609
2610         /* always assume new data */
2611         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2612         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2613
2614         /* If there is data present that hasn't been processed yet,
2615          * we'll replace it if the Put contained newer data and it
2616          * fits. We're racing with a Ping or earlier Push in this
2617          * case.
2618          */
2619         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2620                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2621                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2622                     infobytes <= lp->lp_data->pb_nbytes) {
2623                         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2624                                infobytes, FLEXIBLE_OBJECT);
2625                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2626                               libcfs_nidstr(&lp->lp_primary_nid),
2627                               LNET_PING_BUFFER_SEQNO(pbuf),
2628                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2629                 }
2630                 goto out;
2631         }
2632
2633         /*
2634          * Allocate a buffer to copy the data. On a failure we drop
2635          * the Push and set FORCE_PING to force the discovery
2636          * thread to fix the problem by pinging the peer.
2637          */
2638         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2639         if (!lp->lp_data) {
2640                 lp->lp_state |= LNET_PEER_FORCE_PING;
2641                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2642                        libcfs_nidstr(&lp->lp_primary_nid),
2643                        LNET_PING_BUFFER_SEQNO(pbuf));
2644                 goto out;
2645         }
2646
2647         /* Success */
2648         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2649                       FLEXIBLE_OBJECT);
2650         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2651         CDEBUG(D_NET, "Received Push %s %u\n",
2652                libcfs_nidstr(&lp->lp_primary_nid),
2653                LNET_PING_BUFFER_SEQNO(pbuf));
2654
2655 out:
2656         /* We've processed this buffer. It can be reposted */
2657         pbuf->pb_needs_post = true;
2658
2659         /*
2660          * Queue the peer for discovery if not done, force it on the request
2661          * queue and wake the discovery thread if the peer was already queued,
2662          * because its status changed.
2663          */
2664         spin_unlock(&lp->lp_lock);
2665         lnet_net_lock(LNET_LOCK_EX);
2666         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2667                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2668                 wake_up(&the_lnet.ln_dc_waitq);
2669         }
2670         /* Drop refcount from lookup */
2671         lnet_peer_decref_locked(lp);
2672         lnet_net_unlock(LNET_LOCK_EX);
2673 }
2674
2675 /*
2676  * Clear the discovery error state, unless we're already discovering
2677  * this peer, in which case the error is current.
2678  */
2679 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2680 {
2681         spin_lock(&lp->lp_lock);
2682         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2683                 lp->lp_dc_error = 0;
2684         spin_unlock(&lp->lp_lock);
2685 }
2686
2687 /*
2688  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2689  * dropped/retaken within this function. An lnet_peer_ni is passed in
2690  * because discovery could tear down an lnet_peer.
2691  */
2692 int
2693 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2694 {
2695         DEFINE_WAIT(wait);
2696         struct lnet_peer *lp = NULL;
2697         int rc = 0;
2698         int count = 0;
2699
2700 again:
2701         if (lp)
2702                 lnet_peer_decref_locked(lp);
2703         lnet_net_unlock(cpt);
2704         lnet_net_lock(LNET_LOCK_EX);
2705         lp = lpni->lpni_peer_net->lpn_peer;
2706         lnet_peer_clear_discovery_error(lp);
2707
2708         /*
2709          * We're willing to be interrupted. The lpni can become a
2710          * zombie if we race with DLC, so we must check for that.
2711          */
2712         for (;;) {
2713                 /* Keep lp alive when the lnet_net_lock is unlocked */
2714                 lnet_peer_addref_locked(lp);
2715                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2716                 if (signal_pending(current))
2717                         break;
2718                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2719                         break;
2720                 /*
2721                  * Don't repeat discovery if discovery is disabled. This is
2722                  * done to ensure we can use discovery as a standard ping as
2723                  * well for backwards compatibility with routers which do not
2724                  * have discovery or have discovery disabled
2725                  */
2726                 if (lnet_is_discovery_disabled(lp) && count > 0)
2727                         break;
2728                 if (lp->lp_dc_error)
2729                         break;
2730                 if (lnet_peer_is_uptodate(lp))
2731                         break;
2732                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2733                         break;
2734                 lnet_peer_queue_for_discovery(lp);
2735                 count++;
2736                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2737
2738                 /*
2739                  * If caller requested a non-blocking operation then
2740                  * return immediately. Once discovery is complete any
2741                  * pending messages that were stopped due to discovery
2742                  * will be transmitted.
2743                  */
2744                 if (!block)
2745                         break;
2746
2747                 lnet_net_unlock(LNET_LOCK_EX);
2748                 schedule();
2749                 finish_wait(&lp->lp_dc_waitq, &wait);
2750                 lnet_net_lock(LNET_LOCK_EX);
2751                 lnet_peer_decref_locked(lp);
2752                 /* Peer may have changed */
2753                 lp = lpni->lpni_peer_net->lpn_peer;
2754         }
2755         finish_wait(&lp->lp_dc_waitq, &wait);
2756
2757         lnet_net_unlock(LNET_LOCK_EX);
2758         lnet_net_lock(cpt);
2759         /*
2760          * The peer may have changed, so re-check and rediscover if that turns
2761          * out to have been the case. The reference count on lp ensured that
2762          * even if it was unlinked from lpni the memory could not be recycled.
2763          * Thus the check below is sufficient to determine whether the peer
2764          * changed. If the peer changed, then lp must not be dereferenced.
2765          */
2766         if (lp != lpni->lpni_peer_net->lpn_peer)
2767                 goto again;
2768
2769         if (signal_pending(current))
2770                 rc = -EINTR;
2771         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2772                 rc = -ESHUTDOWN;
2773         else if (lp->lp_dc_error)
2774                 rc = lp->lp_dc_error;
2775         else if (!block)
2776                 CDEBUG(D_NET, "non-blocking discovery\n");
2777         else if (!lnet_peer_is_uptodate(lp) &&
2778                  !(lnet_is_discovery_disabled(lp) ||
2779                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2780                 goto again;
2781
2782         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2783                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2784                libcfs_nidstr(&lpni->lpni_nid), rc,
2785                (!block) ? "pending discovery" : "discovery complete");
2786         lnet_peer_decref_locked(lp);
2787
2788         return rc;
2789 }
2790
2791 /* Handle an incoming ack for a push. */
2792 static void
2793 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2794 {
2795         struct lnet_ping_buffer *pbuf;
2796
2797         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2798         spin_lock(&lp->lp_lock);
2799         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2800         lp->lp_push_error = ev->status;
2801         if (ev->status)
2802                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2803         else
2804                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2805         spin_unlock(&lp->lp_lock);
2806
2807         CDEBUG(D_NET, "peer %s ev->status %d\n",
2808                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2809 }
2810
2811 static bool find_primary(struct lnet_nid *nid,
2812                          struct lnet_ping_buffer *pbuf)
2813 {
2814         struct lnet_ping_info *pi = &pbuf->pb_info;
2815         struct lnet_ping_iter piter;
2816         __u32 *stp;
2817
2818         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2819                 /* First large nid is primary */
2820                 for (stp = ping_iter_first(&piter, pbuf, nid);
2821                      stp;
2822                      stp = ping_iter_next(&piter, nid)) {
2823                         if (nid_is_nid4(nid))
2824                                 continue;
2825                         /* nid has already been copied in */
2826                         return true;
2827                 }
2828                 /* no large nids ... weird ... ignore the flag
2829                  * and use first nid.
2830                  */
2831         }
2832         /* pi_nids[1] is primary */
2833         if (pi->pi_nnis < 2)
2834                 return false;
2835         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2836         return true;
2837 }
2838
2839 /* Handle a Reply message. This is the reply to a Ping message. */
2840 static void
2841 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2842 {
2843         struct lnet_ping_buffer *pbuf;
2844         struct lnet_nid primary;
2845         int infobytes;
2846         int rc;
2847         bool ping_feat_disc;
2848
2849         spin_lock(&lp->lp_lock);
2850
2851         lp->lp_disc_src_nid = ev->target.nid;
2852         lp->lp_disc_dst_nid = ev->source.nid;
2853
2854         /*
2855          * If some kind of error happened the contents of message
2856          * cannot be used. Set PING_FAILED to trigger a retry.
2857          */
2858         if (ev->status) {
2859                 lp->lp_state |= LNET_PEER_PING_FAILED;
2860                 lp->lp_ping_error = ev->status;
2861                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2862                        ev->status,
2863                        libcfs_nidstr(&lp->lp_primary_nid),
2864                        libcfs_nidstr(&ev->source.nid));
2865                 goto out;
2866         }
2867
2868         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2869         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2870                 lnet_swap_pinginfo(pbuf);
2871
2872         /*
2873          * A reply with invalid or corrupted info. Set PING_FAILED to
2874          * trigger a retry.
2875          */
2876         rc = lnet_ping_info_validate(&pbuf->pb_info);
2877         if (rc) {
2878                 lp->lp_state |= LNET_PEER_PING_FAILED;
2879                 lp->lp_ping_error = 0;
2880                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2881                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2882                 goto out;
2883         }
2884
2885         /*
2886          * The peer may have discovery disabled at its end. Set
2887          * NO_DISCOVERY as appropriate.
2888          */
2889         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2890         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2891                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2892                        libcfs_nidstr(&lp->lp_primary_nid),
2893                        ping_feat_disc ? "enabled" : "disabled",
2894                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2895
2896                 /* Detect whether this peer has toggled discovery from on to
2897                  * off and whether we can delete and re-create the peer. Peers
2898                  * that were manually configured cannot be deleted by discovery.
2899                  * We need to delete this peer and re-create it if the peer was
2900                  * not configured manually, is currently considered DD capable,
2901                  * and either:
2902                  * 1. We've already discovered the peer (the peer has toggled
2903                  *    the discovery feature from on to off), or
2904                  * 2. The peer is considered MR, but it was not user configured
2905                  *    (this was a "temporary" peer created via the kernel APIs
2906                  *     that we're discovering for the first time)
2907                  */
2908                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2909                                       LNET_PEER_NO_DISCOVERY)) &&
2910                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2911                                      LNET_PEER_MULTI_RAIL))) {
2912                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2913                                libcfs_nidstr(&lp->lp_primary_nid),
2914                                lp->lp_state);
2915                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2916                 }
2917                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2918         } else {
2919                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2920                        libcfs_nidstr(&lp->lp_primary_nid));
2921                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2922         }
2923
2924         /*
2925          * Update the MULTI_RAIL flag based on the reply. If the peer
2926          * was configured with DLC then the setting should match what
2927          * DLC put in.
2928          */
2929         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2930                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2931                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2932                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2933                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2934                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2935                               libcfs_nidstr(&lp->lp_primary_nid));
2936                 } else if (lnet_peer_discovery_disabled) {
2937                         CDEBUG(D_NET,
2938                                "peer %s(%p) not MR: DD disabled locally\n",
2939                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2940                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2941                         CDEBUG(D_NET,
2942                                "peer %s(%p) not MR: DD disabled remotely\n",
2943                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2944                 } else {
2945                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2946                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2947                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2948                         lnet_peer_clr_non_mr_pref_nids(lp);
2949                 }
2950         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2951                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2952                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2953                               libcfs_nidstr(&lp->lp_primary_nid));
2954                 } else {
2955                         CERROR("Multi-Rail state vanished from %s\n",
2956                                libcfs_nidstr(&lp->lp_primary_nid));
2957                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2958                 }
2959         }
2960
2961         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2962         /*
2963          * Make sure we'll allocate the correct size ping buffer when
2964          * pinging the peer.
2965          */
2966         if (lp->lp_data_bytes < infobytes)
2967                 lp->lp_data_bytes = infobytes;
2968
2969         /* Check for truncation of the Reply. Clear PING_SENT and set
2970          * PING_FAILED to trigger a retry.
2971          */
2972         if (pbuf->pb_nbytes < infobytes) {
2973                 if (the_lnet.ln_push_target_nbytes < infobytes)
2974                         the_lnet.ln_push_target_nbytes = infobytes;
2975                 lp->lp_state |= LNET_PEER_PING_FAILED;
2976                 lp->lp_ping_error = 0;
2977                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2978                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2979                 goto out;
2980         }
2981
2982         /*
2983          * Check the sequence numbers in the reply. These are only
2984          * available if the reply came from a Multi-Rail peer.
2985          */
2986         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2987             find_primary(&primary, pbuf) &&
2988             nid_same(&lp->lp_primary_nid, &primary)) {
2989                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2990                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2991                                 libcfs_nidstr(&lp->lp_primary_nid),
2992                                 LNET_PING_BUFFER_SEQNO(pbuf),
2993                                 lp->lp_peer_seqno);
2994
2995                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2996         }
2997
2998         /* We're happy with the state of the data in the buffer. */
2999         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
3000                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
3001                lp->lp_state);
3002         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3003                 lnet_ping_buffer_decref(lp->lp_data);
3004         else
3005                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
3006         lnet_ping_buffer_addref(pbuf);
3007         lp->lp_data = pbuf;
3008 out:
3009         lp->lp_state &= ~LNET_PEER_PING_SENT;
3010         spin_unlock(&lp->lp_lock);
3011 }
3012
3013 /*
3014  * Send event handling. Only matters for error cases, where we clean
3015  * up state on the peer and peer_ni that would otherwise be updated in
3016  * the REPLY event handler for a successful Ping, and the ACK event
3017  * handler for a successful Push.
3018  */
3019 static int
3020 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
3021 {
3022         int rc = 0;
3023
3024         if (!ev->status)
3025                 goto out;
3026
3027         spin_lock(&lp->lp_lock);
3028         if (ev->msg_type == LNET_MSG_GET) {
3029                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3030                 lp->lp_state |= LNET_PEER_PING_FAILED;
3031                 lp->lp_ping_error = ev->status;
3032         } else { /* ev->msg_type == LNET_MSG_PUT */
3033                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3034                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3035                 lp->lp_push_error = ev->status;
3036         }
3037         spin_unlock(&lp->lp_lock);
3038         rc = LNET_REDISCOVER_PEER;
3039 out:
3040         CDEBUG(D_NET, "%s Send to %s: %d\n",
3041                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3042                 libcfs_nidstr(&ev->target.nid), rc);
3043         return rc;
3044 }
3045
3046 /*
3047  * Unlink event handling. This event is only seen if a call to
3048  * LNetMDUnlink() caused the event to be unlinked. If this call was
3049  * made after the event was set up in LNetGet() or LNetPut() then we
3050  * assume the Ping or Push timed out.
3051  */
3052 static void
3053 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3054 {
3055         spin_lock(&lp->lp_lock);
3056         /* We've passed through LNetGet() */
3057         if (lp->lp_state & LNET_PEER_PING_SENT) {
3058                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3059                 lp->lp_state |= LNET_PEER_PING_FAILED;
3060                 lp->lp_ping_error = -ETIMEDOUT;
3061                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3062                         libcfs_nidstr(&lp->lp_primary_nid));
3063         }
3064         /* We've passed through LNetPut() */
3065         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3066                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3067                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3068                 lp->lp_push_error = -ETIMEDOUT;
3069                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3070                         libcfs_nidstr(&lp->lp_primary_nid));
3071         }
3072         spin_unlock(&lp->lp_lock);
3073 }
3074
3075 /*
3076  * Event handler for the discovery EQ.
3077  *
3078  * Called with lnet_res_lock(cpt) held. The cpt is the
3079  * lnet_cpt_of_cookie() of the md handle cookie.
3080  */
3081 static void lnet_discovery_event_handler(struct lnet_event *event)
3082 {
3083         struct lnet_peer *lp = event->md_user_ptr;
3084         struct lnet_ping_buffer *pbuf;
3085         int rc;
3086
3087         /* discovery needs to take another look */
3088         rc = LNET_REDISCOVER_PEER;
3089
3090         CDEBUG(D_NET, "Received event: %d\n", event->type);
3091
3092         switch (event->type) {
3093         case LNET_EVENT_ACK:
3094                 lnet_discovery_event_ack(lp, event);
3095                 break;
3096         case LNET_EVENT_REPLY:
3097                 lnet_discovery_event_reply(lp, event);
3098                 break;
3099         case LNET_EVENT_SEND:
3100                 /* Only send failure triggers a retry. */
3101                 rc = lnet_discovery_event_send(lp, event);
3102                 break;
3103         case LNET_EVENT_UNLINK:
3104                 /* LNetMDUnlink() was called */
3105                 lnet_discovery_event_unlink(lp, event);
3106                 break;
3107         default:
3108                 /* Invalid events. */
3109                 LBUG();
3110         }
3111         lnet_net_lock(LNET_LOCK_EX);
3112
3113         /* put peer back at end of request queue, if discovery not already
3114          * done */
3115         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3116             lnet_peer_queue_for_discovery(lp)) {
3117                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3118                 wake_up(&the_lnet.ln_dc_waitq);
3119         }
3120         if (event->unlinked) {
3121                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3122                 lnet_ping_buffer_decref(pbuf);
3123                 lnet_peer_decref_locked(lp);
3124         }
3125         lnet_net_unlock(LNET_LOCK_EX);
3126 }
3127
3128 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3129                      struct lnet_ping_buffer *pbuf,
3130                      struct lnet_nid *nid)
3131 {
3132         pi->pinfo = &pbuf->pb_info;
3133         pi->pos = &pbuf->pb_info.pi_ni;
3134         pi->end = (void *)pi->pinfo +
3135                   min_t(int, pbuf->pb_nbytes,
3136                         lnet_ping_info_size(pi->pinfo));
3137         /* lnet_ping_info_validiate ensures there will be one
3138          * lnet_ni_status at the start
3139          */
3140         if (nid)
3141                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3142
3143         pi->pos += sizeof(struct lnet_ni_status);
3144         return &pbuf->pb_info.pi_ni[0].ns_status;
3145 }
3146
3147 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3148 {
3149         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3150
3151         if (pi->pos < ((void *)pi->pinfo + off)) {
3152                 struct lnet_ni_status *ns = pi->pos;
3153
3154                 pi->pos = ns + 1;
3155                 if (pi->pos > pi->end)
3156                         return NULL;
3157                 if (nid)
3158                         lnet_nid4_to_nid(ns->ns_nid, nid);
3159                 return &ns->ns_status;
3160         }
3161
3162         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3163                 struct lnet_ni_large_status *lns = pi->pos;
3164
3165                 if (pi->pos + 8 > pi->end)
3166                         /* Not safe to examine next */
3167                         return NULL;
3168                 pi->pos = lnet_ping_sts_next(lns);
3169                 if (pi->pos > pi->end)
3170                         return NULL;
3171                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3172                         continue;
3173                 if (nid)
3174                         *nid = lns->ns_nid;
3175                 return &lns->ns_status;
3176         }
3177         return NULL;
3178 }
3179
3180 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3181 {
3182         struct lnet_ping_iter pi;
3183         u32 *st;
3184         int nnis = 0;
3185
3186         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3187              st = ping_iter_next(&pi, NULL))
3188                 nnis += 1;
3189
3190         return nnis;
3191 }
3192
3193 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3194 {
3195         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN) {
3196                 lnet_net_lock(0);
3197                 lnet_handle_remote_failure_locked(lpni);
3198                 lnet_net_unlock(0);
3199         } else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3200                  !lpni->lpni_last_alive)
3201                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3202 }
3203
3204 /*
3205  * Build a peer from incoming data.
3206  *
3207  * The NIDs in the incoming data are supposed to be structured as follows:
3208  *  - loopback
3209  *  - primary NID
3210  *  - other NIDs in same net
3211  *  - NIDs in second net
3212  *  - NIDs in third net
3213  *  - ...
3214  * This due to the way the list of NIDs in the data is created.
3215  *
3216  * Note that this function will mark the peer uptodate unless an
3217  * ENOMEM is encontered. All other errors are due to a conflict
3218  * between the DLC configuration and what discovery sees. We treat DLC
3219  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3220  * peer from becoming stuck in discovery.
3221  */
3222 static int lnet_peer_merge_data(struct lnet_peer *lp,
3223                                 struct lnet_ping_buffer *pbuf)
3224 {
3225         struct lnet_peer_net *lpn;
3226         struct lnet_peer_ni *lpni;
3227         struct lnet_nid *curnis = NULL;
3228         struct lnet_ni_large_status *addnis = NULL;
3229         struct lnet_nid *delnis = NULL;
3230         struct lnet_ping_iter pi;
3231         struct lnet_nid nid;
3232         u32 *stp;
3233         struct lnet_nid primary = {};
3234         bool want_large_primary;
3235         unsigned int flags;
3236         int ncurnis;
3237         int naddnis;
3238         int ndelnis;
3239         int nnis = 0;
3240         int i;
3241         int j;
3242         int rc;
3243         __u32 old_st;
3244
3245         flags = LNET_PEER_DISCOVERED;
3246         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3247                 flags |= LNET_PEER_MULTI_RAIL;
3248
3249         /*
3250          * Cache the routing feature for the peer; whether it is enabled
3251          * for disabled as reported by the remote peer.
3252          */
3253         spin_lock(&lp->lp_lock);
3254         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3255                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3256         else
3257                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3258         spin_unlock(&lp->lp_lock);
3259
3260         nnis = ping_info_count_entries(pbuf);
3261         nnis = max_t(int, lp->lp_nnis, nnis);
3262         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3263         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3264         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3265         if (!curnis || !addnis || !delnis) {
3266                 rc = -ENOMEM;
3267                 goto out;
3268         }
3269         ncurnis = 0;
3270         naddnis = 0;
3271         ndelnis = 0;
3272
3273         /* Construct the list of NIDs present in peer. */
3274         lpni = NULL;
3275         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3276                 curnis[ncurnis++] = lpni->lpni_nid;
3277
3278         /* Check for NIDs in pbuf not present in curnis[].
3279          * Skip the first, which is loop-back.  Take second as
3280          * primary, unless a large primary is found.
3281          */
3282         ping_iter_first(&pi, pbuf, NULL);
3283         stp = ping_iter_next(&pi, &nid);
3284         if (stp)
3285                 primary = nid;
3286         want_large_primary = (pbuf->pb_info.pi_features &
3287                               LNET_PING_FEAT_PRIMARY_LARGE);
3288         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3289                 for (j = 0; j < ncurnis; j++)
3290                         if (nid_same(&nid, &curnis[j]))
3291                                 break;
3292                 if (j == ncurnis) {
3293                         addnis[naddnis].ns_nid = nid;
3294                         addnis[naddnis].ns_status = *stp;
3295                         naddnis += 1;
3296                 }
3297                 if (want_large_primary && nid.nid_size) {
3298                         primary = nid;
3299                         want_large_primary = false;
3300                 }
3301         }
3302         /*
3303          * Check for NIDs in curnis[] not present in pbuf.
3304          * The nested loop starts at 1 to skip the loopback NID.
3305          *
3306          * But never add the loopback NID to delnis[]: if it is
3307          * present in curnis[] then this peer is for this node.
3308          */
3309         for (i = 0; i < ncurnis; i++) {
3310                 if (nid_is_lo0(&curnis[i]))
3311                         continue;
3312                 ping_iter_first(&pi, pbuf, NULL);
3313                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3314                         if (nid_same(&curnis[i], &nid)) {
3315                                 /*
3316                                  * update the information we cache for the
3317                                  * peer with the latest information we
3318                                  * received
3319                                  */
3320                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3321                                 if (lpni) {
3322                                         old_st = lpni->lpni_ns_status;
3323                                         lpni->lpni_ns_status = *stp;
3324                                         if (old_st != lpni->lpni_ns_status)
3325                                                 handle_disc_lpni_health(lpni);
3326                                         lnet_peer_ni_decref_locked(lpni);
3327                                 }
3328                                 break;
3329                         }
3330                 }
3331                 if (!stp)
3332                         delnis[ndelnis++] = curnis[i];
3333         }
3334
3335         /*
3336          * If we get here and the discovery is disabled then we don't want
3337          * to add or delete any NIs. We just updated the ones we have some
3338          * information on, and call it a day
3339          */
3340         rc = 0;
3341         if (lnet_is_discovery_disabled(lp))
3342                 goto out;
3343
3344         for (i = 0; i < naddnis; i++) {
3345                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3346                 if (rc) {
3347                         CERROR("Error adding NID %s to peer %s: %d\n",
3348                                libcfs_nidstr(&addnis[i].ns_nid),
3349                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3350                         if (rc == -ENOMEM)
3351                                 goto out;
3352                 }
3353                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3354                 if (lpni) {
3355                         lpni->lpni_ns_status = addnis[i].ns_status;
3356                         handle_disc_lpni_health(lpni);
3357                         lnet_peer_ni_decref_locked(lpni);
3358                 }
3359         }
3360
3361         for (i = 0; i < ndelnis; i++) {
3362                 /*
3363                  * for routers it's okay to delete the primary_nid because
3364                  * the upper layers don't really rely on it. So if we're
3365                  * being told that the router changed its primary_nid
3366                  * then it's okay to delete it.
3367                  */
3368                 if (lp->lp_rtr_refcount > 0)
3369                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3370                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3371                 if (rc) {
3372                         CERROR("Error deleting NID %s from peer %s: %d\n",
3373                                libcfs_nidstr(&delnis[i]),
3374                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3375                         if (rc == -ENOMEM)
3376                                 goto out;
3377                 }
3378         }
3379
3380         /* The peer net for the primary NID should be the first entry in the
3381          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3382          * be the first entry in its peer net's lpn_peer_nis list.
3383          */
3384         find_primary(&nid, pbuf);
3385         lpni = lnet_peer_ni_find_locked(&nid);
3386         if (!lpni) {
3387                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3388                        libcfs_nidstr(&nid));
3389                 goto out;
3390         }
3391
3392         lpn = lpni->lpni_peer_net;
3393         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3394                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3395
3396         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3397                 list_move(&lpni->lpni_peer_nis,
3398                           &lpni->lpni_peer_net->lpn_peer_nis);
3399
3400         lnet_peer_ni_decref_locked(lpni);
3401         /*
3402          * Errors other than -ENOMEM are due to peers having been
3403          * configured with DLC. Ignore these because DLC overrides
3404          * Discovery.
3405          */
3406         rc = 0;
3407 out:
3408         /* If this peer is a gateway, invoke the routing callback to update
3409          * the associated route status
3410          */
3411         if (lp->lp_rtr_refcount > 0)
3412                 lnet_router_discovery_ping_reply(lp, pbuf);
3413
3414         CFS_FREE_PTR_ARRAY(curnis, nnis);
3415         CFS_FREE_PTR_ARRAY(addnis, nnis);
3416         CFS_FREE_PTR_ARRAY(delnis, nnis);
3417         lnet_ping_buffer_decref(pbuf);
3418         CDEBUG(D_NET, "peer %s (%p): %d\n",
3419                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3420
3421         if (rc) {
3422                 spin_lock(&lp->lp_lock);
3423                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3424                 lp->lp_state |= LNET_PEER_FORCE_PING;
3425                 spin_unlock(&lp->lp_lock);
3426         }
3427         return rc;
3428 }
3429
3430 /*
3431  * The data in pbuf says lp is its primary peer, but the data was
3432  * received by a different peer. Try to update lp with the data.
3433  */
3434 static int
3435 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3436 {
3437         struct lnet_handle_md mdh;
3438
3439         /* Queue lp for discovery, and force it on the request queue. */
3440         lnet_net_lock(LNET_LOCK_EX);
3441         if (lnet_peer_queue_for_discovery(lp))
3442                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3443         lnet_net_unlock(LNET_LOCK_EX);
3444
3445         LNetInvalidateMDHandle(&mdh);
3446
3447         /*
3448          * Decide whether we can move the peer to the DATA_PRESENT state.
3449          *
3450          * We replace stale data for a multi-rail peer, repair PING_FAILED
3451          * status, and preempt FORCE_PING.
3452          *
3453          * If after that we have DATA_PRESENT, we merge it into this peer.
3454          */
3455         spin_lock(&lp->lp_lock);
3456         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3457                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3458                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3459                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3460                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3461                         lnet_ping_buffer_decref(pbuf);
3462                         pbuf = lp->lp_data;
3463                         lp->lp_data = NULL;
3464                 }
3465         }
3466         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3467                 lnet_ping_buffer_decref(lp->lp_data);
3468                 lp->lp_data = NULL;
3469                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3470         }
3471         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3472                 mdh = lp->lp_ping_mdh;
3473                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3474                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3475                 lp->lp_ping_error = 0;
3476         }
3477         if (lp->lp_state & LNET_PEER_FORCE_PING)
3478                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3479         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3480         spin_unlock(&lp->lp_lock);
3481
3482         if (!LNetMDHandleIsInvalid(mdh))
3483                 LNetMDUnlink(mdh);
3484
3485         if (pbuf)
3486                 return lnet_peer_merge_data(lp, pbuf);
3487
3488         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3489         return 0;
3490 }
3491
3492 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3493                                      struct lnet_ping_buffer *pbuf)
3494 {
3495         struct lnet_ping_iter pi;
3496         struct lnet_nid pnid;
3497         u32 *st;
3498
3499         for (st = ping_iter_first(&pi, pbuf, &pnid);
3500              st;
3501              st = ping_iter_next(&pi, &pnid))
3502                 if (nid_same(nid, &pnid))
3503                         return true;
3504         return false;
3505 }
3506
3507 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3508  * to the discovery queue a reference was taken that will prevent the peer from
3509  * actually being freed by this function. After this function exits the
3510  * discovery thread should call lnet_peer_discovery_complete() which will
3511  * drop that reference as well as wake any waiters that may also be holding a
3512  * ref on the peer
3513  */
3514 static int lnet_peer_deletion(struct lnet_peer *lp)
3515 __must_hold(&lp->lp_lock)
3516 {
3517         struct list_head rlist;
3518         struct lnet_route *route, *tmp;
3519         int sensitivity = lp->lp_health_sensitivity;
3520         int rc = 0;
3521
3522         INIT_LIST_HEAD(&rlist);
3523
3524         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3525                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3526
3527         /* no-op if lnet_peer_del() has already been called on this peer */
3528         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3529                 goto clear_discovering;
3530
3531         spin_unlock(&lp->lp_lock);
3532
3533         mutex_lock(&the_lnet.ln_api_mutex);
3534         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3535             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3536                 mutex_unlock(&the_lnet.ln_api_mutex);
3537                 spin_lock(&lp->lp_lock);
3538                 rc = -ESHUTDOWN;
3539                 goto clear_discovering;
3540         }
3541
3542         lnet_peer_cancel_discovery(lp);
3543         lnet_net_lock(LNET_LOCK_EX);
3544         list_for_each_entry_safe(route, tmp,
3545                                  &lp->lp_routes,
3546                                  lr_gwlist)
3547                 lnet_move_route(route, NULL, &rlist);
3548
3549         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3550         rc = lnet_peer_del_locked(lp);
3551         if (rc)
3552                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3553                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3554
3555         lnet_net_unlock(LNET_LOCK_EX);
3556
3557         list_for_each_entry_safe(route, tmp,
3558                                  &rlist, lr_list) {
3559                 /* re-add these routes */
3560                 lnet_add_route(route->lr_net,
3561                                route->lr_hops,
3562                                &route->lr_nid,
3563                                route->lr_priority,
3564                                sensitivity);
3565                 LIBCFS_FREE(route, sizeof(*route));
3566         }
3567
3568         mutex_unlock(&the_lnet.ln_api_mutex);
3569
3570         spin_lock(&lp->lp_lock);
3571
3572         rc = 0;
3573
3574 clear_discovering:
3575         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3576                           LNET_PEER_FORCE_PUSH);
3577
3578         return rc;
3579 }
3580
3581 /*
3582  * Update a peer using the data received.
3583  */
3584 static int lnet_peer_data_present(struct lnet_peer *lp)
3585 __must_hold(&lp->lp_lock)
3586 {
3587         struct lnet_ping_buffer *pbuf;
3588         struct lnet_peer_ni *lpni;
3589         struct lnet_nid nid;
3590         unsigned int flags;
3591         int rc = 0;
3592
3593         pbuf = lp->lp_data;
3594         lp->lp_data = NULL;
3595         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3596         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3597         spin_unlock(&lp->lp_lock);
3598
3599         /*
3600          * Modifications of peer structures are done while holding the
3601          * ln_api_mutex. A global lock is required because we may be
3602          * modifying multiple peer structures, and a mutex greatly
3603          * simplifies memory management.
3604          *
3605          * The actual changes to the data structures must also protect
3606          * against concurrent lookups, for which the lnet_net_lock in
3607          * LNET_LOCK_EX mode is used.
3608          */
3609         mutex_lock(&the_lnet.ln_api_mutex);
3610         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3611                 lnet_ping_buffer_decref(pbuf);
3612                 rc = -ESHUTDOWN;
3613                 goto out;
3614         }
3615
3616         /*
3617          * If this peer is not on the peer list then it is being torn
3618          * down, and our reference count may be all that is keeping it
3619          * alive. Don't do any work on it.
3620          */
3621         if (list_empty(&lp->lp_peer_list)) {
3622                 lnet_ping_buffer_decref(pbuf);
3623                 goto out;
3624         }
3625
3626         flags = LNET_PEER_DISCOVERED;
3627         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3628                 flags |= LNET_PEER_MULTI_RAIL;
3629         /*
3630          * Check whether the primary NID in the message matches the
3631          * primary NID of the peer. If it does, update the peer, if
3632          * it it does not, check whether there is already a peer with
3633          * that primary NID. If no such peer exists, try to update
3634          * the primary NID of the current peer (allowed if it was
3635          * created due to message traffic) and complete the update.
3636          * If the peer did exist, hand off the data to it.
3637          *
3638          * The peer for the loopback interface is a special case: this
3639          * is the peer for the local node, and we want to set its
3640          * primary NID to the correct value here. Moreover, this peer
3641          * can show up with only the loopback NID in the ping buffer.
3642          */
3643         if (!find_primary(&nid, pbuf)) {
3644                 lnet_ping_buffer_decref(pbuf);
3645                 goto out;
3646         }
3647         /* If lp_merge_primary_nid is set, assign it as primary,
3648          * which causes the peers to merge.
3649          */
3650         if (!LNET_NID_IS_ANY(&lp->lp_merge_primary_nid)) {
3651
3652                 rc = lnet_peer_set_primary_nid(lp, &lp->lp_merge_primary_nid,
3653                                                flags);
3654                 lp->lp_merge_primary_nid = LNET_ANY_NID;
3655         }
3656
3657         if (nid_is_lo0(&lp->lp_primary_nid)) {
3658                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3659                 if (rc)
3660                         lnet_ping_buffer_decref(pbuf);
3661                 else
3662                         rc = lnet_peer_merge_data(lp, pbuf);
3663         /*
3664          * if the primary nid of the peer is present in the ping info returned
3665          * from the peer, but it's not the local primary peer we have
3666          * cached and discovery is disabled, then we don't want to update
3667          * our local peer info, by adding or removing NIDs, we just want
3668          * to update the status of the nids that we currently have
3669          * recorded in that peer.
3670          */
3671         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3672                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3673                     lnet_is_discovery_disabled(lp))) {
3674                 rc = lnet_peer_merge_data(lp, pbuf);
3675         } else {
3676                 lpni = lnet_peer_ni_find_locked(&nid);
3677                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3678                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3679                         if (rc) {
3680                                 CERROR("Primary NID error %s versus %s: %d\n",
3681                                        libcfs_nidstr(&lp->lp_primary_nid),
3682                                        libcfs_nidstr(&nid), rc);
3683                                 lnet_ping_buffer_decref(pbuf);
3684                         } else {
3685                                 rc = lnet_peer_merge_data(lp, pbuf);
3686                         }
3687                         if (lpni)
3688                                 lnet_peer_ni_decref_locked(lpni);
3689                 } else {
3690                         struct lnet_peer *new_lp;
3691                         new_lp = lpni->lpni_peer_net->lpn_peer;
3692                         /*
3693                          * if lp has discovery/MR enabled that means new_lp
3694                          * should have discovery/MR enabled as well, since
3695                          * it's the same peer, which we're about to merge
3696                          */
3697                         spin_lock(&lp->lp_lock);
3698                         spin_lock(&new_lp->lp_lock);
3699                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3700                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3701                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3702                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3703                         /* If we're processing a ping reply then we may be
3704                          * about to send a push to the peer that we ping'd.
3705                          * Since the ping reply that we're processing was
3706                          * received by lp, we need to set the discovery source
3707                          * NID for new_lp to the NID stored in lp.
3708                          */
3709                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3710                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3711                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3712                         }
3713                         spin_unlock(&new_lp->lp_lock);
3714                         spin_unlock(&lp->lp_lock);
3715
3716                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3717                         lnet_consolidate_routes_locked(lp, new_lp);
3718                         lnet_peer_ni_decref_locked(lpni);
3719                 }
3720         }
3721 out:
3722         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3723                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3724                lp->lp_state);
3725         mutex_unlock(&the_lnet.ln_api_mutex);
3726
3727         spin_lock(&lp->lp_lock);
3728         /* Tell discovery to re-check the peer immediately. */
3729         if (!rc)
3730                 rc = LNET_REDISCOVER_PEER;
3731         return rc;
3732 }
3733
3734 /*
3735  * A ping failed. Clear the PING_FAILED state and set the
3736  * FORCE_PING state, to ensure a retry even if discovery is
3737  * disabled. This avoids being left with incorrect state.
3738  */
3739 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3740 __must_hold(&lp->lp_lock)
3741 {
3742         struct lnet_handle_md mdh;
3743         int rc;
3744
3745         mdh = lp->lp_ping_mdh;
3746         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3747         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3748         lp->lp_state |= LNET_PEER_FORCE_PING;
3749         rc = lp->lp_ping_error;
3750         lp->lp_ping_error = 0;
3751         spin_unlock(&lp->lp_lock);
3752
3753         if (!LNetMDHandleIsInvalid(mdh))
3754                 LNetMDUnlink(mdh);
3755
3756         CDEBUG(D_NET, "peer %s:%d\n",
3757                libcfs_nidstr(&lp->lp_primary_nid), rc);
3758
3759         spin_lock(&lp->lp_lock);
3760         return rc ? rc : LNET_REDISCOVER_PEER;
3761 }
3762
3763 /* Active side of ping. */
3764 static int lnet_peer_send_ping(struct lnet_peer *lp)
3765 __must_hold(&lp->lp_lock)
3766 {
3767         int bytes;
3768         int rc;
3769         int cpt;
3770
3771         lp->lp_state |= LNET_PEER_PING_SENT;
3772         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3773         spin_unlock(&lp->lp_lock);
3774
3775         cpt = lnet_net_lock_current();
3776         /* Refcount for MD. */
3777         lnet_peer_addref_locked(lp);
3778         lnet_net_unlock(cpt);
3779
3780         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3781
3782         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3783                             the_lnet.ln_dc_handler, false);
3784         /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3785          * refcount on the peer, otherwise LNetMDUnlink will be called
3786          * which will eventually do that.
3787          */
3788         if (rc > 0) {
3789                 lnet_net_lock(cpt);
3790                 lnet_peer_decref_locked(lp);
3791                 lnet_net_unlock(cpt);
3792                 rc = -rc; /* change the rc to negative value */
3793                 goto fail_error;
3794         } else if (rc < 0) {
3795                 goto fail_error;
3796         }
3797
3798         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3799
3800         spin_lock(&lp->lp_lock);
3801         return 0;
3802
3803 fail_error:
3804         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3805         /*
3806          * The errors that get us here are considered hard errors and
3807          * cause Discovery to terminate. So we clear PING_SENT, but do
3808          * not set either PING_FAILED or FORCE_PING. In fact we need
3809          * to clear PING_FAILED, because the unlink event handler will
3810          * have set it if we called LNetMDUnlink() above.
3811          */
3812         spin_lock(&lp->lp_lock);
3813         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3814         return rc;
3815 }
3816
3817 /*
3818  * This function exists because you cannot call LNetMDUnlink() from an
3819  * event handler.
3820  */
3821 static int lnet_peer_push_failed(struct lnet_peer *lp)
3822 __must_hold(&lp->lp_lock)
3823 {
3824         struct lnet_handle_md mdh;
3825         int rc;
3826
3827         mdh = lp->lp_push_mdh;
3828         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3829         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3830         rc = lp->lp_push_error;
3831         lp->lp_push_error = 0;
3832         spin_unlock(&lp->lp_lock);
3833
3834         if (!LNetMDHandleIsInvalid(mdh))
3835                 LNetMDUnlink(mdh);
3836
3837         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3838         spin_lock(&lp->lp_lock);
3839         return rc ? rc : LNET_REDISCOVER_PEER;
3840 }
3841
3842 /*
3843  * Mark the peer as discovered.
3844  */
3845 static int lnet_peer_discovered(struct lnet_peer *lp)
3846 __must_hold(&lp->lp_lock)
3847 {
3848         lp->lp_state |= LNET_PEER_DISCOVERED;
3849         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3850                           LNET_PEER_REDISCOVER);
3851
3852         lp->lp_dc_error = 0;
3853
3854         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3855
3856         return 0;
3857 }
3858
3859 /* Active side of push. */
3860 static int lnet_peer_send_push(struct lnet_peer *lp)
3861 __must_hold(&lp->lp_lock)
3862 {
3863         struct lnet_ping_buffer *pbuf;
3864         struct lnet_processid id;
3865         struct lnet_md md;
3866         int cpt;
3867         int rc;
3868
3869         /* Don't push to a non-multi-rail peer. */
3870         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3871                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3872                 /* if peer's NIDs are uptodate then peer is discovered */
3873                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3874                         rc = lnet_peer_discovered(lp);
3875                         return rc;
3876                 }
3877
3878                 return 0;
3879         }
3880
3881         lp->lp_state |= LNET_PEER_PUSH_SENT;
3882         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3883         spin_unlock(&lp->lp_lock);
3884
3885         cpt = lnet_net_lock_current();
3886         pbuf = the_lnet.ln_ping_target;
3887         lnet_ping_buffer_addref(pbuf);
3888         lnet_net_unlock(cpt);
3889
3890         /* Push source MD */
3891         md.start     = &pbuf->pb_info;
3892         md.length    = pbuf->pb_nbytes;
3893         md.threshold = 2; /* Put/Ack */
3894         md.max_size  = 0;
3895         md.options   = LNET_MD_TRACK_RESPONSE;
3896         md.handler   = the_lnet.ln_dc_handler;
3897         md.user_ptr  = lp;
3898
3899         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3900         if (rc) {
3901                 lnet_ping_buffer_decref(pbuf);
3902                 CERROR("Can't bind push source MD: %d\n", rc);
3903                 goto fail_error;
3904         }
3905
3906         cpt = lnet_net_lock_current();
3907         /* Refcount for MD. */
3908         lnet_peer_addref_locked(lp);
3909         id.pid = LNET_PID_LUSTRE;
3910         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3911                 id.nid = lp->lp_disc_dst_nid;
3912         else
3913                 id.nid = lp->lp_primary_nid;
3914         lnet_net_unlock(cpt);
3915
3916         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3917                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3918                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3919
3920         /*
3921          * reset the discovery nid. There is no need to restrict sending
3922          * from that source, if we call lnet_push_update_to_peers(). It'll
3923          * get set to a specific NID, if we initiate discovery from the
3924          * scratch
3925          */
3926         lp->lp_disc_src_nid = LNET_ANY_NID;
3927         lp->lp_disc_dst_nid = LNET_ANY_NID;
3928
3929         if (rc)
3930                 goto fail_unlink;
3931
3932         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3933
3934         spin_lock(&lp->lp_lock);
3935         return 0;
3936
3937 fail_unlink:
3938         LNetMDUnlink(lp->lp_push_mdh);
3939         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3940 fail_error:
3941         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3942                lp, rc);
3943         /*
3944          * The errors that get us here are considered hard errors and
3945          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3946          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3947          * because the unlink event handler will have set it if we
3948          * called LNetMDUnlink() above.
3949          */
3950         spin_lock(&lp->lp_lock);
3951         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3952         return rc;
3953 }
3954
3955 /*
3956  * Wait for work to be queued or some other change that must be
3957  * attended to. Returns non-zero if the discovery thread should shut
3958  * down.
3959  */
3960 static int lnet_peer_discovery_wait_for_work(void)
3961 {
3962         int cpt;
3963         int rc = 0;
3964
3965         DEFINE_WAIT(wait);
3966
3967         cpt = lnet_net_lock_current();
3968         for (;;) {
3969                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3970                                 TASK_INTERRUPTIBLE);
3971                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3972                         break;
3973                 if (lnet_push_target_resize_needed() ||
3974                     the_lnet.ln_push_target->pb_needs_post)
3975                         break;
3976                 if (!list_empty(&the_lnet.ln_dc_request))
3977                         break;
3978                 if (!list_empty(&the_lnet.ln_msg_resend))
3979                         break;
3980                 lnet_net_unlock(cpt);
3981
3982                 /*
3983                  * wakeup max every second to check if there are peers that
3984                  * have been stuck on the working queue for greater than
3985                  * the peer timeout.
3986                  */
3987                 schedule_timeout(cfs_time_seconds(1));
3988                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3989                 cpt = lnet_net_lock_current();
3990         }
3991         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3992
3993         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3994                 rc = -ESHUTDOWN;
3995
3996         lnet_net_unlock(cpt);
3997
3998         CDEBUG(D_NET, "woken: %d\n", rc);
3999
4000         return rc;
4001 }
4002
4003 /*
4004  * Messages that were pending on a destroyed peer will be put on a global
4005  * resend list. The message resend list will be checked by
4006  * the discovery thread when it wakes up, and will resend messages. These
4007  * messages can still be sendable in the case the lpni which was the initial
4008  * cause of the message re-queue was transfered to another peer.
4009  *
4010  * It is possible that LNet could be shutdown while we're iterating
4011  * through the list. lnet_shudown_lndnets() will attempt to access the
4012  * resend list, but will have to wait until the spinlock is released, by
4013  * which time there shouldn't be any more messages on the resend list.
4014  * During shutdown lnet_send() will fail and lnet_finalize() will be called
4015  * for the messages so they can be released. The other case is that
4016  * lnet_shudown_lndnets() can finalize all the messages before this
4017  * function can visit the resend list, in which case this function will be
4018  * a no-op.
4019  */
4020 static void lnet_resend_msgs(void)
4021 {
4022         struct lnet_msg *msg, *tmp;
4023         LIST_HEAD(resend);
4024         int rc;
4025
4026         spin_lock(&the_lnet.ln_msg_resend_lock);
4027         list_splice(&the_lnet.ln_msg_resend, &resend);
4028         spin_unlock(&the_lnet.ln_msg_resend_lock);
4029
4030         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
4031                 list_del_init(&msg->msg_list);
4032                 rc = lnet_send(&msg->msg_src_nid_param, msg,
4033                                &msg->msg_rtr_nid_param);
4034                 if (rc < 0) {
4035                         CNETERR("Error sending %s to %s: %d\n",
4036                                lnet_msgtyp2str(msg->msg_type),
4037                                libcfs_idstr(&msg->msg_target), rc);
4038                         lnet_finalize(msg, rc);
4039                 }
4040         }
4041 }
4042
4043 /* The discovery thread. */
4044 static int lnet_peer_discovery(void *arg)
4045 {
4046         struct lnet_peer *lp;
4047         int retry = 3;
4048         int rc;
4049
4050         wait_for_completion(&the_lnet.ln_started);
4051
4052         CDEBUG(D_NET, "started\n");
4053
4054         for (;;) {
4055                 if (lnet_peer_discovery_wait_for_work())
4056                         break;
4057
4058                 if (lnet_push_target_resize_needed())
4059                         lnet_push_target_resize();
4060                 else if (the_lnet.ln_push_target->pb_needs_post)
4061                         lnet_push_target_post(the_lnet.ln_push_target,
4062                                               &the_lnet.ln_push_target_md);
4063
4064                 lnet_resend_msgs();
4065
4066                 lnet_net_lock(LNET_LOCK_EX);
4067                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4068                         lnet_net_unlock(LNET_LOCK_EX);
4069                         break;
4070                 }
4071
4072                 /*
4073                  * Process all incoming discovery work requests.  When
4074                  * discovery must wait on a peer to change state, it
4075                  * is added to the tail of the ln_dc_working queue. A
4076                  * timestamp keeps track of when the peer was added,
4077                  * so we can time out discovery requests that take too
4078                  * long.
4079                  */
4080                 while (!list_empty(&the_lnet.ln_dc_request)) {
4081                         lp = list_first_entry(&the_lnet.ln_dc_request,
4082                                               struct lnet_peer, lp_dc_list);
4083                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4084                         /*
4085                          * set the time the peer was put on the dc_working
4086                          * queue. It shouldn't remain on the queue
4087                          * forever, in case the GET message (for ping)
4088                          * doesn't get a REPLY or the PUT message (for
4089                          * push) doesn't get an ACK.
4090                          */
4091                         lp->lp_last_queued = ktime_get_real_seconds();
4092                         lnet_net_unlock(LNET_LOCK_EX);
4093
4094                         if (lnet_push_target_resize_needed())
4095                                 lnet_push_target_resize();
4096                         else if (the_lnet.ln_push_target->pb_needs_post)
4097                                 lnet_push_target_post(the_lnet.ln_push_target,
4098                                                       &the_lnet.ln_push_target_md);
4099
4100                         /*
4101                          * Select an action depending on the state of
4102                          * the peer and whether discovery is disabled.
4103                          * The check whether discovery is disabled is
4104                          * done after the code that handles processing
4105                          * for arrived data, cleanup for failures, and
4106                          * forcing a Ping or Push.
4107                          */
4108                         spin_lock(&lp->lp_lock);
4109                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4110                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4111                                 lp->lp_state);
4112                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4113                                             LNET_PEER_MARK_DELETED))
4114                                 rc = lnet_peer_deletion(lp);
4115                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4116                                 rc = lnet_peer_data_present(lp);
4117                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4118                                 rc = lnet_peer_ping_failed(lp);
4119                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4120                                 rc = lnet_peer_push_failed(lp);
4121                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4122                                 rc = lnet_peer_send_ping(lp);
4123                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4124                                 rc = lnet_peer_send_push(lp);
4125                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4126                                 rc = lnet_peer_send_ping(lp);
4127                         else if (lnet_peer_needs_push(lp))
4128                                 rc = lnet_peer_send_push(lp);
4129                         else
4130                                 rc = lnet_peer_discovered(lp);
4131                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4132                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4133                                 lp->lp_state, rc);
4134
4135                         if (rc == LNET_REDISCOVER_PEER) {
4136                                 spin_unlock(&lp->lp_lock);
4137                                 lnet_net_lock(LNET_LOCK_EX);
4138                                 list_move(&lp->lp_dc_list,
4139                                           &the_lnet.ln_dc_request);
4140                         } else if (rc ||
4141                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4142                                 spin_unlock(&lp->lp_lock);
4143                                 lnet_net_lock(LNET_LOCK_EX);
4144                                 lnet_peer_discovery_complete(lp, rc);
4145                         } else {
4146                                 spin_unlock(&lp->lp_lock);
4147                                 lnet_net_lock(LNET_LOCK_EX);
4148                         }
4149
4150                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4151                                 break;
4152
4153                 }
4154
4155                 lnet_net_unlock(LNET_LOCK_EX);
4156         }
4157
4158 cleanup:
4159         CDEBUG(D_NET, "stopping\n");
4160         /*
4161          * Clean up before telling lnet_peer_discovery_stop() that
4162          * we're done. Use wake_up() below to somewhat reduce the
4163          * size of the thundering herd if there are multiple threads
4164          * waiting on discovery of a single peer.
4165          */
4166
4167         /* Queue cleanup 1: stop all pending pings and pushes. */
4168         lnet_net_lock(LNET_LOCK_EX);
4169         while (!list_empty(&the_lnet.ln_dc_working)) {
4170                 lp = list_first_entry(&the_lnet.ln_dc_working,
4171                                       struct lnet_peer, lp_dc_list);
4172                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4173                 lnet_net_unlock(LNET_LOCK_EX);
4174                 lnet_peer_cancel_discovery(lp);
4175                 lnet_net_lock(LNET_LOCK_EX);
4176         }
4177         lnet_net_unlock(LNET_LOCK_EX);
4178
4179         /* Queue cleanup 2: wait for the expired queue to clear. */
4180         while (!list_empty(&the_lnet.ln_dc_expired))
4181                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4182
4183         /* Queue cleanup 3: clear the request queue. */
4184         lnet_net_lock(LNET_LOCK_EX);
4185         while (!list_empty(&the_lnet.ln_dc_request)) {
4186                 lp = list_first_entry(&the_lnet.ln_dc_request,
4187                                       struct lnet_peer, lp_dc_list);
4188                 lnet_net_unlock(LNET_LOCK_EX);
4189                 spin_lock(&lp->lp_lock);
4190                 if (lp->lp_state & LNET_PEER_PING_FAILED)
4191                         (void)lnet_peer_ping_failed(lp);
4192                 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4193                         (void)lnet_peer_push_failed(lp);
4194                 spin_unlock(&lp->lp_lock);
4195                 lnet_net_lock(LNET_LOCK_EX);
4196                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4197         }
4198         lnet_net_unlock(LNET_LOCK_EX);
4199
4200         if (lnet_assert_handler_unused(the_lnet.ln_dc_handler, --retry <= 0))
4201                 goto cleanup;
4202
4203         the_lnet.ln_dc_handler = NULL;
4204
4205         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4206         wake_up(&the_lnet.ln_dc_waitq);
4207
4208         CDEBUG(D_NET, "stopped\n");
4209
4210         return 0;
4211 }
4212
4213 /* ln_api_mutex is held on entry. */
4214 int lnet_peer_discovery_start(void)
4215 {
4216         struct task_struct *task;
4217         int rc = 0;
4218
4219         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4220                 return -EALREADY;
4221
4222         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4223         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4224         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4225         if (IS_ERR(task)) {
4226                 rc = PTR_ERR(task);
4227                 CERROR("Can't start peer discovery thread: %d\n", rc);
4228
4229                 the_lnet.ln_dc_handler = NULL;
4230
4231                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4232         }
4233
4234         CDEBUG(D_NET, "discovery start: %d\n", rc);
4235
4236         return rc;
4237 }
4238
4239 /* ln_api_mutex is held on entry. */
4240 void lnet_peer_discovery_stop(void)
4241 {
4242         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4243                 return;
4244
4245         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4246         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4247
4248         /* In the LNetNIInit() path we may be stopping discovery before it
4249          * entered its work loop
4250          */
4251         if (!completion_done(&the_lnet.ln_started))
4252                 complete(&the_lnet.ln_started);
4253         else
4254                 wake_up(&the_lnet.ln_dc_waitq);
4255
4256         mutex_unlock(&the_lnet.ln_api_mutex);
4257         wait_event(the_lnet.ln_dc_waitq,
4258                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4259         mutex_lock(&the_lnet.ln_api_mutex);
4260
4261         LASSERT(list_empty(&the_lnet.ln_dc_request));
4262         LASSERT(list_empty(&the_lnet.ln_dc_working));
4263         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4264
4265         CDEBUG(D_NET, "discovery stopped\n");
4266 }
4267
4268 /* Debugging */
4269
4270 void
4271 lnet_debug_peer(struct lnet_nid *nid)
4272 {
4273         char                    *aliveness = "NA";
4274         struct lnet_peer_ni     *lp;
4275         int                     cpt;
4276
4277         cpt = lnet_nid2cpt(nid, NULL);
4278         lnet_net_lock(cpt);
4279
4280         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4281         if (IS_ERR(lp)) {
4282                 lnet_net_unlock(cpt);
4283                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4284                 return;
4285         }
4286
4287         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4288                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4289
4290         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4291                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4292                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4293                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4294                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4295
4296         lnet_peer_ni_decref_locked(lp);
4297
4298         lnet_net_unlock(cpt);
4299 }
4300
4301 /* Gathering information for userspace. */
4302
4303 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4304                           char aliveness[LNET_MAX_STR_LEN],
4305                           __u32 *cpt_iter, __u32 *refcount,
4306                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4307                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4308                           __u32 *peer_tx_qnob)
4309 {
4310         struct lnet_peer_table          *peer_table;
4311         struct lnet_peer_ni             *lp;
4312         int                             j;
4313         int                             lncpt;
4314         bool                            found = false;
4315
4316         /* get the number of CPTs */
4317         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4318
4319         /* if the cpt number to be examined is >= the number of cpts in
4320          * the system then indicate that there are no more cpts to examin
4321          */
4322         if (*cpt_iter >= lncpt)
4323                 return -ENOENT;
4324
4325         /* get the current table */
4326         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4327         /* if the ptable is NULL then there are no more cpts to examine */
4328         if (peer_table == NULL)
4329                 return -ENOENT;
4330
4331         lnet_net_lock(*cpt_iter);
4332
4333         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4334                 struct list_head *peers = &peer_table->pt_hash[j];
4335
4336                 list_for_each_entry(lp, peers, lpni_hashlist) {
4337                         if (!nid_is_nid4(&lp->lpni_nid))
4338                                 continue;
4339                         if (peer_index-- > 0)
4340                                 continue;
4341
4342                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4343                         if (lnet_isrouter(lp) ||
4344                                 lnet_peer_aliveness_enabled(lp))
4345                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4346                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4347
4348                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4349                         *refcount = kref_read(&lp->lpni_kref);
4350                         *ni_peer_tx_credits =
4351                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4352                         *peer_tx_credits = lp->lpni_txcredits;
4353                         *peer_rtr_credits = lp->lpni_rtrcredits;
4354                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4355                         *peer_tx_qnob = lp->lpni_txqnob;
4356
4357                         found = true;
4358                 }
4359
4360         }
4361         lnet_net_unlock(*cpt_iter);
4362
4363         *cpt_iter = lncpt;
4364
4365         return found ? 0 : -ENOENT;
4366 }
4367
4368 /* ln_api_mutex is held, which keeps the peer list stable */
4369 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4370 {
4371         struct lnet_ioctl_element_stats *lpni_stats;
4372         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4373         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4374         struct lnet_peer_ni_credit_info *lpni_info;
4375         struct lnet_peer_ni *lpni;
4376         struct lnet_peer *lp;
4377         lnet_nid_t nid4;
4378         struct lnet_nid nid;
4379         __u32 size;
4380         int rc;
4381
4382         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4383         lp = lnet_find_peer(&nid);
4384         if (!lp) {
4385                 rc = -ENOENT;
4386                 goto out;
4387         }
4388
4389         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4390                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4391         size *= lp->lp_nnis;
4392         if (size > cfg->prcfg_size) {
4393                 cfg->prcfg_size = size;
4394                 rc = -E2BIG;
4395                 goto out_lp_decref;
4396         }
4397
4398         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4399         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4400         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4401         cfg->prcfg_count = lp->lp_nnis;
4402         cfg->prcfg_size = size;
4403         cfg->prcfg_state = lp->lp_state;
4404
4405         /* Allocate helper buffers. */
4406         rc = -ENOMEM;
4407         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4408         if (!lpni_info)
4409                 goto out_lp_decref;
4410         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4411         if (!lpni_stats)
4412                 goto out_free_info;
4413         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4414         if (!lpni_msg_stats)
4415                 goto out_free_stats;
4416         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4417         if (!lpni_hstats)
4418                 goto out_free_msg_stats;
4419
4420
4421         lpni = NULL;
4422         rc = -EFAULT;
4423         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4424                 if (!nid_is_nid4(&lpni->lpni_nid))
4425                         continue;
4426                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4427                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4428                         goto out_free_hstats;
4429                 bulk += sizeof(nid4);
4430
4431                 memset(lpni_info, 0, sizeof(*lpni_info));
4432                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4433                 if (lnet_isrouter(lpni) ||
4434                         lnet_peer_aliveness_enabled(lpni))
4435                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4436                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4437
4438                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4439                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4440                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4441                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4442                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4443                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4444                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4445                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4446                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4447                         goto out_free_hstats;
4448                 bulk += sizeof(*lpni_info);
4449
4450                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4451                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4452                                                             LNET_STATS_TYPE_SEND);
4453                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4454                                                             LNET_STATS_TYPE_RECV);
4455                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4456                                                             LNET_STATS_TYPE_DROP);
4457                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4458                         goto out_free_hstats;
4459                 bulk += sizeof(*lpni_stats);
4460                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4461                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4462                         goto out_free_hstats;
4463                 bulk += sizeof(*lpni_msg_stats);
4464                 lpni_hstats->hlpni_network_timeout =
4465                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4466                 lpni_hstats->hlpni_remote_dropped =
4467                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4468                 lpni_hstats->hlpni_remote_timeout =
4469                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4470                 lpni_hstats->hlpni_remote_error =
4471                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4472                 lpni_hstats->hlpni_health_value =
4473                   atomic_read(&lpni->lpni_healthv);
4474                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4475                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4476                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4477                         goto out_free_hstats;
4478                 bulk += sizeof(*lpni_hstats);
4479         }
4480         rc = 0;
4481
4482 out_free_hstats:
4483         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4484 out_free_msg_stats:
4485         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4486 out_free_stats:
4487         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4488 out_free_info:
4489         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4490 out_lp_decref:
4491         lnet_peer_decref_locked(lp);
4492 out:
4493         return rc;
4494 }
4495
4496 /* must hold net_lock/0 */
4497 void
4498 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4499                                      struct list_head *recovery_queue,
4500                                      time64_t now)
4501 {
4502         /* the mt could've shutdown and cleaned up the queues */
4503         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4504                 return;
4505
4506         if (!list_empty(&lpni->lpni_recovery))
4507                 return;
4508
4509         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4510                 return;
4511
4512         if (!lpni->lpni_last_alive) {
4513                 CDEBUG(D_NET,
4514                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4515                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4516                        lpni->lpni_last_alive);
4517                 return;
4518         }
4519
4520         if (lnet_recovery_limit &&
4521             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4522                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4523                        libcfs_nidstr(&lpni->lpni_nid),
4524                        lpni->lpni_last_alive);
4525                 /* Reset the ping count so that if this peer NI is added back to
4526                  * the recovery queue we will send the first ping right away.
4527                  */
4528                 lpni->lpni_ping_count = 0;
4529                 return;
4530         }
4531
4532         /* This peer NI is going on the recovery queue, so take a ref on it */
4533         lnet_peer_ni_addref_locked(lpni);
4534
4535         lnet_peer_ni_set_next_ping(lpni, now);
4536
4537         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4538                libcfs_nidstr(&lpni->lpni_nid),
4539                lpni->lpni_ping_count,
4540                lpni->lpni_next_ping,
4541                lpni->lpni_last_alive,
4542                atomic_read(&lpni->lpni_healthv));
4543
4544         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4545 }
4546
4547 /* Call with the ln_api_mutex held */
4548 void
4549 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4550 {
4551         struct lnet_peer_table *ptable;
4552         struct lnet_peer *lp;
4553         struct lnet_peer_net *lpn;
4554         struct lnet_peer_ni *lpni;
4555         int lncpt;
4556         int cpt;
4557         time64_t now;
4558
4559         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4560                 return;
4561
4562         now = ktime_get_seconds();
4563
4564         if (!all) {
4565                 lnet_net_lock(LNET_LOCK_EX);
4566                 lpni = lnet_peer_ni_find_locked(nid);
4567                 if (!lpni) {
4568                         lnet_net_unlock(LNET_LOCK_EX);
4569                         return;
4570                 }
4571                 lnet_set_lpni_healthv_locked(lpni, value);
4572                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4573                                              &the_lnet.ln_mt_peerNIRecovq, now);
4574                 lnet_peer_ni_decref_locked(lpni);
4575                 lnet_net_unlock(LNET_LOCK_EX);
4576                 return;
4577         }
4578
4579         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4580
4581         /*
4582          * Walk all the peers and reset the health value for each one to the
4583          * specified value.
4584          */
4585         lnet_net_lock(LNET_LOCK_EX);
4586         for (cpt = 0; cpt < lncpt; cpt++) {
4587                 ptable = the_lnet.ln_peer_tables[cpt];
4588                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4589                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4590                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4591                                                     lpni_peer_nis) {
4592                                         lnet_set_lpni_healthv_locked(lpni,
4593                                                                      value);
4594                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4595                                              &the_lnet.ln_mt_peerNIRecovq, now);
4596                                 }
4597                         }
4598                 }
4599         }
4600         lnet_net_unlock(LNET_LOCK_EX);
4601 }
4602