lnet/lnet/peer.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4  * Use is subject to license terms.
5  *
6  * Copyright (c) 2012, 2017, Intel Corporation.
7  */
8
9 /* This file is part of Lustre, http://www.lustre.org/ */
10
11 #define DEBUG_SUBSYSTEM S_LNET
12
13 #include <linux/sched.h>
14 #ifdef HAVE_SCHED_HEADERS
15 #include <linux/sched/signal.h>
16 #endif
17 #include <linux/uaccess.h>
18
19 #include <lnet/udsp.h>
20 #include <lnet/lib-lnet.h>
21 #include <uapi/linux/lnet/lnet-dlc.h>
22
23 /* Value indicating that recovery needs to re-check a peer immediately. */
24 #define LNET_REDISCOVER_PEER    (1)
25
26 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
27 static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
28                             unsigned int flags);
29
30 static void
31 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
32 {
33         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
34                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
35                 lnet_peer_ni_decref_locked(lpni);
36         }
37 }
38
39 void
40 lnet_peer_net_added(struct lnet_net *net)
41 {
42         struct lnet_peer_ni *lpni, *tmp;
43
44         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
45                                  lpni_on_remote_peer_ni_list) {
46
47                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
48                         lpni->lpni_net = net;
49
50                         spin_lock(&lpni->lpni_lock);
51                         lpni->lpni_txcredits =
52                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
53                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
54                         lpni->lpni_rtrcredits =
55                                 lnet_peer_buffer_credits(lpni->lpni_net);
56                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
57                         spin_unlock(&lpni->lpni_lock);
58
59                         lnet_peer_remove_from_remote_list(lpni);
60                 }
61         }
62 }
63
64 static void
65 lnet_peer_tables_destroy(void)
66 {
67         struct lnet_peer_table  *ptable;
68         struct list_head        *hash;
69         int                     i;
70         int                     j;
71
72         if (!the_lnet.ln_peer_tables)
73                 return;
74
75         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
76                 hash = ptable->pt_hash;
77                 if (!hash) /* not initialized */
78                         break;
79
80                 LASSERT(list_empty(&ptable->pt_zombie_list));
81
82                 ptable->pt_hash = NULL;
83                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
84                         LASSERT(list_empty(&hash[j]));
85
86                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
87         }
88
89         cfs_percpt_free(the_lnet.ln_peer_tables);
90         the_lnet.ln_peer_tables = NULL;
91 }
92
93 int
94 lnet_peer_tables_create(void)
95 {
96         struct lnet_peer_table  *ptable;
97         struct list_head        *hash;
98         int                     i;
99         int                     j;
100
101         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
102                                                    sizeof(*ptable));
103         if (the_lnet.ln_peer_tables == NULL) {
104                 CERROR("Failed to allocate cpu-partition peer tables\n");
105                 return -ENOMEM;
106         }
107
108         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
109                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
110                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
111                 if (hash == NULL) {
112                         CERROR("Failed to create peer hash table\n");
113                         lnet_peer_tables_destroy();
114                         return -ENOMEM;
115                 }
116
117                 spin_lock_init(&ptable->pt_zombie_lock);
118                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
119
120                 INIT_LIST_HEAD(&ptable->pt_peer_list);
121
122                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
123                         INIT_LIST_HEAD(&hash[j]);
124                 ptable->pt_hash = hash; /* sign of initialization */
125         }
126
127         return 0;
128 }
129
130 static struct lnet_peer_ni *
131 lnet_peer_ni_alloc(struct lnet_nid *nid)
132 {
133         struct lnet_peer_ni *lpni;
134         struct lnet_net *net;
135         int cpt;
136
137         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
138
139         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
140         if (!lpni)
141                 return NULL;
142
143         INIT_LIST_HEAD(&lpni->lpni_txq);
144         INIT_LIST_HEAD(&lpni->lpni_hashlist);
145         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
146         INIT_LIST_HEAD(&lpni->lpni_recovery);
147         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
148         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
149         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
150         kref_init(&lpni->lpni_kref);
151         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
152
153         spin_lock_init(&lpni->lpni_lock);
154
155         if (lnet_peers_start_down())
156                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
157         else
158                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
159         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
160         lpni->lpni_nid = *nid;
161         lpni->lpni_cpt = cpt;
162         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
163
164         net = lnet_get_net_locked(LNET_NID_NET(nid));
165         lpni->lpni_net = net;
166         if (net) {
167                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
168                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
169                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
170                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
171         } else {
172                 /*
173                  * This peer_ni is not on a local network, so we
174                  * cannot add the credits here. In case the net is
175                  * added later, add the peer_ni to the remote peer ni
176                  * list so it can be easily found and revisited.
177                  */
178                 /* FIXME: per-net implementation instead? */
179                 lnet_peer_ni_addref_locked(lpni);
180                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
181                               &the_lnet.ln_remote_peer_ni_list);
182         }
183
184         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
185
186         return lpni;
187 }
188
189 static struct lnet_peer_net *
190 lnet_peer_net_alloc(__u32 net_id)
191 {
192         struct lnet_peer_net *lpn;
193
194         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
195         if (!lpn)
196                 return NULL;
197
198         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
199         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
200         lpn->lpn_net_id = net_id;
201         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
202
203         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
204
205         return lpn;
206 }
207
208 void
209 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
210 {
211         struct lnet_peer *lp;
212
213         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
214
215         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
216         LASSERT(list_empty(&lpn->lpn_peer_nis));
217         LASSERT(list_empty(&lpn->lpn_peer_nets));
218         lp = lpn->lpn_peer;
219         lpn->lpn_peer = NULL;
220         LIBCFS_FREE(lpn, sizeof(*lpn));
221
222         lnet_peer_decref_locked(lp);
223 }
224
225 static struct lnet_peer *
226 lnet_peer_alloc(struct lnet_nid *nid)
227 {
228         struct lnet_peer *lp;
229
230         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
231         if (!lp)
232                 return NULL;
233
234         INIT_LIST_HEAD(&lp->lp_rtrq);
235         INIT_LIST_HEAD(&lp->lp_routes);
236         INIT_LIST_HEAD(&lp->lp_peer_list);
237         INIT_LIST_HEAD(&lp->lp_peer_nets);
238         INIT_LIST_HEAD(&lp->lp_dc_list);
239         INIT_LIST_HEAD(&lp->lp_dc_pendq);
240         INIT_LIST_HEAD(&lp->lp_rtr_list);
241         init_waitqueue_head(&lp->lp_dc_waitq);
242         spin_lock_init(&lp->lp_lock);
243         lp->lp_primary_nid = *nid;
244         lp->lp_disc_src_nid = LNET_ANY_NID;
245         lp->lp_disc_dst_nid = LNET_ANY_NID;
246         if (lnet_peers_start_down())
247                 lp->lp_alive = false;
248         else
249                 lp->lp_alive = true;
250
251         /*
252          * All peers created on a router should have health enabled
253          * if it's not already enabled.
254          */
255         if (the_lnet.ln_routing && !lnet_health_sensitivity)
256                 lp->lp_health_sensitivity = 1;
257
258         /*
259          * Turn off discovery for loopback peer. If you're creating a peer
260          * for the loopback interface then that was initiated when we
261          * attempted to send a message over the loopback. There is no need
262          * to ever use a different interface when sending messages to
263          * myself.
264          */
265         if (nid_is_lo0(nid))
266                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
267         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
268
269         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
270
271         return lp;
272 }
273
274 void
275 lnet_destroy_peer_locked(struct lnet_peer *lp)
276 {
277         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
278
279         LASSERT(atomic_read(&lp->lp_refcount) == 0);
280         LASSERT(lp->lp_rtr_refcount == 0);
281         LASSERT(list_empty(&lp->lp_peer_nets));
282         LASSERT(list_empty(&lp->lp_peer_list));
283         LASSERT(list_empty(&lp->lp_dc_list));
284
285         if (lp->lp_data)
286                 lnet_ping_buffer_decref(lp->lp_data);
287
288         /*
289          * if there are messages still on the pending queue, then make
290          * sure to queue them on the ln_msg_resend list so they can be
291          * resent at a later point if the discovery thread is still
292          * running.
293          * If the discovery thread has stopped, then the wakeup will be a
294          * no-op, and it is expected the lnet_shutdown_lndnets() will
295          * eventually be called, which will traverse this list and
296          * finalize the messages on the list.
297          * We cannot resend them now because we're holding the cpt lock.
298          * Releasing the lock could leave things in an inconsistent state.
299          */
300         spin_lock(&the_lnet.ln_msg_resend_lock);
301         spin_lock(&lp->lp_lock);
302         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
303         spin_unlock(&lp->lp_lock);
304         spin_unlock(&the_lnet.ln_msg_resend_lock);
305         wake_up(&the_lnet.ln_dc_waitq);
306
307         LIBCFS_FREE(lp, sizeof(*lp));
308 }
309
310 /*
311  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
312  * that peer_net, detach the peer_net from the peer.
313  *
314  * Call with lnet_net_lock/EX held
315  */
316 static void
317 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
318 {
319         struct lnet_peer_table *ptable;
320         struct lnet_peer_net *lpn;
321         struct lnet_peer *lp;
322
323         /*
324          * Belts and suspenders: gracefully handle teardown of a
325          * partially connected peer_ni.
326          */
327         lpn = lpni->lpni_peer_net;
328
329         list_del_init(&lpni->lpni_peer_nis);
330         /*
331          * If there are no lpnis left, we detach lpn from
332          * lp_peer_nets, so it cannot be found anymore.
333          */
334         if (list_empty(&lpn->lpn_peer_nis))
335                 list_del_init(&lpn->lpn_peer_nets);
336
337         /* Update peer NID count. */
338         lp = lpn->lpn_peer;
339         lp->lp_nnis--;
340
341         /*
342          * If there are no more peer nets, make the peer unfindable
343          * via the peer_tables.
344          *
345          * Otherwise, if the peer is DISCOVERED, tell discovery to
346          * take another look at it. This is a no-op if discovery for
347          * this peer did the detaching.
348          */
349         if (list_empty(&lp->lp_peer_nets)) {
350                 list_del_init(&lp->lp_peer_list);
351                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
352                 ptable->pt_peers--;
353         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
354                 /* Discovery isn't running, nothing to do here. */
355         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
356                 lnet_peer_queue_for_discovery(lp);
357                 wake_up(&the_lnet.ln_dc_waitq);
358         }
359         CDEBUG(D_NET, "peer %s NID %s\n",
360                 libcfs_nidstr(&lp->lp_primary_nid),
361                 libcfs_nidstr(&lpni->lpni_nid));
362 }
363
364 /* called with lnet_net_lock LNET_LOCK_EX held */
365 static int
366 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
367 {
368         struct lnet_peer_table *ptable = NULL;
369
370         /* don't remove a peer_ni if it's also a gateway */
371         if (lnet_isrouter(lpni) && !force) {
372                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
373                        libcfs_nidstr(&lpni->lpni_nid));
374                 return -EBUSY;
375         }
376
377         lnet_peer_remove_from_remote_list(lpni);
378
379         /* remove peer ni from the hash list. */
380         list_del_init(&lpni->lpni_hashlist);
381
382         /*
383          * indicate the peer is being deleted so the monitor thread can
384          * remove it from the recovery queue.
385          */
386         spin_lock(&lpni->lpni_lock);
387         lpni->lpni_state |= LNET_PEER_NI_DELETING;
388         spin_unlock(&lpni->lpni_lock);
389
390         /* get the peer table for this peer_ni's CPT */
391         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
392
393         /*
394          * The peer_ni can no longer be found with a lookup. But there
395          * can be current users, so keep track of it on the zombie
396          * list until the reference count has gone to zero.
397          *
398          * The last reference may be lost in a place where the
399          * lnet_net_lock locks only a single cpt, and that cpt may not
400          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
401          * has its own lock.
402          */
403         spin_lock(&ptable->pt_zombie_lock);
404         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
405         ptable->pt_zombies++;
406         spin_unlock(&ptable->pt_zombie_lock);
407
408         /* no need to keep this peer_ni in the hierarchy anymore */
409         lnet_peer_detach_peer_ni_locked(lpni);
410
411         /* remove hashlist reference on peer_ni */
412         lnet_peer_ni_decref_locked(lpni);
413
414         return 0;
415 }
416
417 void lnet_peer_uninit(void)
418 {
419         struct lnet_peer_ni *lpni, *tmp;
420
421         lnet_net_lock(LNET_LOCK_EX);
422
423         /* remove all peer_nis from the remote peer and the hash list */
424         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
425                                  lpni_on_remote_peer_ni_list)
426                 lnet_peer_ni_del_locked(lpni, false);
427
428         lnet_peer_tables_destroy();
429
430         lnet_net_unlock(LNET_LOCK_EX);
431 }
432
433 static int
434 lnet_peer_del_locked(struct lnet_peer *peer)
435 {
436         struct lnet_peer_ni *lpni = NULL, *lpni2;
437         int rc = 0, rc2 = 0;
438
439         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
440
441         spin_lock(&peer->lp_lock);
442         peer->lp_state |= LNET_PEER_MARK_DELETED;
443         spin_unlock(&peer->lp_lock);
444
445         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
446         while (lpni != NULL) {
447                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
448                 rc = lnet_peer_ni_del_locked(lpni, false);
449                 if (rc != 0)
450                         rc2 = rc;
451                 lpni = lpni2;
452         }
453
454         return rc2;
455 }
456
457 /*
458  * Discovering this peer is taking too long. Cancel any Ping or Push
459  * that discovery is waiting on by unlinking the relevant MDs. The
460  * lnet_discovery_event_handler() will proceed from here and complete
461  * the cleanup.
462  */
463 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
464 {
465         struct lnet_handle_md ping_mdh;
466         struct lnet_handle_md push_mdh;
467
468         LNetInvalidateMDHandle(&ping_mdh);
469         LNetInvalidateMDHandle(&push_mdh);
470
471         spin_lock(&lp->lp_lock);
472         if (lp->lp_state & LNET_PEER_PING_SENT) {
473                 ping_mdh = lp->lp_ping_mdh;
474                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
475         }
476         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
477                 push_mdh = lp->lp_push_mdh;
478                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
479         }
480         spin_unlock(&lp->lp_lock);
481
482         if (!LNetMDHandleIsInvalid(ping_mdh))
483                 LNetMDUnlink(ping_mdh);
484         if (!LNetMDHandleIsInvalid(push_mdh))
485                 LNetMDUnlink(push_mdh);
486 }
487
488 static int
489 lnet_peer_del(struct lnet_peer *peer)
490 {
491         int rc;
492
493         lnet_peer_cancel_discovery(peer);
494         lnet_net_lock(LNET_LOCK_EX);
495         rc = lnet_peer_del_locked(peer);
496         lnet_net_unlock(LNET_LOCK_EX);
497
498         return rc;
499 }
500
501 /*
502  * Delete a NID from a peer. Call with ln_api_mutex held.
503  *
504  * Error codes:
505  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
506  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
507  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
508  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
509  */
510 static int
511 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
512                   unsigned int flags)
513 {
514         struct lnet_peer_ni *lpni;
515         struct lnet_nid primary_nid = lp->lp_primary_nid;
516         int rc = 0;
517         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
518
519         if (!(flags & LNET_PEER_CONFIGURED)) {
520                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
521                         rc = -EPERM;
522                         goto out;
523                 }
524         }
525
526         /* If we're asked to lock down the primary NID, we shouldn't be
527          * deleting it.
528          */
529         if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
530             nid_same(&primary_nid, nid)) {
531                 rc = -EPERM;
532                 goto out;
533         }
534
535         lpni = lnet_peer_ni_find_locked(nid);
536         if (!lpni) {
537                 rc = -ENOENT;
538                 goto out;
539         }
540         if (lp != lpni->lpni_peer_net->lpn_peer) {
541                 rc = -ECHILD;
542                 lnet_peer_ni_decref_locked(lpni);
543                 goto out;
544         }
545
546         /*
547          * This function only allows deletion of the primary NID if it
548          * is the only NID.
549          */
550         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
551                 rc = -EBUSY;
552                 lnet_peer_ni_decref_locked(lpni);
553                 goto out;
554         }
555
556         lnet_net_lock(LNET_LOCK_EX);
557
558         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
559                 struct lnet_peer_ni *lpni2;
560                 /* assign the next peer_ni to be the primary */
561                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
562                 LASSERT(lpni2);
563                 lp->lp_primary_nid = lpni2->lpni_nid;
564         }
565         rc = lnet_peer_ni_del_locked(lpni, force);
566         lnet_peer_ni_decref_locked(lpni);
567
568         lnet_net_unlock(LNET_LOCK_EX);
569
570 out:
571         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
572                libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
573                flags, rc);
574
575         return rc;
576 }
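
/* Illustrative sketch (not part of the original code): a hypothetical
 * caller holding ln_api_mutex might map the error codes documented
 * above for lnet_peer_del_nid() as follows.
 *
 *	rc = lnet_peer_del_nid(lp, &nid, flags);
 *	switch (rc) {
 *	case -EPERM:	// peer is DLC-configured or primary NID is locked
 *	case -ENOENT:	// no peer_ni matches the NID
 *	case -ECHILD:	// the peer_ni belongs to a different peer
 *	case -EBUSY:	// NID is the primary and other NIDs remain
 *	default:
 *		break;
 *	}
 */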
577
578 static void
579 lnet_peer_table_cleanup_locked(struct lnet_net *net,
580                                struct lnet_peer_table *ptable)
581 {
582         int                      i;
583         struct lnet_peer_ni     *next;
584         struct lnet_peer_ni     *lpni;
585         struct lnet_peer        *peer;
586
587         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
588                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
589                                          lpni_hashlist) {
590                         if (net != NULL && net != lpni->lpni_net)
591                                 continue;
592
593                         peer = lpni->lpni_peer_net->lpn_peer;
594                         if (!nid_same(&peer->lp_primary_nid,
595                                        &lpni->lpni_nid)) {
596                                 lnet_peer_ni_del_locked(lpni, false);
597                                 continue;
598                         }
599                         /*
600                          * Removing the primary NID implies removing
601                          * the entire peer. Advance next beyond any
602                          * peer_ni that belongs to the same peer.
603                          */
604                         list_for_each_entry_from(next, &ptable->pt_hash[i],
605                                                  lpni_hashlist) {
606                                 if (next->lpni_peer_net->lpn_peer != peer)
607                                         break;
608                         }
609                         lnet_peer_del_locked(peer);
610                 }
611         }
612 }
613
614 static void
615 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
616 {
617         wait_var_event_warning(&ptable->pt_zombies,
618                                ptable->pt_zombies == 0,
619                                "Waiting for %d zombies on peer table\n",
620                                ptable->pt_zombies);
621 }
622
623 static void
624 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
625                                 struct lnet_peer_table *ptable)
626 {
627         struct lnet_peer_ni     *lp;
628         struct lnet_peer_ni     *tmp;
629         struct lnet_nid         gw_nid;
630         int                     i;
631
632         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
633                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
634                                          lpni_hashlist) {
635                         if (net != lp->lpni_net)
636                                 continue;
637
638                         if (!lnet_isrouter(lp))
639                                 continue;
640
641                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
642
643                         lnet_net_unlock(LNET_LOCK_EX);
644                         lnet_del_route(LNET_NET_ANY, &gw_nid);
645                         lnet_net_lock(LNET_LOCK_EX);
646                 }
647         }
648 }
649
650 void
651 lnet_peer_tables_cleanup(struct lnet_net *net)
652 {
653         int i;
654         struct lnet_peer_table *ptable;
655
656         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
657         /* If just deleting the peers for a NI, get rid of any routes these
658          * peers are gateways for. */
659         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
660                 lnet_net_lock(LNET_LOCK_EX);
661                 lnet_peer_table_del_rtrs_locked(net, ptable);
662                 lnet_net_unlock(LNET_LOCK_EX);
663         }
664
665         /* Start the cleanup process */
666         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
667                 lnet_net_lock(LNET_LOCK_EX);
668                 lnet_peer_table_cleanup_locked(net, ptable);
669                 lnet_net_unlock(LNET_LOCK_EX);
670         }
671
672         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
673                 lnet_peer_ni_finalize_wait(ptable);
674 }
675
676 static struct lnet_peer_ni *
677 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
678 {
679         struct list_head        *peers;
680         struct lnet_peer_ni     *lp;
681
682         if (the_lnet.ln_state != LNET_STATE_RUNNING)
683                 return NULL;
684
685         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
686         list_for_each_entry(lp, peers, lpni_hashlist) {
687                 if (nid_same(&lp->lpni_nid, nid)) {
688                         lnet_peer_ni_addref_locked(lp);
689                         return lp;
690                 }
691         }
692
693         return NULL;
694 }
695
696 struct lnet_peer_ni *
697 lnet_peer_ni_find_locked(struct lnet_nid *nid)
698 {
699         struct lnet_peer_ni *lpni;
700         struct lnet_peer_table *ptable;
701         int cpt;
702
703         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
704
705         ptable = the_lnet.ln_peer_tables[cpt];
706         lpni = lnet_get_peer_ni_locked(ptable, nid);
707
708         return lpni;
709 }
710
711 struct lnet_peer_ni *
712 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
713 {
714         struct lnet_peer_net *lpn;
715         struct lnet_peer_ni *lpni;
716
717         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
718         if (!lpn)
719                 return NULL;
720
721         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
722                 if (nid_same(&lpni->lpni_nid, nid))
723                         return lpni;
724         }
725
726         return NULL;
727 }
728
729 struct lnet_peer *
730 lnet_find_peer(struct lnet_nid *nid)
731 {
732         struct lnet_peer_ni *lpni;
733         struct lnet_peer *lp = NULL;
734         int cpt;
735
736         cpt = lnet_net_lock_current();
737         lpni = lnet_peer_ni_find_locked(nid);
738         if (lpni) {
739                 lp = lpni->lpni_peer_net->lpn_peer;
740                 lnet_peer_addref_locked(lp);
741                 lnet_peer_ni_decref_locked(lpni);
742         }
743         lnet_net_unlock(cpt);
744
745         return lp;
746 }
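
/* Illustrative sketch (not part of the original code): lnet_find_peer()
 * returns the peer with a reference held, so a hypothetical caller must
 * drop that reference under the net lock when done, as LNetPeerDiscovered()
 * does below.
 *
 *	lp = lnet_find_peer(&nid);
 *	if (lp) {
 *		// ... use lp ...
 *		cpt = lnet_net_lock_current();
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(cpt);
 *	}
 */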
747
748 struct lnet_peer_net *
749 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
750 {
751         struct lnet_peer_net *net;
752
753         if (!prev_lpn_id) {
754                 /* no net id provided, return the first net */
755                 net = list_first_entry_or_null(&lp->lp_peer_nets,
756                                                struct lnet_peer_net,
757                                                lpn_peer_nets);
758
759                 return net;
760         }
761
762         /* find the net after the one provided */
763         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
764                 if (net->lpn_net_id == prev_lpn_id) {
765                         /*
766                          * if we reached the end of the list, loop back to
767                          * the beginning.
768                          */
769                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
770                                 return list_first_entry_or_null(&lp->lp_peer_nets,
771                                                                 struct lnet_peer_net,
772                                                                 lpn_peer_nets);
773                         else
774                                 return list_next_entry(net, lpn_peer_nets);
775                 }
776         }
777
778         return NULL;
779 }
780
781 struct lnet_peer_ni *
782 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
783                              struct lnet_peer_net *peer_net,
784                              struct lnet_peer_ni *prev)
785 {
786         struct lnet_peer_ni *lpni;
787         struct lnet_peer_net *net = peer_net;
788
789         if (!prev) {
790                 if (!net) {
791                         if (list_empty(&peer->lp_peer_nets))
792                                 return NULL;
793
794                         net = list_first_entry(&peer->lp_peer_nets,
795                                                struct lnet_peer_net,
796                                                lpn_peer_nets);
797                 }
798                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
799                                         lpni_peer_nis);
800
801                 return lpni;
802         }
803
804         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
805                 /*
806                  * if we reached the end of the peer ni list and a peer
807                  * net was specified, then there are no more peer NIs in
808                  * that net.
809                  */
810                 if (net)
811                         return NULL;
812
813                 /*
814                  * we reached the end of this net's NI list. Move to the
815                  * next net.
816                  */
817                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
818                     &peer->lp_peer_nets)
819                         /* no more nets and no more NIs. */
820                         return NULL;
821
822                 /* get the next net */
823                 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
824                                        struct lnet_peer_net,
825                                        lpn_peer_nets);
826                 /* get the ni on it */
827                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
828                                         lpni_peer_nis);
829
830                 return lpni;
831         }
832
833         /* there are more nis left */
834         lpni = list_first_entry(&prev->lpni_peer_nis,
835                                 struct lnet_peer_ni, lpni_peer_nis);
836
837         return lpni;
838 }
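
/* Illustrative sketch (not part of the original code): the canonical way to
 * walk every peer_ni of a peer with this iterator, as used by
 * lnet_peer_clr_non_mr_pref_nids() below, is to start with prev == NULL and
 * stop when NULL is returned. The lnet_net_lock must be held across the walk.
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		// ... examine lpni ...
 */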
839
840 /* Call with the ln_api_mutex held */
841 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
842 {
843         struct lnet_process_id id;
844         struct lnet_peer_table *ptable;
845         struct lnet_peer *lp;
846         __u32 count = 0;
847         __u32 size = 0;
848         int lncpt;
849         int cpt;
850         __u32 i;
851         int rc;
852
853         rc = -ESHUTDOWN;
854         if (the_lnet.ln_state != LNET_STATE_RUNNING)
855                 goto done;
856
857         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
858
859         /*
860          * Count the number of peers, and return E2BIG if the buffer
861          * is too small. We'll also return the desired size.
862          */
863         rc = -E2BIG;
864         for (cpt = 0; cpt < lncpt; cpt++) {
865                 ptable = the_lnet.ln_peer_tables[cpt];
866                 count += ptable->pt_peers;
867         }
868         size = count * sizeof(*ids);
869         if (size > *sizep)
870                 goto done;
871
872         /*
873          * Walk the peer lists and copy out the primary nids.
874          * This is safe because the peer lists are only modified
875          * while the ln_api_mutex is held. So we don't need to
876          * hold the lnet_net_lock as well, and can therefore
877          * directly call copy_to_user().
878          */
879         rc = -EFAULT;
880         memset(&id, 0, sizeof(id));
881         id.pid = LNET_PID_LUSTRE;
882         i = 0;
883         for (cpt = 0; cpt < lncpt; cpt++) {
884                 ptable = the_lnet.ln_peer_tables[cpt];
885                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
886                         if (!nid_is_nid4(&lp->lp_primary_nid))
887                                 continue;
888                         if (i >= count)
889                                 goto done;
890                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
891                         if (copy_to_user(&ids[i], &id, sizeof(id)))
892                                 goto done;
893                         i++;
894                 }
895         }
896         rc = 0;
897 done:
898         *countp = count;
899         *sizep = size;
900         return rc;
901 }
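
/* Illustrative sketch (not part of the original code): a hypothetical ioctl
 * handler can size the user buffer by calling lnet_get_peer_list() and
 * retrying when it returns -E2BIG, since *sizep is updated to the required
 * size on return.
 *
 *	rc = lnet_get_peer_list(&count, &size, ids);
 *	if (rc == -E2BIG)
 *		// reallocate 'ids' with 'size' bytes and call again
 */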
902
903 /*
904  * Start pushes to peers that need to be updated for a configuration
905  * change on this node.
906  */
907 void
908 lnet_push_update_to_peers(int force)
909 {
910         struct lnet_peer_table *ptable;
911         struct lnet_peer *lp;
912         int lncpt;
913         int cpt;
914
915         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
916                 return;
917
918         lnet_net_lock(LNET_LOCK_EX);
919         if (lnet_peer_discovery_disabled)
920                 force = 0;
921         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
922         for (cpt = 0; cpt < lncpt; cpt++) {
923                 ptable = the_lnet.ln_peer_tables[cpt];
924                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
925                         if (force) {
926                                 spin_lock(&lp->lp_lock);
927                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
928                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
929                                 spin_unlock(&lp->lp_lock);
930                         }
931                         if (lnet_peer_needs_push(lp))
932                                 lnet_peer_queue_for_discovery(lp);
933                 }
934         }
935         lnet_net_unlock(LNET_LOCK_EX);
936         wake_up(&the_lnet.ln_dc_waitq);
937 }
938
939 /* Find the NID in the preferred gateways for the remote peer.
940  * return:
941  *      false: the list is empty
942  *      false: the list is not empty but the NID is not preferred
943  *      true:  the NID is found in the list
944  */
945 bool
946 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
947                              struct lnet_nid *gw_nid)
948 {
949         struct lnet_nid_list *ne;
950
951         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
952                libcfs_nidstr(&lpni->lpni_nid),
953                list_empty(&lpni->lpni_rtr_pref_nids));
954
955         if (list_empty(&lpni->lpni_rtr_pref_nids))
956                 return false;
957
958         /* iterate through all the preferred NIDs and see if any of them
959          * matches the provided gw_nid
960          */
961         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
962                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
963                        libcfs_nidstr(&ne->nl_nid),
964                        libcfs_nidstr(gw_nid));
965                 if (nid_same(&ne->nl_nid, gw_nid))
966                         return true;
967         }
968
969         return false;
970 }
971
972 void
973 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
974 {
975         struct list_head zombies;
976         struct lnet_nid_list *ne;
977         struct lnet_nid_list *tmp;
978         int cpt = lpni->lpni_cpt;
979
980         INIT_LIST_HEAD(&zombies);
981
982         lnet_net_lock(cpt);
983         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
984         lnet_net_unlock(cpt);
985
986         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
987                 list_del(&ne->nl_list);
988                 LIBCFS_FREE(ne, sizeof(*ne));
989         }
990 }
991
992 int
993 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
994                        struct lnet_nid *gw_nid)
995 {
996         int cpt = lpni->lpni_cpt;
997         struct lnet_nid_list *ne = NULL;
998
999         /* This function is called with api_mutex held. When the api_mutex
1000          * is held the list cannot be modified, as it is only modified as
1001          * a result of applying a UDSP, and that happens under the api_mutex
1002          * lock.
1003          */
1004         __must_hold(&the_lnet.ln_api_mutex);
1005
1006         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1007                 if (nid_same(&ne->nl_nid, gw_nid))
1008                         return -EEXIST;
1009         }
1010
1011         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1012         if (!ne)
1013                 return -ENOMEM;
1014
1015         ne->nl_nid = *gw_nid;
1016
1017         /* Lock the cpt to protect against addition and checks in the
1018          * selection algorithm
1019          */
1020         lnet_net_lock(cpt);
1021         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1022         lnet_net_unlock(cpt);
1023
1024         return 0;
1025 }
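
/* Illustrative sketch (not part of the original code): preferred router NIDs
 * are added under the ln_api_mutex (e.g. while applying a UDSP) and are later
 * consulted by the selection code under the net lock:
 *
 *	rc = lnet_peer_add_pref_rtr(lpni, &gw_nid);	// api_mutex held
 *	...
 *	lnet_net_lock(cpt);
 *	preferred = lnet_peer_is_pref_rtr_locked(lpni, &gw_nid);
 *	lnet_net_unlock(cpt);
 */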
1026
1027 /*
1028  * Test whether an NI is a preferred NI for this peer_ni, i.e. whether
1029  * this is a preferred point-to-point path. Call with lnet_net_lock in
1030  * shared mode.
1031  */
1032 bool
1033 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1034 {
1035         struct lnet_nid_list *ne;
1036
1037         if (lpni->lpni_pref_nnids == 0)
1038                 return false;
1039         if (lpni->lpni_pref_nnids == 1)
1040                 return nid_same(&lpni->lpni_pref.nid, nid);
1041         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1042                 if (nid_same(&ne->nl_nid, nid))
1043                         return true;
1044         }
1045         return false;
1046 }
1047
1048 /*
1049  * Set a single ni as preferred, provided no preferred ni is already
1050  * defined. Only to be used for non-multi-rail peer_ni.
1051  */
1052 int
1053 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1054                                   struct lnet_nid *nid)
1055 {
1056         int rc = 0;
1057
1058         if (!nid)
1059                 return -EINVAL;
1060         spin_lock(&lpni->lpni_lock);
1061         if (LNET_NID_IS_ANY(nid)) {
1062                 rc = -EINVAL;
1063         } else if (lpni->lpni_pref_nnids > 0) {
1064                 rc = -EPERM;
1065         } else if (lpni->lpni_pref_nnids == 0) {
1066                 lpni->lpni_pref.nid = *nid;
1067                 lpni->lpni_pref_nnids = 1;
1068                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1069         }
1070         spin_unlock(&lpni->lpni_lock);
1071
1072         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1073                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1074         return rc;
1075 }
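
/* Illustrative sketch (not part of the original code): for a non-MR peer_ni
 * the single preferred NID can only be set once; later attempts return
 * -EPERM, so a hypothetical caller may simply ignore that case:
 *
 *	rc = lnet_peer_ni_set_non_mr_pref_nid(lpni, &src_nid);
 *	if (rc && rc != -EPERM)
 *		CDEBUG(D_NET, "failed to set preferred NID: %d\n", rc);
 */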
1076
1077 /*
1078  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1079  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1080  */
1081 static int
1082 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1083 {
1084         int rc = 0;
1085
1086         spin_lock(&lpni->lpni_lock);
1087         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1088                 lpni->lpni_pref_nnids = 0;
1089                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1090         } else if (lpni->lpni_pref_nnids == 0) {
1091                 rc = -ENOENT;
1092         } else {
1093                 rc = -EPERM;
1094         }
1095         spin_unlock(&lpni->lpni_lock);
1096
1097         CDEBUG(D_NET, "peer %s: %d\n",
1098                libcfs_nidstr(&lpni->lpni_nid), rc);
1099         return rc;
1100 }
1101
1102 void
1103 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1104 {
1105         lpni->lpni_sel_priority = priority;
1106 }
1107
1108 /*
1109  * Clear the preferred NIDs from a non-multi-rail peer.
1110  */
1111 static void
1112 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1113 {
1114         struct lnet_peer_ni *lpni = NULL;
1115
1116         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1117                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1118 }
1119
1120 int
1121 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1122 {
1123         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1124         struct lnet_nid_list *ne1 = NULL;
1125         struct lnet_nid_list *ne2 = NULL;
1126         struct lnet_nid *tmp_nid = NULL;
1127         int rc = 0;
1128
1129         if (LNET_NID_IS_ANY(nid)) {
1130                 rc = -EINVAL;
1131                 goto out;
1132         }
1133
1134         if (lpni->lpni_pref_nnids == 1 &&
1135             nid_same(&lpni->lpni_pref.nid, nid)) {
1136                 rc = -EEXIST;
1137                 goto out;
1138         }
1139
1140         /* A non-MR node may have only one preferred NI per peer_ni */
1141         if (lpni->lpni_pref_nnids > 0 &&
1142             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1143                 rc = -EPERM;
1144                 goto out;
1145         }
1146
1147         /* add the new preferred nid to the list of preferred nids */
1148         if (lpni->lpni_pref_nnids != 0) {
1149                 size_t alloc_size = sizeof(*ne1);
1150
1151                 if (lpni->lpni_pref_nnids == 1) {
1152                         tmp_nid = &lpni->lpni_pref.nid;
1153                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1154                 }
1155
1156                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1157                         if (nid_same(&ne1->nl_nid, nid)) {
1158                                 rc = -EEXIST;
1159                                 goto out;
1160                         }
1161                 }
1162
1163                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1164                                  alloc_size);
1165                 if (!ne1) {
1166                         rc = -ENOMEM;
1167                         goto out;
1168                 }
1169
1170                 /* move the originally stored nid to the list */
1171                 if (lpni->lpni_pref_nnids == 1) {
1172                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1173                                 lpni->lpni_cpt, alloc_size);
1174                         if (!ne2) {
1175                                 rc = -ENOMEM;
1176                                 goto out;
1177                         }
1178                         INIT_LIST_HEAD(&ne2->nl_list);
1179                         ne2->nl_nid = *tmp_nid;
1180                 }
1181                 ne1->nl_nid = *nid;
1182         }
1183
1184         lnet_net_lock(LNET_LOCK_EX);
1185         spin_lock(&lpni->lpni_lock);
1186         if (lpni->lpni_pref_nnids == 0) {
1187                 lpni->lpni_pref.nid = *nid;
1188         } else {
1189                 if (ne2)
1190                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1191                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1192         }
1193         lpni->lpni_pref_nnids++;
1194         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1195         spin_unlock(&lpni->lpni_lock);
1196         lnet_net_unlock(LNET_LOCK_EX);
1197
1198 out:
1199         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1200                 spin_lock(&lpni->lpni_lock);
1201                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1202                 spin_unlock(&lpni->lpni_lock);
1203         }
1204         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1205                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1206         return rc;
1207 }
1208
1209 int
1210 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1211 {
1212         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1213         struct lnet_nid_list *ne = NULL;
1214         int rc = 0;
1215
1216         if (lpni->lpni_pref_nnids == 0) {
1217                 rc = -ENOENT;
1218                 goto out;
1219         }
1220
1221         if (lpni->lpni_pref_nnids == 1) {
1222                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1223                         rc = -ENOENT;
1224                         goto out;
1225                 }
1226         } else {
1227                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1228                         if (nid_same(&ne->nl_nid, nid))
1229                                 goto remove_nid_entry;
1230                 }
1231                 rc = -ENOENT;
1232                 ne = NULL;
1233                 goto out;
1234         }
1235
1236 remove_nid_entry:
1237         lnet_net_lock(LNET_LOCK_EX);
1238         spin_lock(&lpni->lpni_lock);
1239         if (lpni->lpni_pref_nnids == 1)
1240                 lpni->lpni_pref.nid = LNET_ANY_NID;
1241         else {
1242                 list_del_init(&ne->nl_list);
1243                 if (lpni->lpni_pref_nnids == 2) {
1244                         struct lnet_nid_list *ne, *tmp;
1245
1246                         list_for_each_entry_safe(ne, tmp,
1247                                                  &lpni->lpni_pref.nids,
1248                                                  nl_list) {
1249                                 lpni->lpni_pref.nid = ne->nl_nid;
1250                                 list_del_init(&ne->nl_list);
1251                                 LIBCFS_FREE(ne, sizeof(*ne));
1252                         }
1253                 }
1254         }
1255         lpni->lpni_pref_nnids--;
1256         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1257         spin_unlock(&lpni->lpni_lock);
1258         lnet_net_unlock(LNET_LOCK_EX);
1259
1260         if (ne)
1261                 LIBCFS_FREE(ne, sizeof(*ne));
1262 out:
1263         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1264                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1265         return rc;
1266 }
1267
1268 void
1269 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1270 {
1271         struct list_head zombies;
1272         struct lnet_nid_list *ne;
1273         struct lnet_nid_list *tmp;
1274
1275         INIT_LIST_HEAD(&zombies);
1276
1277         lnet_net_lock(LNET_LOCK_EX);
1278         if (lpni->lpni_pref_nnids == 1)
1279                 lpni->lpni_pref.nid = LNET_ANY_NID;
1280         else if (lpni->lpni_pref_nnids > 1)
1281                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1282         lpni->lpni_pref_nnids = 0;
1283         lnet_net_unlock(LNET_LOCK_EX);
1284
1285         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1286                 list_del_init(&ne->nl_list);
1287                 LIBCFS_FREE(ne, sizeof(*ne));
1288         }
1289 }
1290
1291 void
1292 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1293 {
1294         struct lnet_peer_ni *lpni;
1295
1296         *result = *nid;
1297         lpni = lnet_peer_ni_find_locked(nid);
1298         if (lpni) {
1299                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1300                 lnet_peer_ni_decref_locked(lpni);
1301         }
1302 }
1303
1304 bool
1305 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1306 __must_hold(&lp->lp_lock)
1307 {
1308         if (lnet_peer_discovery_disabled)
1309                 return true;
1310
1311         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1312             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1313                 return true;
1314         }
1315
1316         return false;
1317 }
1318
1319 /*
1320  * Peer Discovery
1321  */
1322 bool
1323 lnet_is_discovery_disabled(struct lnet_peer *lp)
1324 {
1325         bool rc = false;
1326
1327         spin_lock(&lp->lp_lock);
1328         rc = lnet_is_discovery_disabled_locked(lp);
1329         spin_unlock(&lp->lp_lock);
1330
1331         return rc;
1332 }
1333
1334 int
1335 LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
1336 {
1337         struct lnet_nid pnid = LNET_ANY_NID;
1338         bool mr;
1339         int i, rc;
1340         int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;
1341
1342         if (!nids || num_nids < 1)
1343                 return -EINVAL;
1344
1345         rc = LNetNIInit(LNET_PID_ANY);
1346         if (rc < 0)
1347                 return rc;
1348
1349         mutex_lock(&the_lnet.ln_api_mutex);
1350
1351         mr = lnet_peer_discovery_disabled == 0;
1352
1353         rc = 0;
1354         for (i = 0; i < num_nids; i++) {
1355                 if (nid_is_lo0(&nids[i]))
1356                         continue;
1357
1358                 if (LNET_NID_IS_ANY(&pnid)) {
1359                         pnid = nids[i];
1360                         rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
1361                         if (rc == -EALREADY) {
1362                                 struct lnet_peer *lp;
1363
1364                                 CDEBUG(D_NET, "A peer exists for NID %s\n",
1365                                        libcfs_nidstr(&pnid));
1366                                 rc = 0;
1367                                 /* Adds a refcount */
1368                                 lp = lnet_find_peer(&pnid);
1369                                 LASSERT(lp);
1370                                 pnid = lp->lp_primary_nid;
1371                                 /* Drop refcount from lookup */
1372                                 lnet_peer_decref_locked(lp);
1373                         }
1374                 } else if (lnet_peer_discovery_disabled) {
1375                         rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
1376                                               flags);
1377                 } else {
1378                         rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
1379                                               flags);
1380                 }
1381
1382                 if (rc && rc != -EEXIST)
1383                         goto unlock;
1384         }
1385
1386 unlock:
1387         mutex_unlock(&the_lnet.ln_api_mutex);
1388
1389         LNetNIFini();
1390
1391         return rc == -EEXIST ? 0 : rc;
1392 }
1393 EXPORT_SYMBOL(LNetAddPeer);
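
/* Illustrative sketch (not part of the original code): a hypothetical kernel
 * user passes an array of NIDs; the first usable NID becomes the primary,
 * subsequent NIDs are grouped under that primary when discovery is enabled,
 * or added as separate peers when discovery is disabled.
 *
 *	struct lnet_nid nids[2];	// filled in by the caller
 *
 *	rc = LNetAddPeer(nids, 2);
 *	if (rc < 0)
 *		// peer could not be created or updated
 */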
1394
1395 void LNetPrimaryNID(struct lnet_nid *nid)
1396 {
1397         struct lnet_peer *lp;
1398         struct lnet_peer_ni *lpni;
1399         struct lnet_nid orig;
1400         int rc = 0;
1401         int cpt;
1402
1403         if (!nid || nid_is_lo0(nid))
1404                 return;
1405         orig = *nid;
1406
1407         cpt = lnet_net_lock_current();
1408         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1409         if (IS_ERR(lpni)) {
1410                 rc = PTR_ERR(lpni);
1411                 goto out_unlock;
1412         }
1413         lp = lpni->lpni_peer_net->lpn_peer;
1414
1415         /* If discovery is disabled locally then we needn't bother running
1416          * discovery here because discovery will not modify whatever
1417          * primary NID is currently set for this peer. If the specified peer is
1418          * down then this discovery can introduce long delays into the mount
1419          * process, so skip it if it isn't necessary.
1420          */
1421 again:
1422         spin_lock(&lp->lp_lock);
1423         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
1424                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1425                 lp->lp_prim_lock_ts = ktime_get_ns();
1426         }
1427
1428         /* DD disabled, nothing to do */
1429         if (lnet_peer_discovery_disabled) {
1430                 *nid = lp->lp_primary_nid;
1431                 spin_unlock(&lp->lp_lock);
1432                 goto out_decref;
1433         }
1434
1435         /* Peer already up to date, nothing to do */
1436         if (lnet_peer_is_uptodate_locked(lp)) {
1437                 *nid = lp->lp_primary_nid;
1438                 spin_unlock(&lp->lp_lock);
1439                 goto out_decref;
1440         }
1441         spin_unlock(&lp->lp_lock);
1442
1443         /* If primary nid locking is enabled, discovery is performed
1444          * in the background.
1445          * If primary nid locking is disabled, discovery blocks here.
1446          * Messages to the peer will not go through until the discovery is
1447          * complete.
1448          */
1449         if (lock_prim_nid)
1450                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1451         else
1452                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1453         if (rc)
1454                 goto out_decref;
1455
1456         /* The lpni (or lp) for this NID may have changed and our ref is
1457          * the only thing keeping the old one around. Release the ref
1458          * and look up the lpni again
1459          */
1460         lnet_peer_ni_decref_locked(lpni);
1461         lpni = lnet_peer_ni_find_locked(nid);
1462         if (!lpni) {
1463                 rc = -ENOENT;
1464                 goto out_unlock;
1465         }
1466         lp = lpni->lpni_peer_net->lpn_peer;
1467
1468         if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
1469                 goto again;
1470         *nid = lp->lp_primary_nid;
1471 out_decref:
1472         lnet_peer_ni_decref_locked(lpni);
1473 out_unlock:
1474         lnet_net_unlock(cpt);
1475
1476         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1477                libcfs_nidstr(nid), rc);
1478 }
1479 EXPORT_SYMBOL(LNetPrimaryNID);
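
/* Illustrative sketch (not part of the original code): LNetPrimaryNID()
 * rewrites the NID in place, so a hypothetical caller passes the NID it
 * knows and reads back the peer's primary NID:
 *
 *	struct lnet_nid nid;	// some NID of the target peer
 *
 *	LNetPrimaryNID(&nid);
 *	CDEBUG(D_NET, "primary NID is %s\n", libcfs_nidstr(&nid));
 */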
1480
1481 bool
1482 LNetPeerDiscovered(struct lnet_nid *nid)
1483 {
1484         int cpt, disc = false;
1485         struct lnet_peer *lp;
1486
1487         lp = lnet_find_peer(nid);
1488         if (!lp)
1489                 goto out;
1490
1491         cpt = lnet_net_lock_current();
1492         spin_lock(&lp->lp_lock);
1493         if (((lp->lp_state & LNET_PEER_DISCOVERED) &&
1494             (lp->lp_state & LNET_PEER_NIDS_UPTODATE)) ||
1495             (lp->lp_state & LNET_PEER_NO_DISCOVERY))
1496                 disc = true;
1497         spin_unlock(&lp->lp_lock);
1498
1499         /* Drop refcount from lookup */
1500         lnet_peer_decref_locked(lp);
1501         lnet_net_unlock(cpt);
1502 out:
1503         CDEBUG(D_NET, "Peer NID %s discovered: %d\n", libcfs_nidstr(nid),
1504                disc);
1505         return disc;
1506 }
1507 EXPORT_SYMBOL(LNetPeerDiscovered);
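
/* Illustrative sketch (not part of the original code): a hypothetical caller
 * that must wait for discovery can poll this predicate, which is true once
 * the peer is DISCOVERED with up-to-date NIDs, or when discovery is disabled
 * for the peer.
 *
 *	while (!LNetPeerDiscovered(&nid))
 *		msleep(1000);	// illustrative back-off only
 */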
1508
1509 struct lnet_peer_net *
1510 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1511 {
1512         struct lnet_peer_net *peer_net;
1513         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1514                 if (peer_net->lpn_net_id == net_id)
1515                         return peer_net;
1516         }
1517         return NULL;
1518 }
1519
1520 /*
1521  * Attach a peer_ni to a peer_net and peer. This function assumes
1522  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1523  * may be attached to a different peer, in which case it will be
1524  * properly detached first. The whole operation is done atomically.
1525  *
1526  * This function consumes the reference on lpni and always returns 0.
1527  * This is the last function called from functions that do return an
1528  * int, so returning 0 here allows the compiler to do a tail call.
1529  */
1530 static int
1531 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1532                          struct lnet_peer_net *lpn,
1533                          struct lnet_peer_ni *lpni,
1534                          unsigned flags)
1535 {
1536         struct lnet_peer_table *ptable;
1537         bool new_lpn = false;
1538         int rc;
1539
1540         /* Install the new peer_ni */
1541         lnet_net_lock(LNET_LOCK_EX);
1542         /* Add peer_ni to global peer table hash, if necessary. */
1543         if (list_empty(&lpni->lpni_hashlist)) {
1544                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1545
1546                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1547                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1548                 ptable->pt_version++;
1549                 lnet_peer_ni_addref_locked(lpni);
1550         }
1551
1552         /* Detach the peer_ni from an existing peer, if necessary. */
1553         if (lpni->lpni_peer_net) {
1554                 LASSERT(lpni->lpni_peer_net != lpn);
1555                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1556                 lnet_peer_detach_peer_ni_locked(lpni);
1557                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1558                 lpni->lpni_peer_net = NULL;
1559         }
1560
1561         /* Add peer_ni to peer_net */
1562         lpni->lpni_peer_net = lpn;
1563         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1564                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1565         else
1566                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1567         lnet_update_peer_net_healthv(lpni);
1568         lnet_peer_net_addref_locked(lpn);
1569
1570         /* Add peer_net to peer */
1571         if (!lpn->lpn_peer) {
1572                 new_lpn = true;
1573                 lpn->lpn_peer = lp;
1574                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1575                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1576                 else
1577                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1578                 lnet_peer_addref_locked(lp);
1579         }
1580
1581         /* Add peer to global peer list, if necessary */
1582         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1583         if (list_empty(&lp->lp_peer_list)) {
1584                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1585                 ptable->pt_peers++;
1586         }
1587
1588         /* Update peer state */
1589         spin_lock(&lp->lp_lock);
1590         if (flags & LNET_PEER_CONFIGURED) {
1591                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1592                         lp->lp_state |= LNET_PEER_CONFIGURED;
1593         }
1594         if (flags & LNET_PEER_MULTI_RAIL) {
1595                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1596                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1597                         lnet_peer_clr_non_mr_pref_nids(lp);
1598                 }
1599         }
1600         if (flags & LNET_PEER_LOCK_PRIMARY) {
1601                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1602                 lp->lp_prim_lock_ts = ktime_get_ns();
1603         }
1604         spin_unlock(&lp->lp_lock);
1605
1606         lp->lp_nnis++;
1607
1608         /* apply UDSPs */
1609         if (new_lpn) {
1610                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1611                 if (rc)
1612                         CERROR("Failed to apply UDSPs on lpn %s\n",
1613                                libcfs_net2str(lpn->lpn_net_id));
1614         }
1615         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1616         if (rc)
1617                 CERROR("Failed to apply UDSPs on lpni %s\n",
1618                        libcfs_nidstr(&lpni->lpni_nid));
1619
1620         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1621                libcfs_nidstr(&lp->lp_primary_nid),
1622                libcfs_nidstr(&lpni->lpni_nid), flags);
1623         lnet_peer_ni_decref_locked(lpni);
1624         lnet_net_unlock(LNET_LOCK_EX);
1625
1626         return 0;
1627 }
1628
1629 /*
1630  * Create a new peer, with nid as its primary nid.
1631  *
1632  * Call with the lnet_api_mutex held.
1633  */
1634 static int
1635 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1636 {
1637         struct lnet_peer *lp;
1638         struct lnet_peer_net *lpn;
1639         struct lnet_peer_ni *lpni;
1640         int rc = 0;
1641
1642         LASSERT(nid);
1643
1644         /*
1645          * No need for the lnet_net_lock here, because the
1646          * lnet_api_mutex is held.
1647          */
1648         lpni = lnet_peer_ni_find_locked(nid);
1649         if (lpni) {
1650                 /* A peer with this NID already exists. */
1651                 lp = lpni->lpni_peer_net->lpn_peer;
1652                 lnet_peer_ni_decref_locked(lpni);
1653                 /*
1654                  * This is an error if the peer was configured and the
1655                  * primary NID differs or an attempt is made to change
1656                  * the Multi-Rail flag. Otherwise the assumption is
1657                  * that an existing peer is being modified.
1658                  */
1659                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1660                         if (!nid_same(&lp->lp_primary_nid, nid))
1661                                 rc = -EEXIST;
1662                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1663                                 rc = -EPERM;
1664                         goto out;
1665                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1666                         if (nid_same(&lp->lp_primary_nid, nid))
1667                                 rc = -EEXIST;
1668                         /* we're trying to recreate an existing peer which
1669                          * has already been created and has its primary
1670                          * NID locked. This is likely due to two servers
1671                          * existing on the same node. So we'll just refer
1672                          * to that node with the primary NID that was
1673                          * first added by Lustre.
1674                          */
1675                         else
1676                                 rc = -EALREADY;
1677                         goto out;
1678                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1679                         /* if not recreating peer as configured and
1680                          * not locking primary nid, no need to
1681                          * do anything if primary nid is not being changed
1682                          */
1683                         if (nid_same(&lp->lp_primary_nid, nid)) {
1684                                 rc = -EEXIST;
1685                                 goto out;
1686                         }
1687                 }
1688                 /* Delete and recreate the peer.
1689                  * We can get here:
1690                  * 1. If the peer is being recreated as a configured NID
1691                  * 2. if there already exists a peer which
1692                  *    was discovered manually, but is recreated via Lustre
1693                  *    with PRIMARY_lock
1694                  */
1695                 rc = lnet_peer_del(lp);
1696                 if (rc)
1697                         goto out;
1698         }
1699
1700         /* Create peer, peer_net, and peer_ni. */
1701         rc = -ENOMEM;
1702         lp = lnet_peer_alloc(nid);
1703         if (!lp)
1704                 goto out;
1705         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1706         if (!lpn)
1707                 goto out_free_lp;
1708         lpni = lnet_peer_ni_alloc(nid);
1709         if (!lpni)
1710                 goto out_free_lpn;
1711
1712         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1713
1714 out_free_lpn:
1715         LIBCFS_FREE(lpn, sizeof(*lpn));
1716 out_free_lp:
1717         LIBCFS_FREE(lp, sizeof(*lp));
1718 out:
1719         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1720                libcfs_nidstr(nid), flags, rc);
1721         return rc;
1722 }
1723
1724 /*
1725  * Add a NID to a peer. Call with ln_api_mutex held.
1726  *
1727  * Error codes:
1728  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1729  *  -EEXIST:   The NID was configured by DLC for a different peer.
1730  *  -ENOMEM:   Out of memory.
1731  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1732  *             non-multi-rail peer.
1733  */
1734 static int
1735 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1736                   unsigned int flags)
1737 {
1738         struct lnet_peer_net *lpn;
1739         struct lnet_peer_ni *lpni;
1740         int rc = 0;
1741
1742         LASSERT(lp);
1743         LASSERT(nid);
1744
1745         /* A configured peer can only be updated through configuration. */
1746         if (!(flags & LNET_PEER_CONFIGURED)) {
1747                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1748                         rc = -EPERM;
1749                         goto out;
1750                 }
1751         }
1752
1753         /*
1754          * The MULTI_RAIL flag can be set but not cleared, because
1755          * that would leave the peer struct in an invalid state.
1756          */
1757         if (flags & LNET_PEER_MULTI_RAIL) {
1758                 spin_lock(&lp->lp_lock);
1759                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1760                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1761                         lnet_peer_clr_non_mr_pref_nids(lp);
1762                 }
1763                 spin_unlock(&lp->lp_lock);
1764         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1765                 rc = -EPERM;
1766                 goto out;
1767         }
1768
1769         lpni = lnet_peer_ni_find_locked(nid);
1770         if (lpni) {
1771                 /*
1772                  * A peer_ni already exists. This is only a problem if
1773                  * it is not connected to this peer and was configured
1774                  * by DLC.
1775                  */
1776                 if (lpni->lpni_peer_net->lpn_peer == lp)
1777                         goto out_free_lpni;
1778                 if (lnet_peer_ni_is_configured(lpni)) {
1779                         rc = -EEXIST;
1780                         goto out_free_lpni;
1781                 }
1782                 /* If this is the primary NID, destroy the peer. */
1783                 if (lnet_peer_ni_is_primary(lpni)) {
1784                         struct lnet_peer *lp2 =
1785                                 lpni->lpni_peer_net->lpn_peer;
1786                         int rtr_refcount = lp2->lp_rtr_refcount;
1787                         unsigned int peer2_state;
1788                         __u64 peer2_prim_lock_ts;
1789
1790                         /* If there's another peer that this NID belongs to
1791                          * and the primary NID for that peer is locked,
1792                          * then, unless it is the only NID, we don't want
1793                          * to mess with it.
1794                          * But the configuration is wrong at this point,
1795                          * so we should flag both of these peers as in a bad
1796                          * state
1797                          */
1798                         spin_lock(&lp2->lp_lock);
1799                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1800                             lp2->lp_nnis > 1) {
1801                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1802                                 spin_unlock(&lp2->lp_lock);
1803                                 spin_lock(&lp->lp_lock);
1804                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1805                                 spin_unlock(&lp->lp_lock);
1806                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1807                                         libcfs_nidstr(&lp->lp_primary_nid),
1808                                         libcfs_nidstr(nid),
1809                                         libcfs_nidstr(&lp2->lp_primary_nid));
1810                                 goto out_free_lpni;
1811                         }
1812                         peer2_state = lp2->lp_state;
1813                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1814                         spin_unlock(&lp2->lp_lock);
1815
1816                         /* NID which got locked the earliest should be
1817                          * kept as primary. If the peers were
1818                          * created by Lustre, this allows the
1819                          * first listed NID to stay primary as intended
1820                          * for the purpose of communicating with Lustre
1821                          * even if peer discovery succeeded using
1822                          * a different NID of the MR peer.
1823                          */
1824                         spin_lock(&lp->lp_lock);
1825                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1826                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1827                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1828                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1829                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1830                                 lp->lp_primary_nid = *nid;
1831                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1832                         }
1833                         spin_unlock(&lp->lp_lock);
1834                         /*
1835                          * if we're trying to delete a router it means
1836                          * we're moving this peer NI to a new peer, so we must
1837                          * transfer router properties to the new peer
1838                          */
1839                         if (rtr_refcount > 0) {
1840                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1841                                 lnet_rtr_transfer_to_peer(lp2, lp);
1842                         }
1843                         lnet_peer_del(lp2);
1844                         lnet_peer_ni_decref_locked(lpni);
1845                         lpni = lnet_peer_ni_alloc(nid);
1846                         if (!lpni) {
1847                                 rc = -ENOMEM;
1848                                 goto out_free_lpni;
1849                         }
1850                 }
1851         } else {
1852                 lpni = lnet_peer_ni_alloc(nid);
1853                 if (!lpni) {
1854                         rc = -ENOMEM;
1855                         goto out_free_lpni;
1856                 }
1857         }
1858
1859         /*
1860          * Get the peer_net. Check that we're not adding a second
1861          * peer_ni on a peer_net of a non-multi-rail peer.
1862          */
1863         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1864         if (!lpn) {
1865                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1866                 if (!lpn) {
1867                         rc = -ENOMEM;
1868                         goto out_free_lpni;
1869                 }
1870         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1871                 rc = -ENOTUNIQ;
1872                 goto out_free_lpni;
1873         }
1874
1875         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1876
1877 out_free_lpni:
1878         lnet_peer_ni_decref_locked(lpni);
1879 out:
1880         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1881                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1882                flags, rc);
1883         return rc;
1884 }
1885
1886 /*
1887  * Update the primary NID of a peer, if possible.
1888  *
1889  * Call with the lnet_api_mutex held.
1890  */
1891 static int
1892 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1893                           unsigned int flags)
1894 {
1895         struct lnet_nid old = lp->lp_primary_nid;
1896         int rc = 0;
1897
1898         if (nid_same(&lp->lp_primary_nid, nid))
1899                 goto out;
1900
1901         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1902                 lp->lp_primary_nid = *nid;
1903
1904         rc = lnet_peer_add_nid(lp, nid, flags);
1905         if (rc) {
1906                 lp->lp_primary_nid = old;
1907                 goto out;
1908         }
1909 out:
1910         /* if this is a configured peer or the primary for that peer has
1911          * been locked, then we don't want to flag this scenario as
1912          * a failure
1913          */
1914         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1915             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1916                 return 0;
1917
1918         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1919                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1920
1921         return rc;
1922 }
1923
1924 /*
1925  * lpni creation initiated due to traffic, either sending or receiving.
1926  * Callers must hold ln_api_mutex.
1927  * A ref is taken on the lnet_peer_ni returned by this function.
1928  */
1929 static struct lnet_peer_ni *
1930 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1931 __must_hold(&the_lnet.ln_api_mutex)
1932 {
1933         struct lnet_peer *lp = NULL;
1934         struct lnet_peer_net *lpn = NULL;
1935         struct lnet_peer_ni *lpni;
1936         unsigned flags = 0;
1937         int rc = 0;
1938
1939         if (LNET_NID_IS_ANY(nid)) {
1940                 rc = -EINVAL;
1941                 goto out_err;
1942         }
1943
1944         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1945         lpni = lnet_peer_ni_find_locked(nid);
1946         if (lpni) {
1947                 /*
1948                  * We must have raced with another thread. Since we
1949                  * know next to nothing about a peer_ni created by
1950                  * traffic, we just assume everything is ok and
1951                  * return.
1952                  */
1953                 goto out;
1954         }
1955
1956         /* Create peer, peer_net, and peer_ni. */
1957         rc = -ENOMEM;
1958         lp = lnet_peer_alloc(nid);
1959         if (!lp)
1960                 goto out_err;
1961         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1962         if (!lpn)
1963                 goto out_err;
1964         lpni = lnet_peer_ni_alloc(nid);
1965         if (!lpni)
1966                 goto out_err;
1967         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1968
1969         /* lnet_peer_attach_peer_ni() always returns 0 */
1970         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1971
1972         lnet_peer_ni_addref_locked(lpni);
1973
1974 out_err:
1975         if (rc) {
1976                 if (lpn)
1977                         LIBCFS_FREE(lpn, sizeof(*lpn));
1978                 if (lp)
1979                         LIBCFS_FREE(lp, sizeof(*lp));
1980                 lpni = ERR_PTR(rc);
1981         }
1982 out:
1983         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1984         return lpni;
1985 }
1986
1987 /*
1988  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1989  *
1990  * This API handles the following combinations:
1991  *   Create a peer with its primary NI if only the prim_nid is provided
1992  *   Add a NID to a peer identified by the prim_nid. The peer identified
1993  *   by the prim_nid must already exist.
1994  *   The peer being created may be non-MR.
1995  *
1996  * The caller must hold ln_api_mutex. This prevents the peer from
1997  * being created/modified/deleted by a different thread.
1998  */
1999 static int
2000 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
2001                  unsigned int flags)
2002 __must_hold(&the_lnet.ln_api_mutex)
2003 {
2004         struct lnet_peer *lp = NULL;
2005         struct lnet_peer_ni *lpni;
2006
2007         /* The prim_nid must always be specified */
2008         if (LNET_NID_IS_ANY(prim_nid))
2009                 return -EINVAL;
2010
2011         if (mr)
2012                 flags |= LNET_PEER_MULTI_RAIL;
2013
2014         /*
2015          * If nid isn't specified, we must create a new peer with
2016          * prim_nid as its primary nid.
2017          */
2018         if (LNET_NID_IS_ANY(nid))
2019                 return lnet_peer_add(prim_nid, flags);
2020
2021         /* Look up the prim_nid, which must exist. */
2022         lpni = lnet_peer_ni_find_locked(prim_nid);
2023         if (!lpni)
2024                 return -ENOENT;
2025         lp = lpni->lpni_peer_net->lpn_peer;
2026         lnet_peer_ni_decref_locked(lpni);
2027
2028         /* Peer must have been configured. */
2029         if ((flags & LNET_PEER_CONFIGURED) &&
2030             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2031                 CDEBUG(D_NET, "peer %s was not configured\n",
2032                        libcfs_nidstr(prim_nid));
2033                 return -ENOENT;
2034         }
2035
2036         /* Primary NID must match */
2037         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2038                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2039                        libcfs_nidstr(prim_nid),
2040                        libcfs_nidstr(&lp->lp_primary_nid));
2041                 return -ENODEV;
2042         }
2043
2044         /* Multi-Rail flag must match. */
2045         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2046                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2047                        libcfs_nidstr(prim_nid));
2048                 return -EPERM;
2049         }
2050
2051         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2052                 CDEBUG(D_NET,
2053                        "Don't add temporary peer NI for uptodate peer %s\n",
2054                        libcfs_nidstr(&lp->lp_primary_nid));
2055                 return -EINVAL;
2056         }
2057
2058         return lnet_peer_add_nid(lp, nid, flags);
2059 }
2060
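/*
 * User-space (DLC) entry point for adding a peer NI. The peer is always
 * flagged LNET_PEER_CONFIGURED; LNET_PEER_LOCK_PRIMARY is set only when
 * @lock_prim is true (multiplying the flag by the bool yields the flag or 0).
 */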
2061 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2062                           bool mr, bool lock_prim)
2063 {
2064         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
2065
2066         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2067 }
2068
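/*
 * Strip a peer down to its primary NID: cancel any in-flight discovery,
 * delete every other NID attached to the peer, and clear NIDS_UPTODATE
 * so the peer is rediscovered the next time it is used.
 */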
2069 static int
2070 lnet_reset_peer(struct lnet_peer *lp)
2071 {
2072         struct lnet_peer_net *lpn, *lpntmp;
2073         struct lnet_peer_ni *lpni, *lpnitmp;
2074         unsigned int flags;
2075         int rc;
2076
2077         lnet_peer_cancel_discovery(lp);
2078
2079         flags = LNET_PEER_CONFIGURED;
2080         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2081                 flags |= LNET_PEER_MULTI_RAIL;
2082
2083         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2084                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2085                                          lpni_peer_nis) {
2086                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2087                                 continue;
2088
2089                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2090                         if (rc) {
2091                                 CERROR("Failed to delete %s from peer %s\n",
2092                                        libcfs_nidstr(&lpni->lpni_nid),
2093                                        libcfs_nidstr(&lp->lp_primary_nid));
2094                         }
2095                 }
2096         }
2097
2098         /* mark it for discovery the next time we use it */
2099         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2100         return 0;
2101 }
2102
2103 /*
2104  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2105  *
2106  * This API handles the following combinations:
2107  *   Delete a NI from a peer if both prim_nid and nid are provided.
2108  *   Delete a peer if only prim_nid is provided.
2109  *   Delete a peer if the nid provided is its primary nid.
2110  *
2111  * The caller must hold ln_api_mutex. This prevents the peer from
2112  * being modified/deleted by a different thread.
2113  */
2114 int
2115 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2116                  int force)
2117 {
2118         struct lnet_peer *lp;
2119         struct lnet_peer_ni *lpni;
2120         unsigned int flags;
2121
2122         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2123                 return -EINVAL;
2124
2125         lpni = lnet_peer_ni_find_locked(prim_nid);
2126         if (!lpni)
2127                 return -ENOENT;
2128         lp = lpni->lpni_peer_net->lpn_peer;
2129         lnet_peer_ni_decref_locked(lpni);
2130
2131         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2132                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2133                        libcfs_nidstr(prim_nid),
2134                        libcfs_nidstr(&lp->lp_primary_nid));
2135                 return -ENODEV;
2136         }
2137
2138         lnet_net_lock(LNET_LOCK_EX);
2139         if (lp->lp_rtr_refcount > 0) {
2140                 lnet_net_unlock(LNET_LOCK_EX);
2141                 CERROR("%s is a router. Can not be deleted\n",
2142                        libcfs_nidstr(prim_nid));
2143                 return -EBUSY;
2144         }
2145         lnet_net_unlock(LNET_LOCK_EX);
2146
2147         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2148                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2149                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2150                                libcfs_nidstr(&lp->lp_primary_nid));
2151                         return lnet_reset_peer(lp);
2152                 } else {
2153                         return lnet_peer_del(lp);
2154                 }
2155         }
2156
2157         flags = LNET_PEER_CONFIGURED;
2158         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2159                 flags |= LNET_PEER_MULTI_RAIL;
2160
2161         return lnet_peer_del_nid(lp, nid, flags);
2162 }
2163
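/*
 * kref release callback for a peer_ni. Removes the peer_ni from the
 * per-CPT zombie list if it is still hashed, frees any preferred-NID
 * list entries, frees the peer_ni itself, and drops the reference it
 * held on its peer_net.
 */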
2164 void
2165 lnet_destroy_peer_ni_locked(struct kref *ref)
2166 {
2167         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2168                                                  lpni_kref);
2169         struct lnet_peer_table *ptable;
2170         struct lnet_peer_net *lpn;
2171
2172         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2173
2174         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2175         LASSERT(list_empty(&lpni->lpni_txq));
2176         LASSERT(lpni->lpni_txqnob == 0);
2177         LASSERT(list_empty(&lpni->lpni_peer_nis));
2178         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2179
2180         lpn = lpni->lpni_peer_net;
2181         lpni->lpni_peer_net = NULL;
2182         lpni->lpni_net = NULL;
2183
2184         if (!list_empty(&lpni->lpni_hashlist)) {
2185                 /* remove the peer ni from the zombie list */
2186                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2187                 spin_lock(&ptable->pt_zombie_lock);
2188                 list_del_init(&lpni->lpni_hashlist);
2189                 ptable->pt_zombies--;
2190                 spin_unlock(&ptable->pt_zombie_lock);
2191         }
2192
2193         if (lpni->lpni_pref_nnids > 1) {
2194                 struct lnet_nid_list *ne, *tmp;
2195
2196                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2197                                          nl_list) {
2198                         list_del_init(&ne->nl_list);
2199                         LIBCFS_FREE(ne, sizeof(*ne));
2200                 }
2201         }
2202         LIBCFS_FREE(lpni, sizeof(*lpni));
2203
2204         if (lpn)
2205                 lnet_peer_net_decref_locked(lpn);
2206 }
2207
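/*
 * Look up the peer_ni for @nid, creating it through the traffic path if
 * it does not exist. Called with ln_api_mutex and lnet_net_lock/EX held;
 * the net lock is dropped and retaken around the creation.
 */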
2208 struct lnet_peer_ni *
2209 lnet_nid2peerni_ex(struct lnet_nid *nid)
2210 __must_hold(&the_lnet.ln_api_mutex)
2211 {
2212         struct lnet_peer_ni *lpni = NULL;
2213
2214         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2215                 return ERR_PTR(-ESHUTDOWN);
2216
2217         /*
2218          * find if a peer_ni already exists.
2219          * If so then just return that.
2220          */
2221         lpni = lnet_peer_ni_find_locked(nid);
2222         if (lpni)
2223                 return lpni;
2224
2225         lnet_net_unlock(LNET_LOCK_EX);
2226
2227         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2228
2229         lnet_net_lock(LNET_LOCK_EX);
2230
2231         return lpni;
2232 }
2233
2234 /*
2235  * Get a peer_ni for the given nid, create it if necessary. Takes a
2236  * hold on the peer_ni.
2237  */
2238 struct lnet_peer_ni *
2239 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2240                         struct lnet_nid *pref, int cpt)
2241 {
2242         struct lnet_peer_ni *lpni = NULL;
2243
2244         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2245                 return ERR_PTR(-ESHUTDOWN);
2246
2247         /*
2248          * find if a peer_ni already exists.
2249          * If so then just return that.
2250          */
2251         lpni = lnet_peer_ni_find_locked(nid);
2252         if (lpni)
2253                 return lpni;
2254
2255         /*
2256          * Slow path:
2257          * use the lnet_api_mutex to serialize the creation of the peer_ni
2258          * and the creation/deletion of the local ni/net. When a local ni is
2259          * created, if there exists a set of peer_nis on that network,
2260          * they need to be traversed and updated. When a local NI is
2261          * deleted, which could result in a network being deleted, then
2262          * all peer nis on that network need to be removed as well.
2263          *
2264          * Creation through traffic should also be serialized with
2265          * creation through DLC.
2266          */
2267         lnet_net_unlock(cpt);
2268         mutex_lock(&the_lnet.ln_api_mutex);
2269         /*
2270          * the_lnet.ln_state is only modified under the ln_api_mutex, so a single
2271          * check here is sufficient.
2272          */
2273         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2274                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2275
2276         mutex_unlock(&the_lnet.ln_api_mutex);
2277         lnet_net_lock(cpt);
2278
2279         /* Lock has been dropped, check again for shutdown. */
2280         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2281                 if (!IS_ERR_OR_NULL(lpni))
2282                         lnet_peer_ni_decref_locked(lpni);
2283                 lpni = ERR_PTR(-ESHUTDOWN);
2284         }
2285
2286         return lpni;
2287 }
2288
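/* Return true if router (gateway) discovery is in progress for @lp. */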
2289 bool
2290 lnet_peer_gw_discovery(struct lnet_peer *lp)
2291 {
2292         bool rc = false;
2293
2294         spin_lock(&lp->lp_lock);
2295         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2296                 rc = true;
2297         spin_unlock(&lp->lp_lock);
2298
2299         return rc;
2300 }
2301
2302 bool
2303 lnet_peer_is_uptodate(struct lnet_peer *lp)
2304 {
2305         bool rc;
2306
2307         spin_lock(&lp->lp_lock);
2308         rc = lnet_peer_is_uptodate_locked(lp);
2309         spin_unlock(&lp->lp_lock);
2310         return rc;
2311 }
2312
2313 /*
2314  * Is a peer uptodate from the point of view of discovery?
2315  *
2316  * If it is currently being processed, obviously not.
2317  * A forced Ping or Push is also handled by the discovery thread.
2318  *
2319  * Otherwise look at whether the peer needs rediscovering.
2320  */
2321 bool
2322 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2323 __must_hold(&lp->lp_lock)
2324 {
2325         bool rc;
2326
2327         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2328                             LNET_PEER_FORCE_PING |
2329                             LNET_PEER_FORCE_PUSH)) {
2330                 rc = false;
2331         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2332                 rc = false;
2333         } else if (lnet_peer_needs_push(lp)) {
2334                 rc = false;
2335         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2336                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2337                         rc = true;
2338                 else
2339                         rc = false;
2340         } else {
2341                 rc = false;
2342         }
2343
2344         return rc;
2345 }
2346
2347 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2348 void
2349 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2350 {
2351         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2352          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2353          * when adding to the list and queuing the peer to ensure that we do not
2354          * strand any messages on the lp_dc_pendq. This scheme ensures the
2355          * message will be resent even if the peer is already being discovered.
2356          * Therefore we needn't check the return value of
2357          * lnet_peer_queue_for_discovery(lp).
2358          */
2359         lnet_net_lock(LNET_LOCK_EX);
2360         spin_lock(&lp->lp_lock);
2361         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2362         spin_unlock(&lp->lp_lock);
2363         lnet_peer_queue_for_discovery(lp);
2364         lnet_net_unlock(LNET_LOCK_EX);
2365 }
2366
2367 /*
2368  * Queue a peer for the attention of the discovery thread.  Call with
2369  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2370  * -EALREADY if the peer was already queued.
2371  */
2372 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2373 {
2374         int rc;
2375
2376         spin_lock(&lp->lp_lock);
2377         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2378                 lp->lp_state |= LNET_PEER_DISCOVERING;
2379         spin_unlock(&lp->lp_lock);
2380         if (list_empty(&lp->lp_dc_list)) {
2381                 lnet_peer_addref_locked(lp);
2382                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2383                 wake_up(&the_lnet.ln_dc_waitq);
2384                 rc = 0;
2385         } else {
2386                 rc = -EALREADY;
2387         }
2388
2389         CDEBUG(D_NET, "Queue peer %s: %d\n",
2390                libcfs_nidstr(&lp->lp_primary_nid), rc);
2391
2392         return rc;
2393 }
2394
2395 /*
2396  * Discovery of a peer is complete. Wake all waiters on the peer.
2397  * Call with lnet_net_lock/EX held.
2398  */
2399 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2400 {
2401         struct lnet_msg *msg, *tmp;
2402         int rc = 0;
2403         LIST_HEAD(pending_msgs);
2404
2405         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2406                libcfs_nidstr(&lp->lp_primary_nid));
2407
2408         spin_lock(&lp->lp_lock);
2409         /* Our caller dropped lp_lock which may have allowed another thread to
2410          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2411          * Ensure it is cleared.
2412          */
2413         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2414         if (dc_error) {
2415                 lp->lp_dc_error = dc_error;
2416                 lp->lp_state |= LNET_PEER_REDISCOVER;
2417         }
2418         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2419         spin_unlock(&lp->lp_lock);
2420         list_del_init(&lp->lp_dc_list);
2421         wake_up(&lp->lp_dc_waitq);
2422
2423         if (lp->lp_rtr_refcount > 0)
2424                 lnet_router_discovery_complete(lp);
2425
2426         lnet_net_unlock(LNET_LOCK_EX);
2427
2428         /* iterate through all pending messages and send them again */
2429         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2430                 list_del_init(&msg->msg_list);
2431                 if (dc_error) {
2432                         lnet_finalize(msg, dc_error);
2433                         continue;
2434                 }
2435
2436                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2437                        lnet_msgtyp2str(msg->msg_type),
2438                        libcfs_idstr(&msg->msg_target));
2439                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2440                                &msg->msg_rtr_nid_param);
2441                 if (rc < 0) {
2442                         CNETERR("Error sending %s to %s: %d\n",
2443                                lnet_msgtyp2str(msg->msg_type),
2444                                libcfs_idstr(&msg->msg_target), rc);
2445                         lnet_finalize(msg, rc);
2446                 }
2447         }
2448         lnet_net_lock(LNET_LOCK_EX);
2449         lnet_peer_decref_locked(lp);
2450 }
2451
2452 /*
2453  * Handle inbound push.
2454  * Like any event handler, called with lnet_res_lock/CPT held.
2455  */
2456 void lnet_peer_push_event(struct lnet_event *ev)
2457 {
2458         struct lnet_ping_buffer *pbuf;
2459         struct lnet_peer *lp;
2460         int infobytes;
2461
2462         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2463
2464         /* lnet_find_peer() adds a refcount */
2465         lp = lnet_find_peer(&ev->source.nid);
2466         if (!lp) {
2467                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2468                        libcfs_nidstr(&ev->initiator.nid),
2469                        libcfs_nidstr(&ev->source.nid));
2470                 pbuf->pb_needs_post = true;
2471                 return;
2472         }
2473
2474         /* Ensure peer state remains consistent while we modify it. */
2475         spin_lock(&lp->lp_lock);
2476
2477         /*
2478          * If some kind of error happened, the contents of the message
2479          * cannot be used. Clear the NIDS_UPTODATE flag and set the
2480          * FORCE_PING flag to trigger a ping.
2481          */
2482         if (ev->status) {
2483                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2484                 lp->lp_state |= LNET_PEER_FORCE_PING;
2485                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2486                        ev->status,
2487                        libcfs_nidstr(&lp->lp_primary_nid),
2488                        libcfs_nidstr(&ev->source.nid));
2489                 goto out;
2490         }
2491
2492         /*
2493          * A push with invalid or corrupted info. Clear the UPTODATE
2494          * flag to trigger a ping.
2495          */
2496         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2497                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2498                 lp->lp_state |= LNET_PEER_FORCE_PING;
2499                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2500                        libcfs_nidstr(&lp->lp_primary_nid));
2501                 goto out;
2502         }
2503
2504         /* Make sure we'll allocate the correct size ping buffer when
2505          * pinging the peer.
2506          */
2507         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2508         if (lp->lp_data_bytes < infobytes)
2509                 lp->lp_data_bytes = infobytes;
2510
2511         /*
2512          * A non-Multi-Rail peer is not supposed to be capable of
2513          * sending a push.
2514          */
2515         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2516                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2517                        libcfs_nidstr(&lp->lp_primary_nid));
2518                 goto out;
2519         }
2520
2521         /*
2522          * The peer may have discovery disabled at its end. Set
2523          * NO_DISCOVERY as appropriate.
2524          */
2525         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2526                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2527                        libcfs_nidstr(&lp->lp_primary_nid));
2528                 /*
2529                  * Mark the peer for deletion if we already know about it
2530                  * and it's going from discovery enabled to discovery disabled
2531                  */
2532                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2533                                       LNET_PEER_DISCOVERING)) &&
2534                      lp->lp_state & LNET_PEER_DISCOVERED) {
2535                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2536                                libcfs_nidstr(&lp->lp_primary_nid),
2537                                lp->lp_state);
2538                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2539                 }
2540                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2541         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2542                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2543                        libcfs_nidstr(&lp->lp_primary_nid));
2544                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2545         }
2546
2547         /*
2548          * Update the MULTI_RAIL flag based on the push. If the peer
2549          * was configured with DLC then the setting should match what
2550          * DLC put in.
2551          * NB: We verified above that the MR feature bit is set in pi_features
2552          */
2553         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2554                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2555                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2556         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2557                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2558                       libcfs_nidstr(&lp->lp_primary_nid));
2559         } else if (lnet_peer_discovery_disabled) {
2560                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2561                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2562         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2563                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2564                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2565         } else {
2566                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2567                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2568                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2569                 lnet_peer_clr_non_mr_pref_nids(lp);
2570         }
2571
2572         /* Check for truncation of the Put message. Clear the
2573          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2574          * and tell discovery to allocate a bigger buffer.
2575          */
2576         if (ev->mlength < ev->rlength) {
2577                 if (the_lnet.ln_push_target_nbytes < infobytes)
2578                         the_lnet.ln_push_target_nbytes = infobytes;
2579                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2580                 lp->lp_state |= LNET_PEER_FORCE_PING;
2581                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2582                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2583                 goto out;
2584         }
2585
2586         /* always assume new data */
2587         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2588         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2589
2590         /* If there is data present that hasn't been processed yet,
2591          * we'll replace it if the Put contained newer data and it
2592          * fits. We're racing with a Ping or earlier Push in this
2593          * case.
2594          */
2595         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2596                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2597                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2598                     infobytes <= lp->lp_data->pb_nbytes) {
2599                         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2600                                infobytes, FLEXIBLE_OBJECT);
2601                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2602                               libcfs_nidstr(&lp->lp_primary_nid),
2603                               LNET_PING_BUFFER_SEQNO(pbuf),
2604                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2605                 }
2606                 goto out;
2607         }
2608
2609         /*
2610          * Allocate a buffer to copy the data. On a failure we drop
2611          * the Push and set FORCE_PING to force the discovery
2612          * thread to fix the problem by pinging the peer.
2613          */
2614         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2615         if (!lp->lp_data) {
2616                 lp->lp_state |= LNET_PEER_FORCE_PING;
2617                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2618                        libcfs_nidstr(&lp->lp_primary_nid),
2619                        LNET_PING_BUFFER_SEQNO(pbuf));
2620                 goto out;
2621         }
2622
2623         /* Success */
2624         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2625                       FLEXIBLE_OBJECT);
2626         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2627         CDEBUG(D_NET, "Received Push %s %u\n",
2628                libcfs_nidstr(&lp->lp_primary_nid),
2629                LNET_PING_BUFFER_SEQNO(pbuf));
2630
2631 out:
2632         /* We've processed this buffer. It can be reposted */
2633         pbuf->pb_needs_post = true;
2634
2635         /*
2636          * Queue the peer for discovery if it isn't up to date. If it was
2637          * already queued, force it onto the request queue and wake the
2638          * discovery thread, because its status changed.
2639          */
2640         spin_unlock(&lp->lp_lock);
2641         lnet_net_lock(LNET_LOCK_EX);
2642         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2643                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2644                 wake_up(&the_lnet.ln_dc_waitq);
2645         }
2646         /* Drop refcount from lookup */
2647         lnet_peer_decref_locked(lp);
2648         lnet_net_unlock(LNET_LOCK_EX);
2649 }
2650
2651 /*
2652  * Clear the discovery error state, unless we're already discovering
2653  * this peer, in which case the error is current.
2654  */
2655 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2656 {
2657         spin_lock(&lp->lp_lock);
2658         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2659                 lp->lp_dc_error = 0;
2660         spin_unlock(&lp->lp_lock);
2661 }
2662
2663 /*
2664  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2665  * dropped/retaken within this function. An lnet_peer_ni is passed in
2666  * because discovery could tear down an lnet_peer.
2667  */
2668 int
2669 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2670 {
2671         DEFINE_WAIT(wait);
2672         struct lnet_peer *lp = NULL;
2673         int rc = 0;
2674         int count = 0;
2675
2676 again:
2677         if (lp)
2678                 lnet_peer_decref_locked(lp);
2679         lnet_net_unlock(cpt);
2680         lnet_net_lock(LNET_LOCK_EX);
2681         lp = lpni->lpni_peer_net->lpn_peer;
2682         lnet_peer_clear_discovery_error(lp);
2683
2684         /*
2685          * We're willing to be interrupted. The lpni can become a
2686          * zombie if we race with DLC, so we must check for that.
2687          */
2688         for (;;) {
2689                 /* Keep lp alive when the lnet_net_lock is unlocked */
2690                 lnet_peer_addref_locked(lp);
2691                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2692                 if (signal_pending(current))
2693                         break;
2694                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2695                         break;
2696                 /*
2697                  * Don't repeat discovery if discovery is disabled. This is
2698                  * done to ensure we can use discovery as a standard ping as
2699                  * well for backwards compatibility with routers which do not
2700                  * have discovery or have discovery disabled
2701                  */
2702                 if (lnet_is_discovery_disabled(lp) && count > 0)
2703                         break;
2704                 if (lp->lp_dc_error)
2705                         break;
2706                 if (lnet_peer_is_uptodate(lp))
2707                         break;
2708                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2709                         break;
2710                 lnet_peer_queue_for_discovery(lp);
2711                 count++;
2712                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2713
2714                 /*
2715                  * If caller requested a non-blocking operation then
2716                  * return immediately. Once discovery is complete any
2717                  * pending messages that were stopped due to discovery
2718                  * will be transmitted.
2719                  */
2720                 if (!block)
2721                         break;
2722
2723                 lnet_net_unlock(LNET_LOCK_EX);
2724                 schedule();
2725                 finish_wait(&lp->lp_dc_waitq, &wait);
2726                 lnet_net_lock(LNET_LOCK_EX);
2727                 lnet_peer_decref_locked(lp);
2728                 /* Peer may have changed */
2729                 lp = lpni->lpni_peer_net->lpn_peer;
2730         }
2731         finish_wait(&lp->lp_dc_waitq, &wait);
2732
2733         lnet_net_unlock(LNET_LOCK_EX);
2734         lnet_net_lock(cpt);
2735         /*
2736          * The peer may have changed, so re-check and rediscover if that turns
2737          * out to have been the case. The reference count on lp ensured that
2738          * even if it was unlinked from lpni the memory could not be recycled.
2739          * Thus the check below is sufficient to determine whether the peer
2740          * changed. If the peer changed, then lp must not be dereferenced.
2741          */
2742         if (lp != lpni->lpni_peer_net->lpn_peer)
2743                 goto again;
2744
2745         if (signal_pending(current))
2746                 rc = -EINTR;
2747         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2748                 rc = -ESHUTDOWN;
2749         else if (lp->lp_dc_error)
2750                 rc = lp->lp_dc_error;
2751         else if (!block)
2752                 CDEBUG(D_NET, "non-blocking discovery\n");
2753         else if (!lnet_peer_is_uptodate(lp) &&
2754                  !(lnet_is_discovery_disabled(lp) ||
2755                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2756                 goto again;
2757
2758         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2759                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2760                libcfs_nidstr(&lpni->lpni_nid), rc,
2761                (!block) ? "pending discovery" : "discovery complete");
2762         lnet_peer_decref_locked(lp);
2763
2764         return rc;
2765 }
2766
2767 /* Handle an incoming ack for a push. */
2768 static void
2769 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2770 {
2771         struct lnet_ping_buffer *pbuf;
2772
2773         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2774         spin_lock(&lp->lp_lock);
2775         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2776         lp->lp_push_error = ev->status;
2777         if (ev->status)
2778                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2779         else
2780                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2781         spin_unlock(&lp->lp_lock);
2782
2783         CDEBUG(D_NET, "peer %s ev->status %d\n",
2784                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2785 }
2786
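/*
 * Extract the peer's primary NID from a ping buffer into @nid. With the
 * PRIMARY_LARGE feature the first large-address NID is primary; otherwise
 * fall back to the second ping-info entry (pi_ni[1]). Returns false if no
 * primary NID can be determined.
 */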
2787 static bool find_primary(struct lnet_nid *nid,
2788                          struct lnet_ping_buffer *pbuf)
2789 {
2790         struct lnet_ping_info *pi = &pbuf->pb_info;
2791         struct lnet_ping_iter piter;
2792         __u32 *stp;
2793
2794         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2795                 /* First large nid is primary */
2796                 for (stp = ping_iter_first(&piter, pbuf, nid);
2797                      stp;
2798                      stp = ping_iter_next(&piter, nid)) {
2799                         if (nid_is_nid4(nid))
2800                                 continue;
2801                         /* nid has already been copied in */
2802                         return true;
2803                 }
2804                 /* no large nids ... weird ... ignore the flag
2805                  * and use first nid.
2806                  */
2807         }
2808         /* pi_ni[1] is primary */
2809         if (pi->pi_nnis < 2)
2810                 return false;
2811         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2812         return true;
2813 }
2814
2815 /* Handle a Reply message. This is the reply to a Ping message. */
2816 static void
2817 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2818 {
2819         struct lnet_ping_buffer *pbuf;
2820         struct lnet_nid primary;
2821         int infobytes;
2822         int rc;
2823         bool ping_feat_disc;
2824
2825         spin_lock(&lp->lp_lock);
2826
2827         lp->lp_disc_src_nid = ev->target.nid;
2828         lp->lp_disc_dst_nid = ev->source.nid;
2829
2830         /*
2831          * If some kind of error happened, the contents of the message
2832          * cannot be used. Set PING_FAILED to trigger a retry.
2833          */
2834         if (ev->status) {
2835                 lp->lp_state |= LNET_PEER_PING_FAILED;
2836                 lp->lp_ping_error = ev->status;
2837                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2838                        ev->status,
2839                        libcfs_nidstr(&lp->lp_primary_nid),
2840                        libcfs_nidstr(&ev->source.nid));
2841                 goto out;
2842         }
2843
2844         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2845         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2846                 lnet_swap_pinginfo(pbuf);
2847
2848         /*
2849          * A reply with invalid or corrupted info. Set PING_FAILED to
2850          * trigger a retry.
2851          */
2852         rc = lnet_ping_info_validate(&pbuf->pb_info);
2853         if (rc) {
2854                 lp->lp_state |= LNET_PEER_PING_FAILED;
2855                 lp->lp_ping_error = 0;
2856                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2857                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2858                 goto out;
2859         }
2860
2861         /*
2862          * The peer may have discovery disabled at its end. Set
2863          * NO_DISCOVERY as appropriate.
2864          */
2865         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2866         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2867                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2868                        libcfs_nidstr(&lp->lp_primary_nid),
2869                        ping_feat_disc ? "enabled" : "disabled",
2870                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2871
2872                 /* Detect whether this peer has toggled discovery from on to
2873                  * off and whether we can delete and re-create the peer. Peers
2874                  * that were manually configured cannot be deleted by discovery.
2875                  * We need to delete this peer and re-create it if the peer was
2876                  * not configured manually, is currently considered DD capable,
2877                  * and either:
2878                  * 1. We've already discovered the peer (the peer has toggled
2879                  *    the discovery feature from on to off), or
2880                  * 2. The peer is considered MR, but it was not user configured
2881                  *    (this was a "temporary" peer created via the kernel APIs
2882                  *     that we're discovering for the first time)
2883                  */
2884                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2885                                       LNET_PEER_NO_DISCOVERY)) &&
2886                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2887                                      LNET_PEER_MULTI_RAIL))) {
2888                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2889                                libcfs_nidstr(&lp->lp_primary_nid),
2890                                lp->lp_state);
2891                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2892                 }
2893                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2894         } else {
2895                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2896                        libcfs_nidstr(&lp->lp_primary_nid));
2897                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2898         }
2899
2900         /*
2901          * Update the MULTI_RAIL flag based on the reply. If the peer
2902          * was configured with DLC then the setting should match what
2903          * DLC put in.
2904          */
2905         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2906                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2907                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2908                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2909                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2910                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2911                               libcfs_nidstr(&lp->lp_primary_nid));
2912                 } else if (lnet_peer_discovery_disabled) {
2913                         CDEBUG(D_NET,
2914                                "peer %s(%p) not MR: DD disabled locally\n",
2915                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2916                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2917                         CDEBUG(D_NET,
2918                                "peer %s(%p) not MR: DD disabled remotely\n",
2919                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2920                 } else {
2921                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2922                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2923                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2924                         lnet_peer_clr_non_mr_pref_nids(lp);
2925                 }
2926         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2927                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2928                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2929                               libcfs_nidstr(&lp->lp_primary_nid));
2930                 } else {
2931                         CERROR("Multi-Rail state vanished from %s\n",
2932                                libcfs_nidstr(&lp->lp_primary_nid));
2933                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2934                 }
2935         }
2936
2937         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2938         /*
2939          * Make sure we'll allocate the correct size ping buffer when
2940          * pinging the peer.
2941          */
2942         if (lp->lp_data_bytes < infobytes)
2943                 lp->lp_data_bytes = infobytes;
2944
2945         /* Check for truncation of the Reply. Clear PING_SENT and set
2946          * PING_FAILED to trigger a retry.
2947          */
2948         if (pbuf->pb_nbytes < infobytes) {
2949                 if (the_lnet.ln_push_target_nbytes < infobytes)
2950                         the_lnet.ln_push_target_nbytes = infobytes;
2951                 lp->lp_state |= LNET_PEER_PING_FAILED;
2952                 lp->lp_ping_error = 0;
2953                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2954                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2955                 goto out;
2956         }
2957
2958         /*
2959          * Check the sequence numbers in the reply. These are only
2960          * available if the reply came from a Multi-Rail peer.
2961          */
2962         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2963             find_primary(&primary, pbuf) &&
2964             nid_same(&lp->lp_primary_nid, &primary)) {
2965                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2966                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2967                                 libcfs_nidstr(&lp->lp_primary_nid),
2968                                 LNET_PING_BUFFER_SEQNO(pbuf),
2969                                 lp->lp_peer_seqno);
2970
2971                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2972         }
2973
2974         /* We're happy with the state of the data in the buffer. */
2975         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2976                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2977                lp->lp_state);
2978         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2979                 lnet_ping_buffer_decref(lp->lp_data);
2980         else
2981                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2982         lnet_ping_buffer_addref(pbuf);
2983         lp->lp_data = pbuf;
2984 out:
2985         lp->lp_state &= ~LNET_PEER_PING_SENT;
2986         spin_unlock(&lp->lp_lock);
2987 }
2988
2989 /*
2990  * Send event handling. Only matters for error cases, where we clean
2991  * up state on the peer and peer_ni that would otherwise be updated in
2992  * the REPLY event handler for a successful Ping, and the ACK event
2993  * handler for a successful Push.
2994  */
2995 static int
2996 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2997 {
2998         int rc = 0;
2999
3000         if (!ev->status)
3001                 goto out;
3002
3003         spin_lock(&lp->lp_lock);
3004         if (ev->msg_type == LNET_MSG_GET) {
3005                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3006                 lp->lp_state |= LNET_PEER_PING_FAILED;
3007                 lp->lp_ping_error = ev->status;
3008         } else { /* ev->msg_type == LNET_MSG_PUT */
3009                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3010                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3011                 lp->lp_push_error = ev->status;
3012         }
3013         spin_unlock(&lp->lp_lock);
3014         rc = LNET_REDISCOVER_PEER;
3015 out:
3016         CDEBUG(D_NET, "%s Send to %s: %d\n",
3017                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3018                 libcfs_nidstr(&ev->target.nid), rc);
3019         return rc;
3020 }
3021
3022 /*
3023  * Unlink event handling. This event is only seen if a call to
3024  * LNetMDUnlink() caused the event to be unlinked. If this call was
3025  * made after the event was set up in LNetGet() or LNetPut() then we
3026  * assume the Ping or Push timed out.
3027  */
3028 static void
3029 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3030 {
3031         spin_lock(&lp->lp_lock);
3032         /* We've passed through LNetGet() */
3033         if (lp->lp_state & LNET_PEER_PING_SENT) {
3034                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3035                 lp->lp_state |= LNET_PEER_PING_FAILED;
3036                 lp->lp_ping_error = -ETIMEDOUT;
3037                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3038                         libcfs_nidstr(&lp->lp_primary_nid));
3039         }
3040         /* We've passed through LNetPut() */
3041         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3042                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3043                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3044                 lp->lp_push_error = -ETIMEDOUT;
3045                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3046                         libcfs_nidstr(&lp->lp_primary_nid));
3047         }
3048         spin_unlock(&lp->lp_lock);
3049 }
3050
3051 /*
3052  * Event handler for the discovery EQ.
3053  *
3054  * Called with lnet_res_lock(cpt) held. The cpt is the
3055  * lnet_cpt_of_cookie() of the md handle cookie.
3056  */
3057 static void lnet_discovery_event_handler(struct lnet_event *event)
3058 {
3059         struct lnet_peer *lp = event->md_user_ptr;
3060         struct lnet_ping_buffer *pbuf;
3061         int rc;
3062
3063         /* discovery needs to take another look */
3064         rc = LNET_REDISCOVER_PEER;
3065
3066         CDEBUG(D_NET, "Received event: %d\n", event->type);
3067
3068         switch (event->type) {
3069         case LNET_EVENT_ACK:
3070                 lnet_discovery_event_ack(lp, event);
3071                 break;
3072         case LNET_EVENT_REPLY:
3073                 lnet_discovery_event_reply(lp, event);
3074                 break;
3075         case LNET_EVENT_SEND:
3076                 /* Only send failure triggers a retry. */
3077                 rc = lnet_discovery_event_send(lp, event);
3078                 break;
3079         case LNET_EVENT_UNLINK:
3080                 /* LNetMDUnlink() was called */
3081                 lnet_discovery_event_unlink(lp, event);
3082                 break;
3083         default:
3084                 /* Invalid events. */
3085                 LBUG();
3086         }
3087         lnet_net_lock(LNET_LOCK_EX);
3088
3089         /* Put the peer back at the end of the request queue if discovery
3090          * is not already done. */
3091         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3092             lnet_peer_queue_for_discovery(lp)) {
3093                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3094                 wake_up(&the_lnet.ln_dc_waitq);
3095         }
3096         if (event->unlinked) {
3097                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3098                 lnet_ping_buffer_decref(pbuf);
3099                 lnet_peer_decref_locked(lp);
3100         }
3101         lnet_net_unlock(LNET_LOCK_EX);
3102 }
3103
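/*
 * Start iterating over the NI status entries in @pbuf. The iterator end
 * is capped at the smaller of the buffer size and the size implied by
 * the ping info header. Returns a pointer to the status of the first
 * entry, which is always a legacy lnet_ni_status, and fills @nid with
 * that entry's NID if @nid is non-NULL.
 */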
3104 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3105                      struct lnet_ping_buffer *pbuf,
3106                      struct lnet_nid *nid)
3107 {
3108         pi->pinfo = &pbuf->pb_info;
3109         pi->pos = &pbuf->pb_info.pi_ni;
3110         pi->end = (void *)pi->pinfo +
3111                   min_t(int, pbuf->pb_nbytes,
3112                         lnet_ping_info_size(pi->pinfo));
3113         /* lnet_ping_info_validate ensures there will be one
3114          * lnet_ni_status at the start
3115          */
3116         if (nid)
3117                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3118
3119         pi->pos += sizeof(struct lnet_ni_status);
3120         return &pbuf->pb_info.pi_ni[0].ns_status;
3121 }
3122
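/*
 * Advance the iterator to the next NI status entry. Entries within the
 * legacy pi_ni[] array are fixed-size lnet_ni_status structures; beyond
 * that array, and only if LNET_PING_FEAT_LARGE_ADDR is set, entries are
 * variable-size lnet_ni_large_status structures. Large entries whose NID
 * does not fit in a struct lnet_nid are skipped. Returns a pointer to
 * the entry's status and fills @nid if non-NULL, or returns NULL at the
 * end of the buffer. See ping_info_count_entries() below for the typical
 * iteration pattern.
 */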
3123 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3124 {
3125         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3126
3127         if (pi->pos < ((void *)pi->pinfo + off)) {
3128                 struct lnet_ni_status *ns = pi->pos;
3129
3130                 pi->pos = ns + 1;
3131                 if (pi->pos > pi->end)
3132                         return NULL;
3133                 if (nid)
3134                         lnet_nid4_to_nid(ns->ns_nid, nid);
3135                 return &ns->ns_status;
3136         }
3137
3138         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3139                 struct lnet_ni_large_status *lns = pi->pos;
3140
3141                 if (pi->pos + 8 > pi->end)
3142                         /* Not safe to examine next */
3143                         return NULL;
3144                 pi->pos = lnet_ping_sts_next(lns);
3145                 if (pi->pos > pi->end)
3146                         return NULL;
3147                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3148                         continue;
3149                 if (nid)
3150                         *nid = lns->ns_nid;
3151                 return &lns->ns_status;
3152         }
3153         return NULL;
3154 }
3155
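/* Count the NI status entries present in a ping buffer. */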
3156 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3157 {
3158         struct lnet_ping_iter pi;
3159         u32 *st;
3160         int nnis = 0;
3161
3162         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3163              st = ping_iter_next(&pi, NULL))
3164                 nnis += 1;
3165
3166         return nnis;
3167 }
3168
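/*
 * Update a peer NI's health based on the status it reported: a DOWN
 * status is handled as a remote failure, while an UP status from a
 * peer NI we have never seen alive restores full health.
 */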
3169 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3170 {
3171         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN) {
3172                 lnet_net_lock(0);
3173                 lnet_handle_remote_failure_locked(lpni);
3174                 lnet_net_unlock(0);
3175         } else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3176                  !lpni->lpni_last_alive)
3177                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3178 }
3179
3180 /*
3181  * Build a peer from incoming data.
3182  *
3183  * The NIDs in the incoming data are supposed to be structured as follows:
3184  *  - loopback
3185  *  - primary NID
3186  *  - other NIDs in same net
3187  *  - NIDs in second net
3188  *  - NIDs in third net
3189  *  - ...
3190  * This is due to the way the list of NIDs in the data is created.
3191  *
3192  * Note that this function will mark the peer uptodate unless an
3193  * ENOMEM is encountered. All other errors are due to a conflict
3194  * between the DLC configuration and what discovery sees. We treat DLC
3195  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3196  * peer from becoming stuck in discovery.
3197  */
3198 static int lnet_peer_merge_data(struct lnet_peer *lp,
3199                                 struct lnet_ping_buffer *pbuf)
3200 {
3201         struct lnet_peer_net *lpn;
3202         struct lnet_peer_ni *lpni;
3203         struct lnet_nid *curnis = NULL;
3204         struct lnet_ni_large_status *addnis = NULL;
3205         struct lnet_nid *delnis = NULL;
3206         struct lnet_ping_iter pi;
3207         struct lnet_nid nid;
3208         u32 *stp;
3209         struct lnet_nid primary = {};
3210         bool want_large_primary;
3211         unsigned int flags;
3212         int ncurnis;
3213         int naddnis;
3214         int ndelnis;
3215         int nnis = 0;
3216         int i;
3217         int j;
3218         int rc;
3219         __u32 old_st;
3220
3221         flags = LNET_PEER_DISCOVERED;
3222         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3223                 flags |= LNET_PEER_MULTI_RAIL;
3224
3225         /*
3226          * Cache the routing feature for the peer; whether it is enabled
3227          * or disabled as reported by the remote peer.
3228          */
3229         spin_lock(&lp->lp_lock);
3230         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3231                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3232         else
3233                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3234         spin_unlock(&lp->lp_lock);
3235
3236         nnis = ping_info_count_entries(pbuf);
3237         nnis = max_t(int, lp->lp_nnis, nnis);
3238         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3239         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3240         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3241         if (!curnis || !addnis || !delnis) {
3242                 rc = -ENOMEM;
3243                 goto out;
3244         }
3245         ncurnis = 0;
3246         naddnis = 0;
3247         ndelnis = 0;
3248
3249         /* Construct the list of NIDs present in peer. */
3250         lpni = NULL;
3251         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3252                 curnis[ncurnis++] = lpni->lpni_nid;
3253
3254         /* Check for NIDs in pbuf not present in curnis[].
3255          * Skip the first, which is loop-back.  Take second as
3256          * primary, unless a large primary is found.
3257          */
3258         ping_iter_first(&pi, pbuf, NULL);
3259         stp = ping_iter_next(&pi, &nid);
3260         if (stp)
3261                 primary = nid;
3262         want_large_primary = (pbuf->pb_info.pi_features &
3263                               LNET_PING_FEAT_PRIMARY_LARGE);
3264         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3265                 for (j = 0; j < ncurnis; j++)
3266                         if (nid_same(&nid, &curnis[j]))
3267                                 break;
3268                 if (j == ncurnis) {
3269                         addnis[naddnis].ns_nid = nid;
3270                         addnis[naddnis].ns_status = *stp;
3271                         naddnis += 1;
3272                 }
3273                 if (want_large_primary && nid.nid_size) {
3274                         primary = nid;
3275                         want_large_primary = false;
3276                 }
3277         }
3278         /*
3279          * Check for NIDs in curnis[] not present in pbuf.
3280          * The nested loop starts at 1 to skip the loopback NID.
3281          *
3282          * But never add the loopback NID to delnis[]: if it is
3283          * present in curnis[] then this peer is for this node.
3284          */
3285         for (i = 0; i < ncurnis; i++) {
3286                 if (nid_is_lo0(&curnis[i]))
3287                         continue;
3288                 ping_iter_first(&pi, pbuf, NULL);
3289                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3290                         if (nid_same(&curnis[i], &nid)) {
3291                                 /*
3292                                  * update the information we cache for the
3293                                  * peer with the latest information we
3294                                  * received
3295                                  */
3296                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3297                                 if (lpni) {
3298                                         old_st = lpni->lpni_ns_status;
3299                                         lpni->lpni_ns_status = *stp;
3300                                         if (old_st != lpni->lpni_ns_status)
3301                                                 handle_disc_lpni_health(lpni);
3302                                         lnet_peer_ni_decref_locked(lpni);
3303                                 }
3304                                 break;
3305                         }
3306                 }
3307                 if (!stp)
3308                         delnis[ndelnis++] = curnis[i];
3309         }
3310
3311         /*
3312          * If we get here and discovery is disabled then we don't want
3313          * to add or delete any NIs. We just update the ones we have some
3314          * information on and call it a day.
3315          */
3316         rc = 0;
3317         if (lnet_is_discovery_disabled(lp))
3318                 goto out;
3319
3320         for (i = 0; i < naddnis; i++) {
3321                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3322                 if (rc) {
3323                         CERROR("Error adding NID %s to peer %s: %d\n",
3324                                libcfs_nidstr(&addnis[i].ns_nid),
3325                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3326                         if (rc == -ENOMEM)
3327                                 goto out;
3328                 }
3329                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3330                 if (lpni) {
3331                         lpni->lpni_ns_status = addnis[i].ns_status;
3332                         handle_disc_lpni_health(lpni);
3333                         lnet_peer_ni_decref_locked(lpni);
3334                 }
3335         }
3336
3337         for (i = 0; i < ndelnis; i++) {
3338                 /*
3339                  * for routers it's okay to delete the primary_nid because
3340                  * the upper layers don't really rely on it. So if we're
3341                  * being told that the router changed its primary_nid
3342                  * then it's okay to delete it.
3343                  */
3344                 if (lp->lp_rtr_refcount > 0)
3345                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3346                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3347                 if (rc) {
3348                         CERROR("Error deleting NID %s from peer %s: %d\n",
3349                                libcfs_nidstr(&delnis[i]),
3350                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3351                         if (rc == -ENOMEM)
3352                                 goto out;
3353                 }
3354         }
3355
3356         /* The peer net for the primary NID should be the first entry in the
3357          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3358          * be the first entry in its peer net's lpn_peer_nis list.
3359          */
3360         find_primary(&nid, pbuf);
3361         lpni = lnet_peer_ni_find_locked(&nid);
3362         if (!lpni) {
3363                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3364                        libcfs_nidstr(&nid));
3365                 goto out;
3366         }
3367
3368         lpn = lpni->lpni_peer_net;
3369         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3370                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3371
3372         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3373                 list_move(&lpni->lpni_peer_nis,
3374                           &lpni->lpni_peer_net->lpn_peer_nis);
3375
3376         lnet_peer_ni_decref_locked(lpni);
3377         /*
3378          * Errors other than -ENOMEM are due to peers having been
3379          * configured with DLC. Ignore these because DLC overrides
3380          * Discovery.
3381          */
3382         rc = 0;
3383 out:
3384         /* If this peer is a gateway, invoke the routing callback to update
3385          * the associated route status
3386          */
3387         if (lp->lp_rtr_refcount > 0)
3388                 lnet_router_discovery_ping_reply(lp, pbuf);
3389
3390         CFS_FREE_PTR_ARRAY(curnis, nnis);
3391         CFS_FREE_PTR_ARRAY(addnis, nnis);
3392         CFS_FREE_PTR_ARRAY(delnis, nnis);
3393         lnet_ping_buffer_decref(pbuf);
3394         CDEBUG(D_NET, "peer %s (%p): %d\n",
3395                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3396
3397         if (rc) {
3398                 spin_lock(&lp->lp_lock);
3399                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3400                 lp->lp_state |= LNET_PEER_FORCE_PING;
3401                 spin_unlock(&lp->lp_lock);
3402         }
3403         return rc;
3404 }
3405
3406 /*
3407  * The data in pbuf says lp is its primary peer, but the data was
3408  * received by a different peer. Try to update lp with the data.
3409  */
3410 static int
3411 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3412 {
3413         struct lnet_handle_md mdh;
3414
3415         /* Queue lp for discovery, and force it on the request queue. */
3416         lnet_net_lock(LNET_LOCK_EX);
3417         if (lnet_peer_queue_for_discovery(lp))
3418                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3419         lnet_net_unlock(LNET_LOCK_EX);
3420
3421         LNetInvalidateMDHandle(&mdh);
3422
3423         /*
3424          * Decide whether we can move the peer to the DATA_PRESENT state.
3425          *
3426          * We replace stale data for a multi-rail peer, repair PING_FAILED
3427          * status, and preempt FORCE_PING.
3428          *
3429          * If after that we have DATA_PRESENT, we merge it into this peer.
3430          */
3431         spin_lock(&lp->lp_lock);
3432         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3433                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3434                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3435                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3436                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3437                         lnet_ping_buffer_decref(pbuf);
3438                         pbuf = lp->lp_data;
3439                         lp->lp_data = NULL;
3440                 }
3441         }
3442         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3443                 lnet_ping_buffer_decref(lp->lp_data);
3444                 lp->lp_data = NULL;
3445                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3446         }
3447         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3448                 mdh = lp->lp_ping_mdh;
3449                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3450                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3451                 lp->lp_ping_error = 0;
3452         }
3453         if (lp->lp_state & LNET_PEER_FORCE_PING)
3454                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3455         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3456         spin_unlock(&lp->lp_lock);
3457
3458         if (!LNetMDHandleIsInvalid(mdh))
3459                 LNetMDUnlink(mdh);
3460
3461         if (pbuf)
3462                 return lnet_peer_merge_data(lp, pbuf);
3463
3464         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3465         return 0;
3466 }
3467
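/* Return true if @nid appears among the NI entries in @pbuf. */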
3468 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3469                                      struct lnet_ping_buffer *pbuf)
3470 {
3471         struct lnet_ping_iter pi;
3472         struct lnet_nid pnid;
3473         u32 *st;
3474
3475         for (st = ping_iter_first(&pi, pbuf, &pnid);
3476              st;
3477              st = ping_iter_next(&pi, &pnid))
3478                 if (nid_same(nid, &pnid))
3479                         return true;
3480         return false;
3481 }
3482
3483 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3484  * to the discovery queue a reference was taken that will prevent the peer from
3485  * actually being freed by this function. After this function exits the
3486  * discovery thread should call lnet_peer_discovery_complete() which will
3487  * drop that reference as well as wake any waiters that may also be holding a
3488  * ref on the peer
3489  */
3490 static int lnet_peer_deletion(struct lnet_peer *lp)
3491 __must_hold(&lp->lp_lock)
3492 {
3493         struct list_head rlist;
3494         struct lnet_route *route, *tmp;
3495         int sensitivity = lp->lp_health_sensitivity;
3496         int rc = 0;
3497
3498         INIT_LIST_HEAD(&rlist);
3499
3500         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3501                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3502
3503         /* no-op if lnet_peer_del() has already been called on this peer */
3504         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3505                 goto clear_discovering;
3506
3507         spin_unlock(&lp->lp_lock);
3508
3509         mutex_lock(&the_lnet.ln_api_mutex);
3510         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3511             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3512                 mutex_unlock(&the_lnet.ln_api_mutex);
3513                 spin_lock(&lp->lp_lock);
3514                 rc = -ESHUTDOWN;
3515                 goto clear_discovering;
3516         }
3517
3518         lnet_peer_cancel_discovery(lp);
3519         lnet_net_lock(LNET_LOCK_EX);
3520         list_for_each_entry_safe(route, tmp,
3521                                  &lp->lp_routes,
3522                                  lr_gwlist)
3523                 lnet_move_route(route, NULL, &rlist);
3524
3525         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3526         rc = lnet_peer_del_locked(lp);
3527         if (rc)
3528                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3529                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3530
3531         lnet_net_unlock(LNET_LOCK_EX);
3532
3533         list_for_each_entry_safe(route, tmp,
3534                                  &rlist, lr_list) {
3535                 /* re-add these routes */
3536                 lnet_add_route(route->lr_net,
3537                                route->lr_hops,
3538                                &route->lr_nid,
3539                                route->lr_priority,
3540                                sensitivity);
3541                 LIBCFS_FREE(route, sizeof(*route));
3542         }
3543
3544         mutex_unlock(&the_lnet.ln_api_mutex);
3545
3546         spin_lock(&lp->lp_lock);
3547
3548         rc = 0;
3549
3550 clear_discovering:
3551         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3552                           LNET_PEER_FORCE_PUSH);
3553
3554         return rc;
3555 }
3556
3557 /*
3558  * Update a peer using the data received.
3559  */
3560 static int lnet_peer_data_present(struct lnet_peer *lp)
3561 __must_hold(&lp->lp_lock)
3562 {
3563         struct lnet_ping_buffer *pbuf;
3564         struct lnet_peer_ni *lpni;
3565         struct lnet_nid nid;
3566         unsigned int flags;
3567         int rc = 0;
3568
3569         pbuf = lp->lp_data;
3570         lp->lp_data = NULL;
3571         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3572         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3573         spin_unlock(&lp->lp_lock);
3574
3575         /*
3576          * Modifications of peer structures are done while holding the
3577          * ln_api_mutex. A global lock is required because we may be
3578          * modifying multiple peer structures, and a mutex greatly
3579          * simplifies memory management.
3580          *
3581          * The actual changes to the data structures must also protect
3582          * against concurrent lookups, for which the lnet_net_lock in
3583          * LNET_LOCK_EX mode is used.
3584          */
3585         mutex_lock(&the_lnet.ln_api_mutex);
3586         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3587                 lnet_ping_buffer_decref(pbuf);
3588                 rc = -ESHUTDOWN;
3589                 goto out;
3590         }
3591
3592         /*
3593          * If this peer is not on the peer list then it is being torn
3594          * down, and our reference count may be all that is keeping it
3595          * alive. Don't do any work on it.
3596          */
3597         if (list_empty(&lp->lp_peer_list)) {
3598                 lnet_ping_buffer_decref(pbuf);
3599                 goto out;
3600         }
3601
3602         flags = LNET_PEER_DISCOVERED;
3603         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3604                 flags |= LNET_PEER_MULTI_RAIL;
3605
3606         /*
3607          * Check whether the primary NID in the message matches the
3608          * primary NID of the peer. If it does, update the peer; if
3609          * it does not, check whether there is already a peer with
3610          * that primary NID. If no such peer exists, try to update
3611          * the primary NID of the current peer (allowed if it was
3612          * created due to message traffic) and complete the update.
3613          * If the peer did exist, hand off the data to it.
3614          *
3615          * The peer for the loopback interface is a special case: this
3616          * is the peer for the local node, and we want to set its
3617          * primary NID to the correct value here. Moreover, this peer
3618          * can show up with only the loopback NID in the ping buffer.
3619          */
3620         if (!find_primary(&nid, pbuf)) {
3621                 lnet_ping_buffer_decref(pbuf);
3622                 goto out;
3623         }
3624         if (nid_is_lo0(&lp->lp_primary_nid)) {
3625                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3626                 if (rc)
3627                         lnet_ping_buffer_decref(pbuf);
3628                 else
3629                         rc = lnet_peer_merge_data(lp, pbuf);
3630         /*
3631          * If our cached primary NID for the peer is present in the ping
3632          * info returned by the peer, but it is not the primary NID the
3633          * peer reported, and discovery is disabled, then we don't want to
3634          * update our local peer info by adding or removing NIDs; we just
3635          * want to update the status of the NIDs that we currently have
3636          * recorded for that peer.
3637          */
3638         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3639                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3640                     lnet_is_discovery_disabled(lp))) {
3641                 rc = lnet_peer_merge_data(lp, pbuf);
3642         } else {
3643                 lpni = lnet_peer_ni_find_locked(&nid);
3644                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3645                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3646                         if (rc) {
3647                                 CERROR("Primary NID error %s versus %s: %d\n",
3648                                        libcfs_nidstr(&lp->lp_primary_nid),
3649                                        libcfs_nidstr(&nid), rc);
3650                                 lnet_ping_buffer_decref(pbuf);
3651                         } else {
3652                                 rc = lnet_peer_merge_data(lp, pbuf);
3653                         }
3654                         if (lpni)
3655                                 lnet_peer_ni_decref_locked(lpni);
3656                 } else {
3657                         struct lnet_peer *new_lp;
3658                         new_lp = lpni->lpni_peer_net->lpn_peer;
3659                         /*
3660                          * if lp has discovery/MR enabled that means new_lp
3661                          * should have discovery/MR enabled as well, since
3662                          * it's the same peer, which we're about to merge
3663                          */
3664                         spin_lock(&lp->lp_lock);
3665                         spin_lock(&new_lp->lp_lock);
3666                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3667                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3668                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3669                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3670                         /* If we're processing a ping reply then we may be
3671                          * about to send a push to the peer that we ping'd.
3672                          * Since the ping reply that we're processing was
3673                          * received by lp, we need to set the discovery source
3674                          * NID for new_lp to the NID stored in lp.
3675                          */
3676                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3677                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3678                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3679                         }
3680                         spin_unlock(&new_lp->lp_lock);
3681                         spin_unlock(&lp->lp_lock);
3682
3683                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3684                         lnet_consolidate_routes_locked(lp, new_lp);
3685                         lnet_peer_ni_decref_locked(lpni);
3686                 }
3687         }
3688 out:
3689         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3690                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3691                lp->lp_state);
3692         mutex_unlock(&the_lnet.ln_api_mutex);
3693
3694         spin_lock(&lp->lp_lock);
3695         /* Tell discovery to re-check the peer immediately. */
3696         if (!rc)
3697                 rc = LNET_REDISCOVER_PEER;
3698         return rc;
3699 }
3700
3701 /*
3702  * A ping failed. Clear the PING_FAILED state and set the
3703  * FORCE_PING state, to ensure a retry even if discovery is
3704  * disabled. This avoids being left with incorrect state.
3705  */
3706 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3707 __must_hold(&lp->lp_lock)
3708 {
3709         struct lnet_handle_md mdh;
3710         int rc;
3711
3712         mdh = lp->lp_ping_mdh;
3713         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3714         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3715         lp->lp_state |= LNET_PEER_FORCE_PING;
3716         rc = lp->lp_ping_error;
3717         lp->lp_ping_error = 0;
3718         spin_unlock(&lp->lp_lock);
3719
3720         if (!LNetMDHandleIsInvalid(mdh))
3721                 LNetMDUnlink(mdh);
3722
3723         CDEBUG(D_NET, "peer %s:%d\n",
3724                libcfs_nidstr(&lp->lp_primary_nid), rc);
3725
3726         spin_lock(&lp->lp_lock);
3727         return rc ? rc : LNET_REDISCOVER_PEER;
3728 }
3729
3730 /* Active side of ping. */
3731 static int lnet_peer_send_ping(struct lnet_peer *lp)
3732 __must_hold(&lp->lp_lock)
3733 {
3734         int bytes;
3735         int rc;
3736         int cpt;
3737
3738         lp->lp_state |= LNET_PEER_PING_SENT;
3739         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3740         spin_unlock(&lp->lp_lock);
3741
3742         cpt = lnet_net_lock_current();
3743         /* Refcount for MD. */
3744         lnet_peer_addref_locked(lp);
3745         lnet_net_unlock(cpt);
3746
3747         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3748
3749         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3750                             the_lnet.ln_dc_handler, false);
3751         /* If LNetMDBind() in lnet_send_ping() fails we need to drop the
3752          * refcount on the peer ourselves; otherwise LNetMDUnlink() will
3753          * be called, which will eventually drop it.
3754          */
3755         if (rc > 0) {
3756                 lnet_net_lock(cpt);
3757                 lnet_peer_decref_locked(lp);
3758                 lnet_net_unlock(cpt);
3759                 rc = -rc; /* change the rc to negative value */
3760                 goto fail_error;
3761         } else if (rc < 0) {
3762                 goto fail_error;
3763         }
3764
3765         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3766
3767         spin_lock(&lp->lp_lock);
3768         return 0;
3769
3770 fail_error:
3771         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3772         /*
3773          * The errors that get us here are considered hard errors and
3774          * cause Discovery to terminate. So we clear PING_SENT, but do
3775          * not set either PING_FAILED or FORCE_PING. In fact we need
3776          * to clear PING_FAILED, because the unlink event handler will
3777          * have set it if we called LNetMDUnlink() above.
3778          */
3779         spin_lock(&lp->lp_lock);
3780         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3781         return rc;
3782 }
3783
3784 /*
3785  * This function exists because you cannot call LNetMDUnlink() from an
3786  * event handler.
3787  */
3788 static int lnet_peer_push_failed(struct lnet_peer *lp)
3789 __must_hold(&lp->lp_lock)
3790 {
3791         struct lnet_handle_md mdh;
3792         int rc;
3793
3794         mdh = lp->lp_push_mdh;
3795         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3796         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3797         rc = lp->lp_push_error;
3798         lp->lp_push_error = 0;
3799         spin_unlock(&lp->lp_lock);
3800
3801         if (!LNetMDHandleIsInvalid(mdh))
3802                 LNetMDUnlink(mdh);
3803
3804         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3805         spin_lock(&lp->lp_lock);
3806         return rc ? rc : LNET_REDISCOVER_PEER;
3807 }
3808
3809 /*
3810  * Mark the peer as discovered.
3811  */
3812 static int lnet_peer_discovered(struct lnet_peer *lp)
3813 __must_hold(&lp->lp_lock)
3814 {
3815         lp->lp_state |= LNET_PEER_DISCOVERED;
3816         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3817                           LNET_PEER_REDISCOVER);
3818
3819         lp->lp_dc_error = 0;
3820
3821         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3822
3823         return 0;
3824 }
3825
3826 /* Active side of push. */
3827 static int lnet_peer_send_push(struct lnet_peer *lp)
3828 __must_hold(&lp->lp_lock)
3829 {
3830         struct lnet_ping_buffer *pbuf;
3831         struct lnet_processid id;
3832         struct lnet_md md;
3833         int cpt;
3834         int rc;
3835
3836         /* Don't push to a non-multi-rail peer. */
3837         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3838                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3839                 /* if peer's NIDs are uptodate then peer is discovered */
3840                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3841                         rc = lnet_peer_discovered(lp);
3842                         return rc;
3843                 }
3844
3845                 return 0;
3846         }
3847
3848         lp->lp_state |= LNET_PEER_PUSH_SENT;
3849         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3850         spin_unlock(&lp->lp_lock);
3851
3852         cpt = lnet_net_lock_current();
3853         pbuf = the_lnet.ln_ping_target;
3854         lnet_ping_buffer_addref(pbuf);
3855         lnet_net_unlock(cpt);
3856
3857         /* Push source MD */
3858         md.start     = &pbuf->pb_info;
3859         md.length    = pbuf->pb_nbytes;
3860         md.threshold = 2; /* Put/Ack */
3861         md.max_size  = 0;
3862         md.options   = LNET_MD_TRACK_RESPONSE;
3863         md.handler   = the_lnet.ln_dc_handler;
3864         md.user_ptr  = lp;
3865
3866         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3867         if (rc) {
3868                 lnet_ping_buffer_decref(pbuf);
3869                 CERROR("Can't bind push source MD: %d\n", rc);
3870                 goto fail_error;
3871         }
3872
3873         cpt = lnet_net_lock_current();
3874         /* Refcount for MD. */
3875         lnet_peer_addref_locked(lp);
3876         id.pid = LNET_PID_LUSTRE;
3877         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3878                 id.nid = lp->lp_disc_dst_nid;
3879         else
3880                 id.nid = lp->lp_primary_nid;
3881         lnet_net_unlock(cpt);
3882
3883         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3884                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3885                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3886
3887         /*
3888          * Reset the discovery NIDs. There is no need to restrict sending
3889          * from that source if we call lnet_push_update_to_peers(). They
3890          * will be set to specific NIDs if we initiate discovery from
3891          * scratch.
3892          */
3893         lp->lp_disc_src_nid = LNET_ANY_NID;
3894         lp->lp_disc_dst_nid = LNET_ANY_NID;
3895
3896         if (rc)
3897                 goto fail_unlink;
3898
3899         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3900
3901         spin_lock(&lp->lp_lock);
3902         return 0;
3903
3904 fail_unlink:
3905         LNetMDUnlink(lp->lp_push_mdh);
3906         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3907 fail_error:
3908         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3909                lp, rc);
3910         /*
3911          * The errors that get us here are considered hard errors and
3912          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3913          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3914          * because the unlink event handler will have set it if we
3915          * called LNetMDUnlink() above.
3916          */
3917         spin_lock(&lp->lp_lock);
3918         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3919         return rc;
3920 }
3921
3922 /*
3923  * Wait for work to be queued or some other change that must be
3924  * attended to. Returns non-zero if the discovery thread should shut
3925  * down.
3926  */
3927 static int lnet_peer_discovery_wait_for_work(void)
3928 {
3929         int cpt;
3930         int rc = 0;
3931
3932         DEFINE_WAIT(wait);
3933
3934         cpt = lnet_net_lock_current();
3935         for (;;) {
3936                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3937                                 TASK_INTERRUPTIBLE);
3938                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3939                         break;
3940                 if (lnet_push_target_resize_needed() ||
3941                     the_lnet.ln_push_target->pb_needs_post)
3942                         break;
3943                 if (!list_empty(&the_lnet.ln_dc_request))
3944                         break;
3945                 if (!list_empty(&the_lnet.ln_msg_resend))
3946                         break;
3947                 lnet_net_unlock(cpt);
3948
3949                 /*
3950                  * Wake up at least once per second to check whether any
3951                  * peers have been stuck on the working queue for longer
3952                  * than the peer timeout.
3953                  */
3954                 schedule_timeout(cfs_time_seconds(1));
3955                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3956                 cpt = lnet_net_lock_current();
3957         }
3958         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3959
3960         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3961                 rc = -ESHUTDOWN;
3962
3963         lnet_net_unlock(cpt);
3964
3965         CDEBUG(D_NET, "woken: %d\n", rc);
3966
3967         return rc;
3968 }
3969
3970 /*
3971  * Messages that were pending on a destroyed peer will be put on a global
3972  * resend list. The discovery thread checks the resend list when it
3973  * wakes up and resends the messages. These messages can still be
3974  * sendable in case the lpni which was the initial cause of the
3975  * re-queue was transferred to another peer.
3976  *
3977  * It is possible that LNet could be shut down while we're iterating
3978  * through the list. lnet_shutdown_lndnets() will attempt to access the
3979  * resend list, but will have to wait until the spinlock is released, by
3980  * which time there shouldn't be any more messages on the resend list.
3981  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3982  * for the messages so they can be released. The other case is that
3983  * lnet_shutdown_lndnets() can finalize all the messages before this
3984  * function can visit the resend list, in which case this function will be
3985  * a no-op.
3986  */
3987 static void lnet_resend_msgs(void)
3988 {
3989         struct lnet_msg *msg, *tmp;
3990         LIST_HEAD(resend);
3991         int rc;
3992
3993         spin_lock(&the_lnet.ln_msg_resend_lock);
3994         list_splice(&the_lnet.ln_msg_resend, &resend);
3995         spin_unlock(&the_lnet.ln_msg_resend_lock);
3996
3997         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3998                 list_del_init(&msg->msg_list);
3999                 rc = lnet_send(&msg->msg_src_nid_param, msg,
4000                                &msg->msg_rtr_nid_param);
4001                 if (rc < 0) {
4002                         CNETERR("Error sending %s to %s: %d\n",
4003                                lnet_msgtyp2str(msg->msg_type),
4004                                libcfs_idstr(&msg->msg_target), rc);
4005                         lnet_finalize(msg, rc);
4006                 }
4007         }
4008 }
4009
4010 /* The discovery thread. */
4011 static int lnet_peer_discovery(void *arg)
4012 {
4013         struct lnet_peer *lp;
4014         int retry = 3;
4015         int rc;
4016
4017         wait_for_completion(&the_lnet.ln_started);
4018
4019         CDEBUG(D_NET, "started\n");
4020
4021         for (;;) {
4022                 if (lnet_peer_discovery_wait_for_work())
4023                         break;
4024
4025                 if (lnet_push_target_resize_needed())
4026                         lnet_push_target_resize();
4027                 else if (the_lnet.ln_push_target->pb_needs_post)
4028                         lnet_push_target_post(the_lnet.ln_push_target,
4029                                               &the_lnet.ln_push_target_md);
4030
4031                 lnet_resend_msgs();
4032
4033                 lnet_net_lock(LNET_LOCK_EX);
4034                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4035                         lnet_net_unlock(LNET_LOCK_EX);
4036                         break;
4037                 }
4038
4039                 /*
4040                  * Process all incoming discovery work requests.  When
4041                  * discovery must wait on a peer to change state, it
4042                  * is added to the tail of the ln_dc_working queue. A
4043                  * timestamp keeps track of when the peer was added,
4044                  * so we can time out discovery requests that take too
4045                  * long.
4046                  */
4047                 while (!list_empty(&the_lnet.ln_dc_request)) {
4048                         lp = list_first_entry(&the_lnet.ln_dc_request,
4049                                               struct lnet_peer, lp_dc_list);
4050                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4051                         /*
4052                          * set the time the peer was put on the dc_working
4053                          * queue. It shouldn't remain on the queue
4054                          * forever, in case the GET message (for ping)
4055                          * doesn't get a REPLY or the PUT message (for
4056                          * push) doesn't get an ACK.
4057                          */
4058                         lp->lp_last_queued = ktime_get_real_seconds();
4059                         lnet_net_unlock(LNET_LOCK_EX);
4060
4061                         if (lnet_push_target_resize_needed())
4062                                 lnet_push_target_resize();
4063                         else if (the_lnet.ln_push_target->pb_needs_post)
4064                                 lnet_push_target_post(the_lnet.ln_push_target,
4065                                                       &the_lnet.ln_push_target_md);
4066
4067                         /*
4068                          * Select an action depending on the state of
4069                          * the peer and whether discovery is disabled.
4070                          * The check whether discovery is disabled is
4071                          * done after the code that handles processing
4072                          * for arrived data, cleanup for failures, and
4073                          * forcing a Ping or Push.
4074                          */
4075                         spin_lock(&lp->lp_lock);
4076                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4077                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4078                                 lp->lp_state);
4079                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4080                                             LNET_PEER_MARK_DELETED))
4081                                 rc = lnet_peer_deletion(lp);
4082                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4083                                 rc = lnet_peer_data_present(lp);
4084                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4085                                 rc = lnet_peer_ping_failed(lp);
4086                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4087                                 rc = lnet_peer_push_failed(lp);
4088                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4089                                 rc = lnet_peer_send_ping(lp);
4090                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4091                                 rc = lnet_peer_send_push(lp);
4092                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4093                                 rc = lnet_peer_send_ping(lp);
4094                         else if (lnet_peer_needs_push(lp))
4095                                 rc = lnet_peer_send_push(lp);
4096                         else
4097                                 rc = lnet_peer_discovered(lp);
4098                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4099                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4100                                 lp->lp_state, rc);
4101
4102                         if (rc == LNET_REDISCOVER_PEER) {
4103                                 spin_unlock(&lp->lp_lock);
4104                                 lnet_net_lock(LNET_LOCK_EX);
4105                                 list_move(&lp->lp_dc_list,
4106                                           &the_lnet.ln_dc_request);
4107                         } else if (rc ||
4108                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4109                                 spin_unlock(&lp->lp_lock);
4110                                 lnet_net_lock(LNET_LOCK_EX);
4111                                 lnet_peer_discovery_complete(lp, rc);
4112                         } else {
4113                                 spin_unlock(&lp->lp_lock);
4114                                 lnet_net_lock(LNET_LOCK_EX);
4115                         }
4116
4117                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4118                                 break;
4119
4120                 }
4121
4122                 lnet_net_unlock(LNET_LOCK_EX);
4123         }
4124
4125 cleanup:
4126         CDEBUG(D_NET, "stopping\n");
4127         /*
4128          * Clean up before telling lnet_peer_discovery_stop() that
4129          * we're done. Use wake_up() below to somewhat reduce the
4130          * size of the thundering herd if there are multiple threads
4131          * waiting on discovery of a single peer.
4132          */
4133
4134         /* Queue cleanup 1: stop all pending pings and pushes. */
4135         lnet_net_lock(LNET_LOCK_EX);
4136         while (!list_empty(&the_lnet.ln_dc_working)) {
4137                 lp = list_first_entry(&the_lnet.ln_dc_working,
4138                                       struct lnet_peer, lp_dc_list);
4139                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4140                 lnet_net_unlock(LNET_LOCK_EX);
4141                 lnet_peer_cancel_discovery(lp);
4142                 lnet_net_lock(LNET_LOCK_EX);
4143         }
4144         lnet_net_unlock(LNET_LOCK_EX);
4145
4146         /* Queue cleanup 2: wait for the expired queue to clear. */
4147         while (!list_empty(&the_lnet.ln_dc_expired))
4148                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4149
4150         /* Queue cleanup 3: clear the request queue. */
4151         lnet_net_lock(LNET_LOCK_EX);
4152         while (!list_empty(&the_lnet.ln_dc_request)) {
4153                 lp = list_first_entry(&the_lnet.ln_dc_request,
4154                                       struct lnet_peer, lp_dc_list);
4155                 lnet_net_unlock(LNET_LOCK_EX);
4156                 spin_lock(&lp->lp_lock);
4157                 if (lp->lp_state & LNET_PEER_PING_FAILED)
4158                         (void)lnet_peer_ping_failed(lp);
4159                 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4160                         (void)lnet_peer_push_failed(lp);
4161                 spin_unlock(&lp->lp_lock);
4162                 lnet_net_lock(LNET_LOCK_EX);
4163                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4164         }
4165         lnet_net_unlock(LNET_LOCK_EX);
4166
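        /* Re-run the queue cleanup if the discovery event handler is still
         * in use.
         */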
4167         if (lnet_assert_handler_unused(the_lnet.ln_dc_handler, --retry <= 0))
4168                 goto cleanup;
4169
4170         the_lnet.ln_dc_handler = NULL;
4171
4172         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4173         wake_up(&the_lnet.ln_dc_waitq);
4174
4175         CDEBUG(D_NET, "stopped\n");
4176
4177         return 0;
4178 }
4179
4180 /* ln_api_mutex is held on entry. */
4181 int lnet_peer_discovery_start(void)
4182 {
4183         struct task_struct *task;
4184         int rc = 0;
4185
4186         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4187                 return -EALREADY;
4188
4189         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4190         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4191         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4192         if (IS_ERR(task)) {
4193                 rc = PTR_ERR(task);
4194                 CERROR("Can't start peer discovery thread: %d\n", rc);
4195
4196                 the_lnet.ln_dc_handler = NULL;
4197
4198                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4199         }
4200
4201         CDEBUG(D_NET, "discovery start: %d\n", rc);
4202
4203         return rc;
4204 }
4205
4206 /* ln_api_mutex is held on entry. */
4207 void lnet_peer_discovery_stop(void)
4208 {
4209         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4210                 return;
4211
4212         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4213         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4214
4215         /* In the LNetNIInit() path we may be stopping discovery before it
4216          * entered its work loop
4217          */
4218         if (!completion_done(&the_lnet.ln_started))
4219                 complete(&the_lnet.ln_started);
4220         else
4221                 wake_up(&the_lnet.ln_dc_waitq);
4222
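        /* Drop the api mutex for the wait: the discovery thread presumably
         * needs to take ln_api_mutex itself to drain its queues and reach
         * LNET_DC_STATE_SHUTDOWN, so holding it here could deadlock the
         * shutdown handshake.
         */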
4223         mutex_unlock(&the_lnet.ln_api_mutex);
4224         wait_event(the_lnet.ln_dc_waitq,
4225                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4226         mutex_lock(&the_lnet.ln_api_mutex);
4227
4228         LASSERT(list_empty(&the_lnet.ln_dc_request));
4229         LASSERT(list_empty(&the_lnet.ln_dc_working));
4230         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4231
4232         CDEBUG(D_NET, "discovery stopped\n");
4233 }
4234
4235 /* Debugging */
4236
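/* Dump a single peer NI to the debug log.  The columns mirror the CDEBUG()
 * format string below: NID, refcount, aliveness ("up"/"down" where aliveness
 * tracking applies, otherwise "NA"), the interface peer tx credit limit, then
 * the router/min-router/tx/min-tx credit counters and the queued byte count
 * (lpni_txqnob).
 */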
4237 void
4238 lnet_debug_peer(struct lnet_nid *nid)
4239 {
4240         char                    *aliveness = "NA";
4241         struct lnet_peer_ni     *lp;
4242         int                     cpt;
4243
4244         cpt = lnet_nid2cpt(nid, NULL);
4245         lnet_net_lock(cpt);
4246
4247         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4248         if (IS_ERR(lp)) {
4249                 lnet_net_unlock(cpt);
4250                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4251                 return;
4252         }
4253
4254         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4255                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4256
4257         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4258                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4259                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4260                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4261                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4262
4263         lnet_peer_ni_decref_locked(lp);
4264
4265         lnet_net_unlock(cpt);
4266 }
4267
4268 /* Gathering information for userspace. */
4269
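/* This appears to be the legacy, NID4-only iterator used by the older ioctl
 * interface: find the peer_index'th NID4 peer NI hashed under CPT *cpt_iter
 * and copy its refcount, aliveness and credit counters into the caller's
 * buffers.  Peer NIs with large-address NIDs are skipped, so only nid4 peers
 * are visible through this call.  Returns 0 if an entry was found and
 * -ENOENT otherwise.
 */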
4270 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4271                           char aliveness[LNET_MAX_STR_LEN],
4272                           __u32 *cpt_iter, __u32 *refcount,
4273                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4274                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4275                           __u32 *peer_tx_qnob)
4276 {
4277         struct lnet_peer_table          *peer_table;
4278         struct lnet_peer_ni             *lp;
4279         int                             j;
4280         int                             lncpt;
4281         bool                            found = false;
4282
4283         /* get the number of CPTs */
4284         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4285
4286         /* if the cpt number to be examined is >= the number of cpts in
4287          * the system then indicate that there are no more cpts to examine
4288          */
4289         if (*cpt_iter >= lncpt)
4290                 return -ENOENT;
4291
4292         /* get the current table */
4293         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4294         /* if the ptable is NULL then there are no more cpts to examine */
4295         if (peer_table == NULL)
4296                 return -ENOENT;
4297
4298         lnet_net_lock(*cpt_iter);
4299
4300         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4301                 struct list_head *peers = &peer_table->pt_hash[j];
4302
4303                 list_for_each_entry(lp, peers, lpni_hashlist) {
4304                         if (!nid_is_nid4(&lp->lpni_nid))
4305                                 continue;
4306                         if (peer_index-- > 0)
4307                                 continue;
4308
4309                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4310                         if (lnet_isrouter(lp) ||
4311                             lnet_peer_aliveness_enabled(lp))
4312                                 snprintf(aliveness, LNET_MAX_STR_LEN, "%s",
4313                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4314
4315                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4316                         *refcount = kref_read(&lp->lpni_kref);
4317                         *ni_peer_tx_credits =
4318                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4319                         *peer_tx_credits = lp->lpni_txcredits;
4320                         *peer_rtr_credits = lp->lpni_rtrcredits;
4321                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4322                         *peer_tx_qnob = lp->lpni_txqnob;
4323
4324                         found = true;
4325                 }
4326
4327         }
4328         lnet_net_unlock(*cpt_iter);
4329
4330         *cpt_iter = lncpt;
4331
4332         return found ? 0 : -ENOENT;
4333 }
4334
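/* Stream per-NI records for one peer out to userspace.  For each NID4 peer NI
 * the bulk buffer receives, in this order:
 *
 *	lnet_nid_t				nid4
 *	struct lnet_peer_ni_credit_info		lpni_info
 *	struct lnet_ioctl_element_stats		lpni_stats
 *	struct lnet_ioctl_element_msg_stats	lpni_msg_stats
 *	struct lnet_ioctl_peer_ni_hstats	lpni_hstats
 *
 * i.e. lp_nnis records, matching the size computed below.  If the caller's
 * buffer (prcfg_size) is too small, the required size is passed back and
 * -E2BIG is returned so userspace can retry with a larger buffer.
 */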
4335 /* ln_api_mutex is held, which keeps the peer list stable */
4336 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4337 {
4338         struct lnet_ioctl_element_stats *lpni_stats;
4339         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4340         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4341         struct lnet_peer_ni_credit_info *lpni_info;
4342         struct lnet_peer_ni *lpni;
4343         struct lnet_peer *lp;
4344         lnet_nid_t nid4;
4345         struct lnet_nid nid;
4346         __u32 size;
4347         int rc;
4348
4349         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4350         lp = lnet_find_peer(&nid);
4351         if (!lp) {
4352                 rc = -ENOENT;
4353                 goto out;
4354         }
4355
4356         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4357                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4358         size *= lp->lp_nnis;
4359         if (size > cfg->prcfg_size) {
4360                 cfg->prcfg_size = size;
4361                 rc = -E2BIG;
4362                 goto out_lp_decref;
4363         }
4364
4365         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4366         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4367         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4368         cfg->prcfg_count = lp->lp_nnis;
4369         cfg->prcfg_size = size;
4370         cfg->prcfg_state = lp->lp_state;
4371
4372         /* Allocate helper buffers. */
4373         rc = -ENOMEM;
4374         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4375         if (!lpni_info)
4376                 goto out_lp_decref;
4377         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4378         if (!lpni_stats)
4379                 goto out_free_info;
4380         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4381         if (!lpni_msg_stats)
4382                 goto out_free_stats;
4383         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4384         if (!lpni_hstats)
4385                 goto out_free_msg_stats;
4386
4388         lpni = NULL;
4389         rc = -EFAULT;
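        /* rc is preset to -EFAULT so that any failed copy_to_user() below can
         * jump straight to the shared cleanup labels; it is set to 0 only
         * once every record has been copied out.
         */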
4390         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4391                 if (!nid_is_nid4(&lpni->lpni_nid))
4392                         continue;
4393                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4394                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4395                         goto out_free_hstats;
4396                 bulk += sizeof(nid4);
4397
4398                 memset(lpni_info, 0, sizeof(*lpni_info));
4399                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4400                 if (lnet_isrouter(lpni) ||
4401                     lnet_peer_aliveness_enabled(lpni))
4402                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "%s",
4403                                  lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4404
4405                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4406                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4407                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4408                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4409                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4410                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4411                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4412                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4413                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4414                         goto out_free_hstats;
4415                 bulk += sizeof(*lpni_info);
4416
4417                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4418                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4419                                                             LNET_STATS_TYPE_SEND);
4420                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4421                                                             LNET_STATS_TYPE_RECV);
4422                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4423                                                             LNET_STATS_TYPE_DROP);
4424                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4425                         goto out_free_hstats;
4426                 bulk += sizeof(*lpni_stats);
4427                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4428                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4429                         goto out_free_hstats;
4430                 bulk += sizeof(*lpni_msg_stats);
4431                 lpni_hstats->hlpni_network_timeout =
4432                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4433                 lpni_hstats->hlpni_remote_dropped =
4434                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4435                 lpni_hstats->hlpni_remote_timeout =
4436                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4437                 lpni_hstats->hlpni_remote_error =
4438                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4439                 lpni_hstats->hlpni_health_value =
4440                   atomic_read(&lpni->lpni_healthv);
4441                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4442                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4443                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4444                         goto out_free_hstats;
4445                 bulk += sizeof(*lpni_hstats);
4446         }
4447         rc = 0;
4448
4449 out_free_hstats:
4450         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4451 out_free_msg_stats:
4452         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4453 out_free_stats:
4454         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4455 out_free_info:
4456         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4457 out_lp_decref:
4458         lnet_peer_decref_locked(lp);
4459 out:
4460         return rc;
4461 }
4462
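/* Eligibility checks below, in order: the monitor thread must still be
 * running, the peer NI must not already be queued for recovery, its health
 * must be below LNET_MAX_HEALTH_VALUE, it must have been seen alive at least
 * once, and (when lnet_recovery_limit is set) it must not have been dead for
 * longer than the recovery limit.
 */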
4463 /* must hold net_lock/0 */
4464 void
4465 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4466                                      struct list_head *recovery_queue,
4467                                      time64_t now)
4468 {
4469         /* the monitor thread (mt) could've shut down and cleaned up the queues */
4470         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4471                 return;
4472
4473         if (!list_empty(&lpni->lpni_recovery))
4474                 return;
4475
4476         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4477                 return;
4478
4479         if (!lpni->lpni_last_alive) {
4480                 CDEBUG(D_NET,
4481                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4482                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4483                        lpni->lpni_last_alive);
4484                 return;
4485         }
4486
4487         if (lnet_recovery_limit &&
4488             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4489                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4490                        libcfs_nidstr(&lpni->lpni_nid),
4491                        lpni->lpni_last_alive);
4492                 /* Reset the ping count so that if this peer NI is added back to
4493                  * the recovery queue we will send the first ping right away.
4494                  */
4495                 lpni->lpni_ping_count = 0;
4496                 return;
4497         }
4498
4499         /* This peer NI is going on the recovery queue, so take a ref on it */
4500         lnet_peer_ni_addref_locked(lpni);
4501
4502         lnet_peer_ni_set_next_ping(lpni, now);
4503
4504         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4505                libcfs_nidstr(&lpni->lpni_nid),
4506                lpni->lpni_ping_count,
4507                lpni->lpni_next_ping,
4508                lpni->lpni_last_alive,
4509                atomic_read(&lpni->lpni_healthv));
4510
4511         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4512 }
4513
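/* Set the health value of a single peer NI (all == false) or of every peer NI
 * in the system (all == true), queueing each affected NI for recovery as
 * needed.  A hypothetical caller resetting one peer NI to full health might
 * look like the sketch below (not part of this file):
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	lnet_peer_ni_set_healthv(&nid, LNET_MAX_HEALTH_VALUE, false);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */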
4514 /* Call with the ln_api_mutex held */
4515 void
4516 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4517 {
4518         struct lnet_peer_table *ptable;
4519         struct lnet_peer *lp;
4520         struct lnet_peer_net *lpn;
4521         struct lnet_peer_ni *lpni;
4522         int lncpt;
4523         int cpt;
4524         time64_t now;
4525
4526         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4527                 return;
4528
4529         now = ktime_get_seconds();
4530
4531         if (!all) {
4532                 lnet_net_lock(LNET_LOCK_EX);
4533                 lpni = lnet_peer_ni_find_locked(nid);
4534                 if (!lpni) {
4535                         lnet_net_unlock(LNET_LOCK_EX);
4536                         return;
4537                 }
4538                 lnet_set_lpni_healthv_locked(lpni, value);
4539                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4540                                              &the_lnet.ln_mt_peerNIRecovq, now);
4541                 lnet_peer_ni_decref_locked(lpni);
4542                 lnet_net_unlock(LNET_LOCK_EX);
4543                 return;
4544         }
4545
4546         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4547
4548         /*
4549          * Walk all the peers and reset the health value for each one to the
4550          * specified value.
4551          */
4552         lnet_net_lock(LNET_LOCK_EX);
4553         for (cpt = 0; cpt < lncpt; cpt++) {
4554                 ptable = the_lnet.ln_peer_tables[cpt];
4555                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4556                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4557                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4558                                                     lpni_peer_nis) {
4559                                         lnet_set_lpni_healthv_locked(lpni,
4560                                                                      value);
4561                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4562                                              &the_lnet.ln_mt_peerNIRecovq, now);
4563                                 }
4564                         }
4565                 }
4566         }
4567         lnet_net_unlock(LNET_LOCK_EX);
4568 }
4569