lnet/lnet/peer.c (fs/lustre-release.git)
1 // SPDX-License-Identifier: GPL-2.0
2
3 /* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4  * Use is subject to license terms.
5  *
6  * Copyright (c) 2012, 2017, Intel Corporation.
7  */
8
9 /* This file is part of Lustre, http://www.lustre.org/ */
10
11 #define DEBUG_SUBSYSTEM S_LNET
12
13 #include <linux/sched.h>
14 #ifdef HAVE_SCHED_HEADERS
15 #include <linux/sched/signal.h>
16 #endif
17 #include <linux/uaccess.h>
18
19 #include <lnet/udsp.h>
20 #include <lnet/lib-lnet.h>
21 #include <uapi/linux/lnet/lnet-dlc.h>
22
23 /* Value indicating that recovery needs to re-check a peer immediately. */
24 #define LNET_REDISCOVER_PEER    (1)
25
26 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
27 static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
28                             unsigned int flags);
29
30 static void
31 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
32 {
33         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
34                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
35                 lnet_peer_ni_decref_locked(lpni);
36         }
37 }
38
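/* Called when a local net is added. Walk the peer_nis that were created
 * before their net existed locally, attach any that now belong to @net,
 * initialize their credits from the net tunables, and drop them from the
 * remote peer_ni list.
 */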
39 void
40 lnet_peer_net_added(struct lnet_net *net)
41 {
42         struct lnet_peer_ni *lpni, *tmp;
43
44         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
45                                  lpni_on_remote_peer_ni_list) {
46
47                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
48                         lpni->lpni_net = net;
49
50                         spin_lock(&lpni->lpni_lock);
51                         lpni->lpni_txcredits =
52                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
53                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
54                         lpni->lpni_rtrcredits =
55                                 lnet_peer_buffer_credits(lpni->lpni_net);
56                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
57                         spin_unlock(&lpni->lpni_lock);
58
59                         lnet_peer_remove_from_remote_list(lpni);
60                 }
61         }
62 }
63
64 static void
65 lnet_peer_tables_destroy(void)
66 {
67         struct lnet_peer_table  *ptable;
68         struct list_head        *hash;
69         int                     i;
70         int                     j;
71
72         if (!the_lnet.ln_peer_tables)
73                 return;
74
75         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
76                 hash = ptable->pt_hash;
77                 if (!hash) /* not initialized */
78                         break;
79
80                 LASSERT(list_empty(&ptable->pt_zombie_list));
81
82                 ptable->pt_hash = NULL;
83                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
84                         LASSERT(list_empty(&hash[j]));
85
86                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
87         }
88
89         cfs_percpt_free(the_lnet.ln_peer_tables);
90         the_lnet.ln_peer_tables = NULL;
91 }
92
93 int
94 lnet_peer_tables_create(void)
95 {
96         struct lnet_peer_table  *ptable;
97         struct list_head        *hash;
98         int                     i;
99         int                     j;
100
101         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
102                                                    sizeof(*ptable));
103         if (the_lnet.ln_peer_tables == NULL) {
104                 CERROR("Failed to allocate cpu-partition peer tables\n");
105                 return -ENOMEM;
106         }
107
108         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
109                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
110                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
111                 if (hash == NULL) {
112                         CERROR("Failed to create peer hash table\n");
113                         lnet_peer_tables_destroy();
114                         return -ENOMEM;
115                 }
116
117                 spin_lock_init(&ptable->pt_zombie_lock);
118                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
119
120                 INIT_LIST_HEAD(&ptable->pt_peer_list);
121
122                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
123                         INIT_LIST_HEAD(&hash[j]);
124                 ptable->pt_hash = hash; /* sign of initialization */
125         }
126
127         return 0;
128 }
129
130 static struct lnet_peer_ni *
131 lnet_peer_ni_alloc(struct lnet_nid *nid)
132 {
133         struct lnet_peer_ni *lpni;
134         struct lnet_net *net;
135         int cpt;
136
137         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
138
139         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
140         if (!lpni)
141                 return NULL;
142
143         INIT_LIST_HEAD(&lpni->lpni_txq);
144         INIT_LIST_HEAD(&lpni->lpni_hashlist);
145         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
146         INIT_LIST_HEAD(&lpni->lpni_recovery);
147         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
148         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
149         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
150         kref_init(&lpni->lpni_kref);
151         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
152
153         spin_lock_init(&lpni->lpni_lock);
154
155         if (lnet_peers_start_down())
156                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
157         else
158                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
159         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
160         lpni->lpni_nid = *nid;
161         lpni->lpni_cpt = cpt;
162         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
163
164         net = lnet_get_net_locked(LNET_NID_NET(nid));
165         lpni->lpni_net = net;
166         if (net) {
167                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
168                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
169                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
170                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
171         } else {
172                 /*
173                  * This peer_ni is not on a local network, so we
174                  * cannot add the credits here. In case the net is
175                  * added later, add the peer_ni to the remote peer ni
176                  * list so it can be easily found and revisited.
177                  */
178                 /* FIXME: per-net implementation instead? */
179                 lnet_peer_ni_addref_locked(lpni);
180                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
181                               &the_lnet.ln_remote_peer_ni_list);
182         }
183
184         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
185
186         return lpni;
187 }
188
189 static struct lnet_peer_net *
190 lnet_peer_net_alloc(__u32 net_id)
191 {
192         struct lnet_peer_net *lpn;
193
194         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
195         if (!lpn)
196                 return NULL;
197
198         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
199         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
200         lpn->lpn_net_id = net_id;
201         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
202
203         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
204
205         return lpn;
206 }
207
208 void
209 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
210 {
211         struct lnet_peer *lp;
212
213         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
214
215         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
216         LASSERT(list_empty(&lpn->lpn_peer_nis));
217         LASSERT(list_empty(&lpn->lpn_peer_nets));
218         lp = lpn->lpn_peer;
219         lpn->lpn_peer = NULL;
220         LIBCFS_FREE(lpn, sizeof(*lpn));
221
222         lnet_peer_decref_locked(lp);
223 }
224
225 static struct lnet_peer *
226 lnet_peer_alloc(struct lnet_nid *nid)
227 {
228         struct lnet_peer *lp;
229
230         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
231         if (!lp)
232                 return NULL;
233
234         INIT_LIST_HEAD(&lp->lp_rtrq);
235         INIT_LIST_HEAD(&lp->lp_routes);
236         INIT_LIST_HEAD(&lp->lp_peer_list);
237         INIT_LIST_HEAD(&lp->lp_peer_nets);
238         INIT_LIST_HEAD(&lp->lp_dc_list);
239         INIT_LIST_HEAD(&lp->lp_dc_pendq);
240         INIT_LIST_HEAD(&lp->lp_rtr_list);
241         init_waitqueue_head(&lp->lp_dc_waitq);
242         spin_lock_init(&lp->lp_lock);
243         lp->lp_primary_nid = *nid;
244         lp->lp_disc_src_nid = LNET_ANY_NID;
245         lp->lp_disc_dst_nid = LNET_ANY_NID;
246         if (lnet_peers_start_down())
247                 lp->lp_alive = false;
248         else
249                 lp->lp_alive = true;
250
251         /*
252          * all peers created on a router should have health on
253          * if it's not already on.
254          */
255         if (the_lnet.ln_routing && !lnet_health_sensitivity)
256                 lp->lp_health_sensitivity = 1;
257
258         /*
259          * Turn off discovery for the loopback peer. A peer for the
260          * loopback interface is only created when we attempt to send
261          * a message over the loopback, and there is no need to ever
262          * use a different interface when sending messages to
263          * ourselves.
264          */
265         if (nid_is_lo0(nid))
266                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
267         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
268
269         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
270
271         return lp;
272 }
273
274 void
275 lnet_destroy_peer_locked(struct lnet_peer *lp)
276 {
277         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
278
279         LASSERT(atomic_read(&lp->lp_refcount) == 0);
280         LASSERT(lp->lp_rtr_refcount == 0);
281         LASSERT(list_empty(&lp->lp_peer_nets));
282         LASSERT(list_empty(&lp->lp_peer_list));
283         LASSERT(list_empty(&lp->lp_dc_list));
284
285         if (lp->lp_data)
286                 lnet_ping_buffer_decref(lp->lp_data);
287
288         /*
289          * If there are messages still on the pending queue, make sure
290          * to queue them on the ln_msg_resend list so they can be
291          * resent at a later point if the discovery thread is still
292          * running.
293          * If the discovery thread has stopped, then the wakeup will be a
294          * no-op, and it is expected that lnet_shutdown_lndnets() will
295          * eventually be called, which will traverse this list and
296          * finalize the messages on it.
297          * We cannot resend them now because we are holding the cpt lock;
298          * releasing the lock could leave the state inconsistent.
299          */
300         spin_lock(&the_lnet.ln_msg_resend_lock);
301         spin_lock(&lp->lp_lock);
302         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
303         spin_unlock(&lp->lp_lock);
304         spin_unlock(&the_lnet.ln_msg_resend_lock);
305         wake_up(&the_lnet.ln_dc_waitq);
306
307         LIBCFS_FREE(lp, sizeof(*lp));
308 }
309
310 /*
311  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
312  * that peer_net, detach the peer_net from the peer.
313  *
314  * Call with lnet_net_lock/EX held
315  */
316 static void
317 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
318 {
319         struct lnet_peer_table *ptable;
320         struct lnet_peer_net *lpn;
321         struct lnet_peer *lp;
322
323         /*
324          * Belts and suspenders: gracefully handle teardown of a
325          * partially connected peer_ni.
326          */
327         lpn = lpni->lpni_peer_net;
328
329         list_del_init(&lpni->lpni_peer_nis);
330         /*
331          * If there are no lpni's left, we detach lpn from
332          * lp_peer_nets, so it cannot be found anymore.
333          */
334         if (list_empty(&lpn->lpn_peer_nis))
335                 list_del_init(&lpn->lpn_peer_nets);
336
337         /* Update peer NID count. */
338         lp = lpn->lpn_peer;
339         lp->lp_nnis--;
340
341         /*
342          * If there are no more peer nets, make the peer unfindable
343          * via the peer_tables.
344          *
345          * Otherwise, if the peer is DISCOVERED, tell discovery to
346          * take another look at it. This is a no-op if discovery for
347          * this peer did the detaching.
348          */
349         if (list_empty(&lp->lp_peer_nets)) {
350                 list_del_init(&lp->lp_peer_list);
351                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
352                 ptable->pt_peers--;
353         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
354                 /* Discovery isn't running, nothing to do here. */
355         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
356                 lnet_peer_queue_for_discovery(lp);
357                 wake_up(&the_lnet.ln_dc_waitq);
358         }
359         CDEBUG(D_NET, "peer %s NID %s\n",
360                 libcfs_nidstr(&lp->lp_primary_nid),
361                 libcfs_nidstr(&lpni->lpni_nid));
362 }
363
364 /* called with lnet_net_lock LNET_LOCK_EX held */
365 static int
366 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
367 {
368         struct lnet_peer_table *ptable = NULL;
369
370         /* don't remove a peer_ni if it's also a gateway */
371         if (lnet_isrouter(lpni) && !force) {
372                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
373                        libcfs_nidstr(&lpni->lpni_nid));
374                 return -EBUSY;
375         }
376
377         lnet_peer_remove_from_remote_list(lpni);
378
379         /* remove peer ni from the hash list. */
380         list_del_init(&lpni->lpni_hashlist);
381
382         /*
383          * indicate the peer is being deleted so the monitor thread can
384          * remove it from the recovery queue.
385          */
386         spin_lock(&lpni->lpni_lock);
387         lpni->lpni_state |= LNET_PEER_NI_DELETING;
388         spin_unlock(&lpni->lpni_lock);
389
390         /* decrement the ref count on the peer table */
391         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
392
393         /*
394          * The peer_ni can no longer be found with a lookup. But there
395          * can be current users, so keep track of it on the zombie
396          * list until the reference count has gone to zero.
397          *
398          * The last reference may be lost in a place where the
399          * lnet_net_lock locks only a single cpt, and that cpt may not
400          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
401          * has its own lock.
402          */
403         spin_lock(&ptable->pt_zombie_lock);
404         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
405         ptable->pt_zombies++;
406         spin_unlock(&ptable->pt_zombie_lock);
407
408         /* no need to keep this peer_ni on the hierarchy anymore */
409         lnet_peer_detach_peer_ni_locked(lpni);
410
411         /* remove hashlist reference on peer_ni */
412         lnet_peer_ni_decref_locked(lpni);
413
414         return 0;
415 }
416
417 void lnet_peer_uninit(void)
418 {
419         struct lnet_peer_ni *lpni, *tmp;
420
421         lnet_net_lock(LNET_LOCK_EX);
422
423         /* remove all peer_nis from the remote peer and the hash list */
424         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
425                                  lpni_on_remote_peer_ni_list)
426                 lnet_peer_ni_del_locked(lpni, false);
427
428         lnet_peer_tables_destroy();
429
430         lnet_net_unlock(LNET_LOCK_EX);
431 }
432
433 static int
434 lnet_peer_del_locked(struct lnet_peer *peer)
435 {
436         struct lnet_peer_ni *lpni = NULL, *lpni2;
437         int rc = 0, rc2 = 0;
438
439         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
440
441         spin_lock(&peer->lp_lock);
442         peer->lp_state |= LNET_PEER_MARK_DELETED;
443         spin_unlock(&peer->lp_lock);
444
445         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
446         while (lpni != NULL) {
447                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
448                 rc = lnet_peer_ni_del_locked(lpni, false);
449                 if (rc != 0)
450                         rc2 = rc;
451                 lpni = lpni2;
452         }
453
454         return rc2;
455 }
456
457 /*
458  * Discovering this peer is taking too long. Cancel any Ping or Push
459  * that discovery is waiting on by unlinking the relevant MDs. The
460  * lnet_discovery_event_handler() will proceed from here and complete
461  * the cleanup.
462  */
463 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
464 {
465         struct lnet_handle_md ping_mdh;
466         struct lnet_handle_md push_mdh;
467
468         LNetInvalidateMDHandle(&ping_mdh);
469         LNetInvalidateMDHandle(&push_mdh);
470
471         spin_lock(&lp->lp_lock);
472         if (lp->lp_state & LNET_PEER_PING_SENT) {
473                 ping_mdh = lp->lp_ping_mdh;
474                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
475         }
476         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
477                 push_mdh = lp->lp_push_mdh;
478                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
479         }
480         spin_unlock(&lp->lp_lock);
481
482         if (!LNetMDHandleIsInvalid(ping_mdh))
483                 LNetMDUnlink(ping_mdh);
484         if (!LNetMDHandleIsInvalid(push_mdh))
485                 LNetMDUnlink(push_mdh);
486 }
487
488 static int
489 lnet_peer_del(struct lnet_peer *peer)
490 {
491         int rc;
492
493         lnet_peer_cancel_discovery(peer);
494         lnet_net_lock(LNET_LOCK_EX);
495         rc = lnet_peer_del_locked(peer);
496         lnet_net_unlock(LNET_LOCK_EX);
497
498         return rc;
499 }
500
501 /*
502  * Delete a NID from a peer. Call with ln_api_mutex held.
503  *
504  * Error codes:
505  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
506  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
507  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
508  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
509  */
510 static int
511 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
512                   unsigned int flags)
513 {
514         struct lnet_peer_ni *lpni;
515         struct lnet_nid primary_nid = lp->lp_primary_nid;
516         int rc = 0;
517         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
518
519         if (!(flags & LNET_PEER_CONFIGURED)) {
520                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
521                         rc = -EPERM;
522                         goto out;
523                 }
524         }
525
526         /* If we're asked to lock down the primary NID we shouldn't be
527          * deleting it.
528          */
529         if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
530             nid_same(&primary_nid, nid)) {
531                 rc = -EPERM;
532                 goto out;
533         }
534
535         lpni = lnet_peer_ni_find_locked(nid);
536         if (!lpni) {
537                 rc = -ENOENT;
538                 goto out;
539         }
540         if (lp != lpni->lpni_peer_net->lpn_peer) {
541                 rc = -ECHILD;
542                 lnet_peer_ni_decref_locked(lpni);
543                 goto out;
544         }
545
546         /*
547          * This function only allows deletion of the primary NID if it
548          * is the only NID.
549          */
550         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
551                 rc = -EBUSY;
552                 lnet_peer_ni_decref_locked(lpni);
553                 goto out;
554         }
555
556         lnet_net_lock(LNET_LOCK_EX);
557
558         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
559                 struct lnet_peer_ni *lpni2;
560                 /* assign the next peer_ni to be the primary */
561                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
562                 LASSERT(lpni2);
563                 lp->lp_primary_nid = lpni2->lpni_nid;
564         }
565         rc = lnet_peer_ni_del_locked(lpni, force);
566         lnet_peer_ni_decref_locked(lpni);
567
568         lnet_net_unlock(LNET_LOCK_EX);
569
570 out:
571         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
572                libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
573                flags, rc);
574
575         return rc;
576 }
577
578 static void
579 lnet_peer_table_cleanup_locked(struct lnet_net *net,
580                                struct lnet_peer_table *ptable)
581 {
582         int                      i;
583         struct lnet_peer_ni     *next;
584         struct lnet_peer_ni     *lpni;
585         struct lnet_peer        *peer;
586
587         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
588                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
589                                          lpni_hashlist) {
590                         if (net != NULL && net != lpni->lpni_net)
591                                 continue;
592
593                         peer = lpni->lpni_peer_net->lpn_peer;
594                         if (!nid_same(&peer->lp_primary_nid,
595                                        &lpni->lpni_nid)) {
596                                 lnet_peer_ni_del_locked(lpni, false);
597                                 continue;
598                         }
599                         /*
600                          * Removing the primary NID implies removing
601                          * the entire peer. Advance next beyond any
602                          * peer_ni that belongs to the same peer.
603                          */
604                         list_for_each_entry_from(next, &ptable->pt_hash[i],
605                                                  lpni_hashlist) {
606                                 if (next->lpni_peer_net->lpn_peer != peer)
607                                         break;
608                         }
609                         lnet_peer_del_locked(peer);
610                 }
611         }
612 }
613
614 static void
615 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
616 {
617         wait_var_event_warning(&ptable->pt_zombies,
618                                ptable->pt_zombies == 0,
619                                "Waiting for %d zombies on peer table\n",
620                                ptable->pt_zombies);
621 }
622
623 static void
624 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
625                                 struct lnet_peer_table *ptable)
626 {
627         struct lnet_peer_ni     *lp;
628         struct lnet_peer_ni     *tmp;
629         struct lnet_nid         gw_nid;
630         int                     i;
631
632         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
633                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
634                                          lpni_hashlist) {
635                         if (net != lp->lpni_net)
636                                 continue;
637
638                         if (!lnet_isrouter(lp))
639                                 continue;
640
641                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
642
643                         lnet_net_unlock(LNET_LOCK_EX);
644                         lnet_del_route(LNET_NET_ANY, &gw_nid);
645                         lnet_net_lock(LNET_LOCK_EX);
646                 }
647         }
648 }
649
650 void
651 lnet_peer_tables_cleanup(struct lnet_net *net)
652 {
653         int i;
654         struct lnet_peer_table *ptable;
655
656         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
657         /* If just deleting the peers for a NI, get rid of any routes these
658          * peers are gateways for. */
659         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
660                 lnet_net_lock(LNET_LOCK_EX);
661                 lnet_peer_table_del_rtrs_locked(net, ptable);
662                 lnet_net_unlock(LNET_LOCK_EX);
663         }
664
665         /* Start the cleanup process */
666         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
667                 lnet_net_lock(LNET_LOCK_EX);
668                 lnet_peer_table_cleanup_locked(net, ptable);
669                 lnet_net_unlock(LNET_LOCK_EX);
670         }
671
672         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
673                 lnet_peer_ni_finalize_wait(ptable);
674 }
675
676 static struct lnet_peer_ni *
677 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
678 {
679         struct list_head        *peers;
680         struct lnet_peer_ni     *lp;
681
682         if (the_lnet.ln_state != LNET_STATE_RUNNING)
683                 return NULL;
684
685         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
686         list_for_each_entry(lp, peers, lpni_hashlist) {
687                 if (nid_same(&lp->lpni_nid, nid)) {
688                         lnet_peer_ni_addref_locked(lp);
689                         return lp;
690                 }
691         }
692
693         return NULL;
694 }
695
696 struct lnet_peer_ni *
697 lnet_peer_ni_find_locked(struct lnet_nid *nid)
698 {
699         struct lnet_peer_ni *lpni;
700         struct lnet_peer_table *ptable;
701         int cpt;
702
703         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
704
705         ptable = the_lnet.ln_peer_tables[cpt];
706         lpni = lnet_get_peer_ni_locked(ptable, nid);
707
708         return lpni;
709 }
710
711 struct lnet_peer_ni *
712 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
713 {
714         struct lnet_peer_net *lpn;
715         struct lnet_peer_ni *lpni;
716
717         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
718         if (!lpn)
719                 return NULL;
720
721         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
722                 if (nid_same(&lpni->lpni_nid, nid))
723                         return lpni;
724         }
725
726         return NULL;
727 }
728
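/* Look up the peer that owns @nid. On success a reference is taken on the
 * returned peer and the caller must drop it with lnet_peer_decref_locked().
 * A minimal usage sketch, following the pattern of LNetPeerDiscovered()
 * below:
 *
 *	lp = lnet_find_peer(nid);
 *	if (lp) {
 *		cpt = lnet_net_lock_current();
 *		... inspect lp ...
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(cpt);
 *	}
 */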
729 struct lnet_peer *
730 lnet_find_peer(struct lnet_nid *nid)
731 {
732         struct lnet_peer_ni *lpni;
733         struct lnet_peer *lp = NULL;
734         int cpt;
735
736         cpt = lnet_net_lock_current();
737         lpni = lnet_peer_ni_find_locked(nid);
738         if (lpni) {
739                 lp = lpni->lpni_peer_net->lpn_peer;
740                 lnet_peer_addref_locked(lp);
741                 lnet_peer_ni_decref_locked(lpni);
742         }
743         lnet_net_unlock(cpt);
744
745         return lp;
746 }
747
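/* Return the peer_net that follows @prev_lpn_id in @lp's list of nets,
 * wrapping back to the first net after the last one. A @prev_lpn_id of 0
 * returns the first net; an unknown @prev_lpn_id returns NULL.
 */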
748 struct lnet_peer_net *
749 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
750 {
751         struct lnet_peer_net *net;
752
753         if (!prev_lpn_id) {
754                 /* no net id provided; return the first net */
755                 net = list_first_entry_or_null(&lp->lp_peer_nets,
756                                                struct lnet_peer_net,
757                                                lpn_peer_nets);
758
759                 return net;
760         }
761
762         /* find the net after the one provided */
763         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
764                 if (net->lpn_net_id == prev_lpn_id) {
765                         /*
766                          * if we reached the end of the list loop to the
767                          * beginning.
768                          */
769                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
770                                 return list_first_entry_or_null(&lp->lp_peer_nets,
771                                                                 struct lnet_peer_net,
772                                                                 lpn_peer_nets);
773                         else
774                                 return list_next_entry(net, lpn_peer_nets);
775                 }
776         }
777
778         return NULL;
779 }
780
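/* Iterator over the peer_nis of @peer, optionally restricted to @peer_net.
 * Pass NULL as @prev to start and the previously returned peer_ni to
 * advance. For illustration, a full walk over a peer (the idiom used by
 * lnet_peer_clr_non_mr_pref_nids() below) looks like:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		... handle lpni ...
 */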
781 struct lnet_peer_ni *
782 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
783                              struct lnet_peer_net *peer_net,
784                              struct lnet_peer_ni *prev)
785 {
786         struct lnet_peer_ni *lpni;
787         struct lnet_peer_net *net = peer_net;
788
789         if (!prev) {
790                 if (!net) {
791                         if (list_empty(&peer->lp_peer_nets))
792                                 return NULL;
793
794                         net = list_first_entry(&peer->lp_peer_nets,
795                                                struct lnet_peer_net,
796                                                lpn_peer_nets);
797                 }
798                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
799                                         lpni_peer_nis);
800
801                 return lpni;
802         }
803
804         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
805                 /*
806                  * If we reached the end of the peer NI list and the peer
807                  * net is specified, then there are no more peer NIs in
808                  * that net.
809                  */
810                 if (net)
811                         return NULL;
812
813                 /*
814                  * We reached the end of this net's NI list. Move to the
815                  * next net.
816                  */
817                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
818                     &peer->lp_peer_nets)
819                         /* no more nets and no more NIs. */
820                         return NULL;
821
822                 /* get the next net */
823                 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
824                                        struct lnet_peer_net,
825                                        lpn_peer_nets);
826                 /* get the ni on it */
827                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
828                                         lpni_peer_nis);
829
830                 return lpni;
831         }
832
833         /* there are more nis left */
834         lpni = list_first_entry(&prev->lpni_peer_nis,
835                                 struct lnet_peer_ni, lpni_peer_nis);
836
837         return lpni;
838 }
839
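/* Copy the primary NIDs of all peers (nid4 primaries only) to the
 * user-space array @ids. If the buffer is too small the call fails with
 * -E2BIG and *sizep is set to the size required, so a caller can,
 * hypothetically, probe first and retry with a big enough buffer:
 *
 *	rc = lnet_get_peer_list(&count, &size, ids);
 *	if (rc == -E2BIG)
 *		... reallocate ids to hold "size" bytes and call again ...
 */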
840 /* Call with the ln_api_mutex held */
841 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
842 {
843         struct lnet_process_id id;
844         struct lnet_peer_table *ptable;
845         struct lnet_peer *lp;
846         __u32 count = 0;
847         __u32 size = 0;
848         int lncpt;
849         int cpt;
850         __u32 i;
851         int rc;
852
853         rc = -ESHUTDOWN;
854         if (the_lnet.ln_state != LNET_STATE_RUNNING)
855                 goto done;
856
857         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
858
859         /*
860          * Count the number of peers, and return E2BIG if the buffer
861          * is too small. We'll also return the desired size.
862          */
863         rc = -E2BIG;
864         for (cpt = 0; cpt < lncpt; cpt++) {
865                 ptable = the_lnet.ln_peer_tables[cpt];
866                 count += ptable->pt_peers;
867         }
868         size = count * sizeof(*ids);
869         if (size > *sizep)
870                 goto done;
871
872         /*
873          * Walk the peer lists and copy out the primary nids.
874          * This is safe because the peer lists are only modified
875          * while the ln_api_mutex is held. So we don't need to
876          * hold the lnet_net_lock as well, and can therefore
877          * directly call copy_to_user().
878          */
879         rc = -EFAULT;
880         memset(&id, 0, sizeof(id));
881         id.pid = LNET_PID_LUSTRE;
882         i = 0;
883         for (cpt = 0; cpt < lncpt; cpt++) {
884                 ptable = the_lnet.ln_peer_tables[cpt];
885                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
886                         if (!nid_is_nid4(&lp->lp_primary_nid))
887                                 continue;
888                         if (i >= count)
889                                 goto done;
890                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
891                         if (copy_to_user(&ids[i], &id, sizeof(id)))
892                                 goto done;
893                         i++;
894                 }
895         }
896         rc = 0;
897 done:
898         *countp = count;
899         *sizep = size;
900         return rc;
901 }
902
903 /*
904  * Start pushes to peers that need to be updated for a configuration
905  * change on this node.
906  */
907 void
908 lnet_push_update_to_peers(int force)
909 {
910         struct lnet_peer_table *ptable;
911         struct lnet_peer *lp;
912         int lncpt;
913         int cpt;
914
915         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
916                 return;
917
918         lnet_net_lock(LNET_LOCK_EX);
919         if (lnet_peer_discovery_disabled)
920                 force = 0;
921         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
922         for (cpt = 0; cpt < lncpt; cpt++) {
923                 ptable = the_lnet.ln_peer_tables[cpt];
924                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
925                         if (force) {
926                                 spin_lock(&lp->lp_lock);
927                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
928                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
929                                 spin_unlock(&lp->lp_lock);
930                         }
931                         if (lnet_peer_needs_push(lp))
932                                 lnet_peer_queue_for_discovery(lp);
933                 }
934         }
935         lnet_net_unlock(LNET_LOCK_EX);
936         wake_up(&the_lnet.ln_dc_waitq);
937 }
938
939 /* Check whether the given NID is among the preferred gateways for the
940  * remote peer.
941  * Return:
942  *      false: the list is empty, or the NID is not in it
943  *      true:  the NID is found in the list
944  */
945 bool
946 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
947                              struct lnet_nid *gw_nid)
948 {
949         struct lnet_nid_list *ne;
950
951         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
952                libcfs_nidstr(&lpni->lpni_nid),
953                list_empty(&lpni->lpni_rtr_pref_nids));
954
955         if (list_empty(&lpni->lpni_rtr_pref_nids))
956                 return false;
957
958         /* iterate through all the preferred NIDs and see if any of them
959          * matches the provided gw_nid
960          */
961         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
962                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
963                        libcfs_nidstr(&ne->nl_nid),
964                        libcfs_nidstr(gw_nid));
965                 if (nid_same(&ne->nl_nid, gw_nid))
966                         return true;
967         }
968
969         return false;
970 }
971
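/* Drop all preferred-router entries from @lpni. The entries are spliced
 * onto a local zombie list under the cpt lock and freed afterwards, so no
 * memory is freed while the lock is held.
 */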
972 void
973 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
974 {
975         struct list_head zombies;
976         struct lnet_nid_list *ne;
977         struct lnet_nid_list *tmp;
978         int cpt = lpni->lpni_cpt;
979
980         INIT_LIST_HEAD(&zombies);
981
982         lnet_net_lock(cpt);
983         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
984         lnet_net_unlock(cpt);
985
986         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
987                 list_del(&ne->nl_list);
988                 LIBCFS_FREE(ne, sizeof(*ne));
989         }
990 }
991
992 int
993 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
994                        struct lnet_nid *gw_nid)
995 {
996         int cpt = lpni->lpni_cpt;
997         struct lnet_nid_list *ne = NULL;
998
999         /* This function is called with the api_mutex held. While the
1000          * api_mutex is held the list cannot be modified, since it is only
1001          * modified as a result of applying a UDSP, which also happens
1002          * under the api_mutex lock.
1003          */
1004         __must_hold(&the_lnet.ln_api_mutex);
1005
1006         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1007                 if (nid_same(&ne->nl_nid, gw_nid))
1008                         return -EEXIST;
1009         }
1010
1011         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1012         if (!ne)
1013                 return -ENOMEM;
1014
1015         ne->nl_nid = *gw_nid;
1016
1017         /* Lock the cpt to protect against addition and checks in the
1018          * selection algorithm
1019          */
1020         lnet_net_lock(cpt);
1021         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1022         lnet_net_unlock(cpt);
1023
1024         return 0;
1025 }
1026
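/* Preferred-NID storage convention used by the helpers below: with a
 * single preferred NID (lpni_pref_nnids == 1) the NID is stored inline in
 * lpni_pref.nid; with more than one, the NIDs live on the lpni_pref.nids
 * list as struct lnet_nid_list entries. This explains the separate code
 * paths for the one-NID and many-NID cases.
 */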
1027 /*
1028  * Test whether an NI is a preferred NI for this peer_ni, i.e. whether
1029  * this is a preferred point-to-point path. Call with lnet_net_lock in
1030  * shared mode.
1031  */
1032 bool
1033 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1034 {
1035         struct lnet_nid_list *ne;
1036
1037         if (lpni->lpni_pref_nnids == 0)
1038                 return false;
1039         if (lpni->lpni_pref_nnids == 1)
1040                 return nid_same(&lpni->lpni_pref.nid, nid);
1041         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1042                 if (nid_same(&ne->nl_nid, nid))
1043                         return true;
1044         }
1045         return false;
1046 }
1047
1048 /*
1049  * Set a single ni as preferred, provided no preferred ni is already
1050  * defined. Only to be used for non-multi-rail peer_ni.
1051  */
1052 int
1053 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1054                                   struct lnet_nid *nid)
1055 {
1056         int rc = 0;
1057
1058         if (!nid)
1059                 return -EINVAL;
1060         spin_lock(&lpni->lpni_lock);
1061         if (LNET_NID_IS_ANY(nid)) {
1062                 rc = -EINVAL;
1063         } else if (lpni->lpni_pref_nnids > 0) {
1064                 rc = -EPERM;
1065         } else if (lpni->lpni_pref_nnids == 0) {
1066                 lpni->lpni_pref.nid = *nid;
1067                 lpni->lpni_pref_nnids = 1;
1068                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1069         }
1070         spin_unlock(&lpni->lpni_lock);
1071
1072         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1073                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1074         return rc;
1075 }
1076
1077 /*
1078  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1079  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1080  */
1081 static int
1082 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1083 {
1084         int rc = 0;
1085
1086         spin_lock(&lpni->lpni_lock);
1087         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1088                 lpni->lpni_pref_nnids = 0;
1089                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1090         } else if (lpni->lpni_pref_nnids == 0) {
1091                 rc = -ENOENT;
1092         } else {
1093                 rc = -EPERM;
1094         }
1095         spin_unlock(&lpni->lpni_lock);
1096
1097         CDEBUG(D_NET, "peer %s: %d\n",
1098                libcfs_nidstr(&lpni->lpni_nid), rc);
1099         return rc;
1100 }
1101
1102 void
1103 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1104 {
1105         lpni->lpni_sel_priority = priority;
1106 }
1107
1108 /*
1109  * Clear the preferred NIDs from a non-multi-rail peer.
1110  */
1111 static void
1112 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1113 {
1114         struct lnet_peer_ni *lpni = NULL;
1115
1116         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1117                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1118 }
1119
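/* Add @nid to the preferred NIDs of @lpni. Note the allocation pattern:
 * the new lnet_nid_list entry (and, when converting from a single
 * preferred NID to a list, the entry holding the existing NID) is
 * allocated before taking lnet_net_lock/EX and only linked in under the
 * lock, so no allocation happens with the lock held.
 */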
1120 int
1121 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1122 {
1123         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1124         struct lnet_nid_list *ne1 = NULL;
1125         struct lnet_nid_list *ne2 = NULL;
1126         struct lnet_nid tmp_nid = LNET_ANY_NID;
1127         int rc = 0;
1128
1129         if (LNET_NID_IS_ANY(nid)) {
1130                 rc = -EINVAL;
1131                 goto out;
1132         }
1133
1134         if (lpni->lpni_pref_nnids == 1 &&
1135             nid_same(&lpni->lpni_pref.nid, nid)) {
1136                 rc = -EEXIST;
1137                 goto out;
1138         }
1139
1140         /* A non-MR node may have only one preferred NI per peer_ni */
1141         if (lpni->lpni_pref_nnids > 0 &&
1142             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1143                 rc = -EPERM;
1144                 goto out;
1145         }
1146
1147         /* add the new preferred nid to the list of preferred nids */
1148         if (lpni->lpni_pref_nnids != 0) {
1149                 size_t alloc_size = sizeof(*ne1);
1150
1151                 if (lpni->lpni_pref_nnids == 1) {
1152                         tmp_nid = lpni->lpni_pref.nid;
1153                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1154                 }
1155
1156                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1157                         if (nid_same(&ne1->nl_nid, nid)) {
1158                                 rc = -EEXIST;
1159                                 goto out;
1160                         }
1161                 }
1162
1163                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1164                                  alloc_size);
1165                 if (!ne1) {
1166                         rc = -ENOMEM;
1167                         goto out;
1168                 }
1169
1170                 /* move the originally stored nid to the list */
1171                 if (lpni->lpni_pref_nnids == 1) {
1172                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1173                                 lpni->lpni_cpt, alloc_size);
1174                         if (!ne2) {
1175                                 rc = -ENOMEM;
1176                                 goto out;
1177                         }
1178                         INIT_LIST_HEAD(&ne2->nl_list);
1179                         ne2->nl_nid = tmp_nid;
1180                 }
1181                 ne1->nl_nid = *nid;
1182         }
1183
1184         lnet_net_lock(LNET_LOCK_EX);
1185         spin_lock(&lpni->lpni_lock);
1186         if (lpni->lpni_pref_nnids == 0) {
1187                 lpni->lpni_pref.nid = *nid;
1188         } else {
1189                 if (ne2)
1190                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1191                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1192         }
1193         lpni->lpni_pref_nnids++;
1194         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1195         spin_unlock(&lpni->lpni_lock);
1196         lnet_net_unlock(LNET_LOCK_EX);
1197
1198 out:
1199         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1200                 spin_lock(&lpni->lpni_lock);
1201                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1202                 spin_unlock(&lpni->lpni_lock);
1203         }
1204         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1205                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1206         return rc;
1207 }
1208
1209 int
1210 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1211 {
1212         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1213         struct lnet_nid_list *ne = NULL;
1214         int rc = 0;
1215
1216         if (lpni->lpni_pref_nnids == 0) {
1217                 rc = -ENOENT;
1218                 goto out;
1219         }
1220
1221         if (lpni->lpni_pref_nnids == 1) {
1222                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1223                         rc = -ENOENT;
1224                         goto out;
1225                 }
1226         } else {
1227                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1228                         if (nid_same(&ne->nl_nid, nid))
1229                                 goto remove_nid_entry;
1230                 }
1231                 rc = -ENOENT;
1232                 ne = NULL;
1233                 goto out;
1234         }
1235
1236 remove_nid_entry:
1237         lnet_net_lock(LNET_LOCK_EX);
1238         spin_lock(&lpni->lpni_lock);
1239         if (lpni->lpni_pref_nnids == 1)
1240                 lpni->lpni_pref.nid = LNET_ANY_NID;
1241         else {
1242                 list_del_init(&ne->nl_list);
1243                 if (lpni->lpni_pref_nnids == 2) {
1244                         struct lnet_nid_list *ne, *tmp;
1245
1246                         list_for_each_entry_safe(ne, tmp,
1247                                                  &lpni->lpni_pref.nids,
1248                                                  nl_list) {
1249                                 list_del_init(&ne->nl_list);
1250                                 lpni->lpni_pref.nid = ne->nl_nid;
1251                                 LIBCFS_FREE(ne, sizeof(*ne));
1252                         }
1253                 }
1254         }
1255         lpni->lpni_pref_nnids--;
1256         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1257         spin_unlock(&lpni->lpni_lock);
1258         lnet_net_unlock(LNET_LOCK_EX);
1259
1260         if (ne)
1261                 LIBCFS_FREE(ne, sizeof(*ne));
1262 out:
1263         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1264                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1265         return rc;
1266 }
1267
1268 void
1269 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1270 {
1271         struct list_head zombies;
1272         struct lnet_nid_list *ne;
1273         struct lnet_nid_list *tmp;
1274
1275         INIT_LIST_HEAD(&zombies);
1276
1277         lnet_net_lock(LNET_LOCK_EX);
1278         if (lpni->lpni_pref_nnids == 1)
1279                 lpni->lpni_pref.nid = LNET_ANY_NID;
1280         else if (lpni->lpni_pref_nnids > 1)
1281                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1282         lpni->lpni_pref_nnids = 0;
1283         lnet_net_unlock(LNET_LOCK_EX);
1284
1285         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1286                 list_del_init(&ne->nl_list);
1287                 LIBCFS_FREE(ne, sizeof(*ne));
1288         }
1289 }
1290
1291 void
1292 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1293 {
1294         struct lnet_peer_ni *lpni;
1295
1296         *result = *nid;
1297         lpni = lnet_peer_ni_find_locked(nid);
1298         if (lpni) {
1299                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1300                 lnet_peer_ni_decref_locked(lpni);
1301         }
1302 }
1303
1304 bool
1305 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1306 __must_hold(&lp->lp_lock)
1307 {
1308         if (lnet_peer_discovery_disabled)
1309                 return true;
1310
1311         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1312             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1313                 return true;
1314         }
1315
1316         return false;
1317 }
1318
1319 /*
1320  * Peer Discovery
1321  */
1322 bool
1323 lnet_is_discovery_disabled(struct lnet_peer *lp)
1324 {
1325         bool rc = false;
1326
1327         spin_lock(&lp->lp_lock);
1328         rc = lnet_is_discovery_disabled_locked(lp);
1329         spin_unlock(&lp->lp_lock);
1330
1331         return rc;
1332 }
1333
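/* Add a peer built from the NIDs in @nids. The first usable (non-loopback)
 * NID becomes the primary NID unless a peer for it already exists, in
 * which case that peer's primary NID is reused; the remaining NIDs are
 * added to that peer, or as separate peers when discovery is disabled.
 * A hypothetical caller configuring a two-NID peer might do:
 *
 *	struct lnet_nid nids[2];
 *
 *	... fill in nids[0] and nids[1] ...
 *	rc = LNetAddPeer(nids, 2);
 */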
1334 int
1335 LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
1336 {
1337         struct lnet_nid pnid = LNET_ANY_NID;
1338         bool mr;
1339         int i, rc;
1340         int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;
1341
1342         if (!nids || num_nids < 1)
1343                 return -EINVAL;
1344
1345         rc = LNetNIInit(LNET_PID_ANY);
1346         if (rc < 0)
1347                 return rc;
1348
1349         mutex_lock(&the_lnet.ln_api_mutex);
1350
1351         mr = lnet_peer_discovery_disabled == 0;
1352
1353         rc = 0;
1354         for (i = 0; i < num_nids; i++) {
1355                 if (nid_is_lo0(&nids[i]))
1356                         continue;
1357
1358                 if (LNET_NID_IS_ANY(&pnid)) {
1359                         pnid = nids[i];
1360                         rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
1361                         if (rc == -EALREADY) {
1362                                 struct lnet_peer *lp;
1363
1364                                 CDEBUG(D_NET, "A peer exists for NID %s\n",
1365                                        libcfs_nidstr(&pnid));
1366                                 rc = 0;
1367                                 /* Adds a refcount */
1368                                 lp = lnet_find_peer(&pnid);
1369                                 LASSERT(lp);
1370                                 pnid = lp->lp_primary_nid;
1371                                 /* Drop refcount from lookup */
1372                                 lnet_peer_decref_locked(lp);
1373                         }
1374                 } else if (lnet_peer_discovery_disabled) {
1375                         rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
1376                                               flags);
1377                 } else {
1378                         rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
1379                                               flags);
1380                 }
1381
1382                 if (rc && rc != -EEXIST)
1383                         goto unlock;
1384         }
1385
1386 unlock:
1387         mutex_unlock(&the_lnet.ln_api_mutex);
1388
1389         LNetNIFini();
1390
1391         return rc == -EEXIST ? 0 : rc;
1392 }
1393 EXPORT_SYMBOL(LNetAddPeer);
1394
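/* Replace *nid in place with the primary NID of the peer that owns it,
 * creating the peer and running discovery if needed. When primary NID
 * locking is enabled discovery runs in the background; otherwise the call
 * blocks until discovery completes or fails.
 */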
1395 void LNetPrimaryNID(struct lnet_nid *nid)
1396 {
1397         struct lnet_peer *lp;
1398         struct lnet_peer_ni *lpni;
1399         struct lnet_nid orig;
1400         int rc = 0;
1401         int cpt;
1402
1403         if (!nid || nid_is_lo0(nid))
1404                 return;
1405         orig = *nid;
1406
1407         cpt = lnet_net_lock_current();
1408         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1409         if (IS_ERR(lpni)) {
1410                 rc = PTR_ERR(lpni);
1411                 goto out_unlock;
1412         }
1413         lp = lpni->lpni_peer_net->lpn_peer;
1414
1415         /* If discovery is disabled locally then we needn't bother running
1416          * discovery here because discovery will not modify whatever
1417          * primary NID is currently set for this peer. If the specified peer is
1418          * down then this discovery can introduce long delays into the mount
1419          * process, so skip it if it isn't necessary.
1420          */
1421 again:
1422         spin_lock(&lp->lp_lock);
1423         /* DD disabled, nothing to do */
1424         if (lnet_peer_discovery_disabled) {
1425                 *nid = lp->lp_primary_nid;
1426                 spin_unlock(&lp->lp_lock);
1427                 goto out_decref;
1428         }
1429
1430         /* Peer already up to date, nothing to do */
1431         if (lnet_peer_is_uptodate_locked(lp)) {
1432                 *nid = lp->lp_primary_nid;
1433                 spin_unlock(&lp->lp_lock);
1434                 goto out_decref;
1435         }
1436         spin_unlock(&lp->lp_lock);
1437
1438         /* If primary nid locking is enabled, discovery is performed
1439          * in the background.
1440          * If primary nid locking is disabled, discovery blocks here.
1441          * Messages to the peer will not go through until the discovery is
1442          * complete.
1443          */
1444         if (lock_prim_nid && lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1445                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1446         else
1447                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1448         if (rc)
1449                 goto out_decref;
1450
1451         /* The lpni (or lp) for this NID may have changed and our ref is
1452          * the only thing keeping the old one around. Release the ref
1453          * and look up the lpni again.
1454          */
1455         lnet_peer_ni_decref_locked(lpni);
1456         lpni = lnet_peer_ni_find_locked(nid);
1457         if (!lpni) {
1458                 rc = -ENOENT;
1459                 goto out_unlock;
1460         }
1461         lp = lpni->lpni_peer_net->lpn_peer;
1462
1463         if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
1464                 goto again;
1465         *nid = lp->lp_primary_nid;
1466 out_decref:
1467         lnet_peer_ni_decref_locked(lpni);
1468 out_unlock:
1469         lnet_net_unlock(cpt);
1470
1471         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1472                libcfs_nidstr(nid), rc);
1473 }
1474 EXPORT_SYMBOL(LNetPrimaryNID);
1475
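/* Return true if the peer owning @nid has been discovered and its NIDs are
 * up to date, or if discovery is disabled for that peer; false otherwise,
 * including when no such peer exists.
 */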
1476 bool
1477 LNetPeerDiscovered(struct lnet_nid *nid)
1478 {
1479         int cpt, disc = false;
1480         struct lnet_peer *lp;
1481
1482         lp = lnet_find_peer(nid);
1483         if (!lp)
1484                 goto out;
1485
1486         cpt = lnet_net_lock_current();
1487         spin_lock(&lp->lp_lock);
1488         if (((lp->lp_state & LNET_PEER_DISCOVERED) &&
1489             (lp->lp_state & LNET_PEER_NIDS_UPTODATE)) ||
1490             (lp->lp_state & LNET_PEER_NO_DISCOVERY))
1491                 disc = true;
1492         spin_unlock(&lp->lp_lock);
1493
1494         /* Drop refcount from lookup */
1495         lnet_peer_decref_locked(lp);
1496         lnet_net_unlock(cpt);
1497 out:
1498         CDEBUG(D_NET, "Peer NID %s discovered: %d\n", libcfs_nidstr(nid),
1499                disc);
1500         return disc;
1501 }
1502 EXPORT_SYMBOL(LNetPeerDiscovered);
1503
1504 struct lnet_peer_net *
1505 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1506 {
1507         struct lnet_peer_net *peer_net;
1508         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1509                 if (peer_net->lpn_net_id == net_id)
1510                         return peer_net;
1511         }
1512         return NULL;
1513 }
1514
1515 /*
1516  * Attach a peer_ni to a peer_net and peer. This function assumes
1517  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1518  * may be attached to a different peer, in which case it will be
1519  * properly detached first. The whole operation is done atomically.
1520  *
1521  * This function consumes the reference on lpni and always returns 0.
1522  * This is the last function called from functions that do return an
1523  * int, so returning 0 here allows the compiler to do a tail call.
1524  */
1525 static int
1526 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1527                          struct lnet_peer_net *lpn,
1528                          struct lnet_peer_ni *lpni,
1529                          unsigned flags)
1530 {
1531         struct lnet_peer_table *ptable;
1532         bool new_lpn = false;
1533         int rc;
1534
1535         /* Install the new peer_ni */
1536         lnet_net_lock(LNET_LOCK_EX);
1537         /* Add peer_ni to global peer table hash, if necessary. */
1538         if (list_empty(&lpni->lpni_hashlist)) {
1539                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1540
1541                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1542                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1543                 ptable->pt_version++;
1544                 lnet_peer_ni_addref_locked(lpni);
1545         }
1546
1547         /* Detach the peer_ni from an existing peer, if necessary. */
1548         if (lpni->lpni_peer_net) {
1549                 LASSERT(lpni->lpni_peer_net != lpn);
1550                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1551                 lnet_peer_detach_peer_ni_locked(lpni);
1552                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1553                 lpni->lpni_peer_net = NULL;
1554         }
1555
1556         /* Add peer_ni to peer_net */
1557         lpni->lpni_peer_net = lpn;
1558         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1559                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1560         else
1561                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1562         lnet_update_peer_net_healthv(lpni);
1563         lnet_peer_net_addref_locked(lpn);
1564
1565         /* Add peer_net to peer */
1566         if (!lpn->lpn_peer) {
1567                 new_lpn = true;
1568                 lpn->lpn_peer = lp;
1569                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1570                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1571                 else
1572                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1573                 lnet_peer_addref_locked(lp);
1574         }
1575
1576         /* Add peer to global peer list, if necessary */
1577         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1578         if (list_empty(&lp->lp_peer_list)) {
1579                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1580                 ptable->pt_peers++;
1581         }
1582
1583         /* Update peer state */
1584         spin_lock(&lp->lp_lock);
1585         if (flags & LNET_PEER_CONFIGURED) {
1586                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1587                         lp->lp_state |= LNET_PEER_CONFIGURED;
1588         }
1589         if (flags & LNET_PEER_MULTI_RAIL) {
1590                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1591                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1592                         lnet_peer_clr_non_mr_pref_nids(lp);
1593                 }
1594         }
1595         if (flags & LNET_PEER_LOCK_PRIMARY) {
1596                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1597                 lp->lp_prim_lock_ts = ktime_get_ns();
1598         }
1599         spin_unlock(&lp->lp_lock);
1600
1601         lp->lp_nnis++;
1602
1603         /* apply UDSPs */
1604         if (new_lpn) {
1605                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1606                 if (rc)
1607                         CERROR("Failed to apply UDSPs on lpn %s\n",
1608                                libcfs_net2str(lpn->lpn_net_id));
1609         }
1610         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1611         if (rc)
1612                 CERROR("Failed to apply UDSPs on lpni %s\n",
1613                        libcfs_nidstr(&lpni->lpni_nid));
1614
1615         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1616                libcfs_nidstr(&lp->lp_primary_nid),
1617                libcfs_nidstr(&lpni->lpni_nid), flags);
1618         lnet_peer_ni_decref_locked(lpni);
1619         lnet_net_unlock(LNET_LOCK_EX);
1620
1621         return 0;
1622 }
1623
1624 /*
1625  * Create a new peer, with nid as its primary nid.
1626  *
1627  * Call with the lnet_api_mutex held.
1628  */
1629 static int
1630 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1631 {
1632         struct lnet_peer *lp;
1633         struct lnet_peer_net *lpn;
1634         struct lnet_peer_ni *lpni;
1635         int rc = 0;
1636
1637         LASSERT(nid);
1638
1639         /*
1640          * No need for the lnet_net_lock here, because the
1641          * lnet_api_mutex is held.
1642          */
1643         lpni = lnet_peer_ni_find_locked(nid);
1644         if (lpni) {
1645                 /* A peer with this NID already exists. */
1646                 lp = lpni->lpni_peer_net->lpn_peer;
1647                 lnet_peer_ni_decref_locked(lpni);
1648                 /*
1649                  * This is an error if the peer was configured and the
1650                  * primary NID differs or an attempt is made to change
1651                  * the Multi-Rail flag. Otherwise the assumption is
1652                  * that an existing peer is being modified.
1653                  */
1654                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1655                         if (!nid_same(&lp->lp_primary_nid, nid))
1656                                 rc = -EEXIST;
1657                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1658                                 rc = -EPERM;
1659                         goto out;
1660                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1661                         if (nid_same(&lp->lp_primary_nid, nid))
1662                                 rc = -EEXIST;
1663                         /* We're trying to recreate an existing peer
1664                          * whose primary NID has already been locked.
1665                          * This is likely because two servers exist on
1666                          * the same node. So we'll just refer to that
1667                          * node by the primary NID which was added
1668                          * first by Lustre.
1669                          */
1670                         else
1671                                 rc = -EALREADY;
1672                         goto out;
1673                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1674                         /* if not recreating peer as configured and
1675                          * not locking primary nid, no need to
1676                          * do anything if primary nid is not being changed
1677                          */
1678                         if (nid_same(&lp->lp_primary_nid, nid)) {
1679                                 rc = -EEXIST;
1680                                 goto out;
1681                         }
1682                 }
1683                 /* Delete and recreate the peer.
1684                  * We can get here:
1685                  * 1. If the peer is being recreated as a configured NID
1686                  * 2. If there already exists a peer which
1687                  *    was discovered manually, but is recreated via Lustre
1688                  *    with its primary NID locked (LNET_PEER_LOCK_PRIMARY)
1689                  */
1690                 rc = lnet_peer_del(lp);
1691                 if (rc)
1692                         goto out;
1693         }
1694
1695         /* Create peer, peer_net, and peer_ni. */
1696         rc = -ENOMEM;
1697         lp = lnet_peer_alloc(nid);
1698         if (!lp)
1699                 goto out;
1700         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1701         if (!lpn)
1702                 goto out_free_lp;
1703         lpni = lnet_peer_ni_alloc(nid);
1704         if (!lpni)
1705                 goto out_free_lpn;
1706
1707         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1708
1709 out_free_lpn:
1710         LIBCFS_FREE(lpn, sizeof(*lpn));
1711 out_free_lp:
1712         LIBCFS_FREE(lp, sizeof(*lp));
1713 out:
1714         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1715                libcfs_nidstr(nid), flags, rc);
1716         return rc;
1717 }
1718
1719 /*
1720  * Add a NID to a peer. Call with ln_api_mutex held.
1721  *
1722  * Error codes:
1723  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1724  *  -EEXIST:   The NID was configured by DLC for a different peer.
1725  *  -ENOMEM:   Out of memory.
1726  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1727  *             non-multi-rail peer.
1728  */
1729 static int
1730 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1731                   unsigned int flags)
1732 {
1733         struct lnet_peer_net *lpn;
1734         struct lnet_peer_ni *lpni;
1735         int rc = 0;
1736
1737         LASSERT(lp);
1738         LASSERT(nid);
1739
1740         /* A configured peer can only be updated through configuration. */
1741         if (!(flags & LNET_PEER_CONFIGURED)) {
1742                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1743                         rc = -EPERM;
1744                         goto out;
1745                 }
1746         }
1747
1748         /*
1749          * The MULTI_RAIL flag can be set but not cleared, because
1750          * that would leave the peer struct in an invalid state.
1751          */
1752         if (flags & LNET_PEER_MULTI_RAIL) {
1753                 spin_lock(&lp->lp_lock);
1754                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1755                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1756                         lnet_peer_clr_non_mr_pref_nids(lp);
1757                 }
1758                 spin_unlock(&lp->lp_lock);
1759         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1760                 rc = -EPERM;
1761                 goto out;
1762         }
1763
1764         lpni = lnet_peer_ni_find_locked(nid);
1765         if (lpni) {
1766                 /*
1767                  * A peer_ni already exists. This is only a problem if
1768                  * it is not connected to this peer and was configured
1769                  * by DLC.
1770                  */
1771                 if (lpni->lpni_peer_net->lpn_peer == lp)
1772                         goto out_free_lpni;
1773                 if (lnet_peer_ni_is_configured(lpni)) {
1774                         rc = -EEXIST;
1775                         goto out_free_lpni;
1776                 }
1777                 /* If this is the primary NID, destroy the peer. */
1778                 if (lnet_peer_ni_is_primary(lpni)) {
1779                         struct lnet_peer *lp2 =
1780                                 lpni->lpni_peer_net->lpn_peer;
1781                         int rtr_refcount = lp2->lp_rtr_refcount;
1782                         unsigned int peer2_state;
1783                         __u64 peer2_prim_lock_ts;
1784
1785                         /* If there's another peer that this NID belongs to
1786                          * and the primary NID for that peer is locked,
1787                          * then, unless it is the only NID, we don't want
1788                          * to mess with it.
1789                          * But the configuration is wrong at this point,
1790                          * so we should flag both of these peers as in a bad
1791                          * state
1792                          */
1793                         spin_lock(&lp2->lp_lock);
1794                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1795                             lp2->lp_nnis > 1) {
1796                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1797                                 spin_unlock(&lp2->lp_lock);
1798                                 spin_lock(&lp->lp_lock);
1799                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1800                                 spin_unlock(&lp->lp_lock);
1801                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1802                                         libcfs_nidstr(&lp->lp_primary_nid),
1803                                         libcfs_nidstr(nid),
1804                                         libcfs_nidstr(&lp2->lp_primary_nid));
1805                                 goto out_free_lpni;
1806                         }
1807                         peer2_state = lp2->lp_state;
1808                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1809                         spin_unlock(&lp2->lp_lock);
1810
1811                         /* The NID which got locked the earliest should
1812                          * be kept as primary. If the peers were
1813                          * created by Lustre, this allows the
1814                          * first listed NID to stay primary as intended
1815                          * for the purpose of communicating with Lustre
1816                          * even if peer discovery succeeded using
1817                          * a different NID of the MR peer.
1818                          */
1819                         spin_lock(&lp->lp_lock);
1820                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1821                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1822                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1823                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1824                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1825                                 lp->lp_primary_nid = *nid;
1826                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1827                         }
1828                         spin_unlock(&lp->lp_lock);
1829                         /*
1830                          * If we're trying to delete a router it means
1831                          * we're moving this peer NI to a new peer, so we
1832                          * must transfer router properties to the new peer.
1833                          */
1834                         if (rtr_refcount > 0) {
1835                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1836                                 lnet_rtr_transfer_to_peer(lp2, lp);
1837                         }
1838                         lnet_peer_del(lp2);
1839                         lnet_peer_ni_decref_locked(lpni);
1840                         lpni = lnet_peer_ni_alloc(nid);
1841                         if (!lpni) {
1842                                 rc = -ENOMEM;
1843                                 goto out_free_lpni;
1844                         }
1845                 }
1846         } else {
1847                 lpni = lnet_peer_ni_alloc(nid);
1848                 if (!lpni) {
1849                         rc = -ENOMEM;
1850                         goto out_free_lpni;
1851                 }
1852         }
1853
1854         /*
1855          * Get the peer_net. Check that we're not adding a second
1856          * peer_ni on a peer_net of a non-multi-rail peer.
1857          */
1858         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1859         if (!lpn) {
1860                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1861                 if (!lpn) {
1862                         rc = -ENOMEM;
1863                         goto out_free_lpni;
1864                 }
1865         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1866                 rc = -ENOTUNIQ;
1867                 goto out_free_lpni;
1868         }
1869
1870         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1871
1872 out_free_lpni:
1873         lnet_peer_ni_decref_locked(lpni);
1874 out:
1875         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1876                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1877                flags, rc);
1878         return rc;
1879 }
1880
1881 /*
1882  * Update the primary NID of a peer, if possible.
1883  *
1884  * Call with the lnet_api_mutex held.
1885  */
1886 static int
1887 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1888                           unsigned int flags)
1889 {
1890         struct lnet_nid old = lp->lp_primary_nid;
1891         int rc = 0;
1892
1893         if (nid_same(&lp->lp_primary_nid, nid))
1894                 goto out;
1895
1896         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1897                 lp->lp_primary_nid = *nid;
1898
1899         rc = lnet_peer_add_nid(lp, nid, flags);
1900         if (rc) {
1901                 lp->lp_primary_nid = old;
1902                 goto out;
1903         }
1904 out:
1905         /* if this is a configured peer or the primary for that peer has
1906          * been locked, then we don't want to flag this scenario as
1907          * a failure
1908          */
1909         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1910             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1911                 return 0;
1912
1913         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1914                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1915
1916         return rc;
1917 }
1918
1919 /*
1920  * lpni creation initiated due to traffic, either sending or receiving.
1921  * Callers must hold the ln_api_mutex.
1922  * A reference is taken on the lnet_peer_ni returned by this function.
1923  */
1924 static struct lnet_peer_ni *
1925 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1926 __must_hold(&the_lnet.ln_api_mutex)
1927 {
1928         struct lnet_peer *lp = NULL;
1929         struct lnet_peer_net *lpn = NULL;
1930         struct lnet_peer_ni *lpni;
1931         unsigned flags = 0;
1932         int rc = 0;
1933
1934         if (LNET_NID_IS_ANY(nid)) {
1935                 rc = -EINVAL;
1936                 goto out_err;
1937         }
1938
1939         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1940         lpni = lnet_peer_ni_find_locked(nid);
1941         if (lpni) {
1942                 /*
1943                  * We must have raced with another thread. Since we
1944                  * know next to nothing about a peer_ni created by
1945                  * traffic, we just assume everything is ok and
1946                  * return.
1947                  */
1948                 goto out;
1949         }
1950
1951         /* Create peer, peer_net, and peer_ni. */
1952         rc = -ENOMEM;
1953         lp = lnet_peer_alloc(nid);
1954         if (!lp)
1955                 goto out_err;
1956         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1957         if (!lpn)
1958                 goto out_err;
1959         lpni = lnet_peer_ni_alloc(nid);
1960         if (!lpni)
1961                 goto out_err;
1962         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1963
1964         /* lnet_peer_attach_peer_ni() always returns 0 */
1965         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1966
1967         lnet_peer_ni_addref_locked(lpni);
1968
1969 out_err:
1970         if (rc) {
1971                 if (lpn)
1972                         LIBCFS_FREE(lpn, sizeof(*lpn));
1973                 if (lp)
1974                         LIBCFS_FREE(lp, sizeof(*lp));
1975                 lpni = ERR_PTR(rc);
1976         }
1977 out:
1978         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1979         return lpni;
1980 }
1981
1982 /*
1983  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1984  *
1985  * This API handles the following combinations:
1986  *   - Create a peer with its primary NI if only the prim_nid is provided.
1987  *   - Add a NID to a peer identified by the prim_nid. The peer identified
1988  *     by the prim_nid must already exist.
1989  *   - The peer being created may be non-MR.
1990  *
1991  * The caller must hold ln_api_mutex. This prevents the peer from
1992  * being created/modified/deleted by a different thread.
1993  */
1994 static int
1995 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
1996                  unsigned int flags)
1997 __must_hold(&the_lnet.ln_api_mutex)
1998 {
1999         struct lnet_peer *lp = NULL;
2000         struct lnet_peer_ni *lpni;
2001
2002         /* The prim_nid must always be specified */
2003         if (LNET_NID_IS_ANY(prim_nid))
2004                 return -EINVAL;
2005
2006         if (mr)
2007                 flags |= LNET_PEER_MULTI_RAIL;
2008
2009         /*
2010          * If nid isn't specified, we must create a new peer with
2011          * prim_nid as its primary nid.
2012          */
2013         if (LNET_NID_IS_ANY(nid))
2014                 return lnet_peer_add(prim_nid, flags);
2015
2016         /* Look up the prim_nid, which must exist. */
2017         lpni = lnet_peer_ni_find_locked(prim_nid);
2018         if (!lpni)
2019                 return -ENOENT;
2020         lp = lpni->lpni_peer_net->lpn_peer;
2021         lnet_peer_ni_decref_locked(lpni);
2022
2023         /* Peer must have been configured. */
2024         if ((flags & LNET_PEER_CONFIGURED) &&
2025             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2026                 CDEBUG(D_NET, "peer %s was not configured\n",
2027                        libcfs_nidstr(prim_nid));
2028                 return -ENOENT;
2029         }
2030
2031         /* Primary NID must match */
2032         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2033                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2034                        libcfs_nidstr(prim_nid),
2035                        libcfs_nidstr(&lp->lp_primary_nid));
2036                 return -ENODEV;
2037         }
2038
2039         /* Multi-Rail flag must match. */
2040         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2041                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2042                        libcfs_nidstr(prim_nid));
2043                 return -EPERM;
2044         }
2045
2046         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2047                 CDEBUG(D_NET,
2048                        "Don't add temporary peer NI for uptodate peer %s\n",
2049                        libcfs_nidstr(&lp->lp_primary_nid));
2050                 return -EINVAL;
2051         }
2052
2053         return lnet_peer_add_nid(lp, nid, flags);
2054 }
2055
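/*
 * Userspace (DLC/ioctl) entry point for adding a peer NI. Always marks
 * the peer as configured; if @lock_prim is set the primary NID is also
 * locked so discovery cannot change it.
 *
 * Illustrative example (hypothetical caller and NIDs), showing the flag
 * combination that is passed down:
 *
 *	rc = lnet_user_add_peer_ni(&prim_nid, &nid, true, true);
 *	// equivalent to lnet_add_peer_ni(&prim_nid, &nid, true,
 *	//	LNET_PEER_CONFIGURED | LNET_PEER_LOCK_PRIMARY);
 */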
2056 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2057                           bool mr, bool lock_prim)
2058 {
2059         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
2060
2061         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2062 }
2063
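/*
 * Strip a peer down to its primary NID. Any in-flight discovery is
 * cancelled, every non-primary NID is deleted, and NIDS_UPTODATE is
 * cleared so the peer will be rediscovered the next time it is used.
 * Used instead of a full delete when the primary NID is locked.
 */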
2064 static int
2065 lnet_reset_peer(struct lnet_peer *lp)
2066 {
2067         struct lnet_peer_net *lpn, *lpntmp;
2068         struct lnet_peer_ni *lpni, *lpnitmp;
2069         unsigned int flags;
2070         int rc;
2071
2072         lnet_peer_cancel_discovery(lp);
2073
2074         flags = LNET_PEER_CONFIGURED;
2075         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2076                 flags |= LNET_PEER_MULTI_RAIL;
2077
2078         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2079                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2080                                          lpni_peer_nis) {
2081                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2082                                 continue;
2083
2084                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2085                         if (rc) {
2086                                 CERROR("Failed to delete %s from peer %s\n",
2087                                        libcfs_nidstr(&lpni->lpni_nid),
2088                                        libcfs_nidstr(&lp->lp_primary_nid));
2089                         }
2090                 }
2091         }
2092
2093         /* mark it for discovery the next time we use it */
2094         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2095         return 0;
2096 }
2097
2098 /*
2099  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2100  *
2101  * This API handles the following combinations:
2102  *   Delete a NI from a peer if both prim_nid and nid are provided.
2103  *   Delete a peer if only prim_nid is provided.
2104  *   Delete a peer if the nid provided is its primary NID.
2105  *
2106  * The caller must hold ln_api_mutex. This prevents the peer from
2107  * being modified/deleted by a different thread.
2108  */
2109 int
2110 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2111                  int force)
2112 {
2113         struct lnet_peer *lp;
2114         struct lnet_peer_ni *lpni;
2115         unsigned int flags;
2116
2117         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2118                 return -EINVAL;
2119
2120         lpni = lnet_peer_ni_find_locked(prim_nid);
2121         if (!lpni)
2122                 return -ENOENT;
2123         lp = lpni->lpni_peer_net->lpn_peer;
2124         lnet_peer_ni_decref_locked(lpni);
2125
2126         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2127                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2128                        libcfs_nidstr(prim_nid),
2129                        libcfs_nidstr(&lp->lp_primary_nid));
2130                 return -ENODEV;
2131         }
2132
2133         lnet_net_lock(LNET_LOCK_EX);
2134         if (lp->lp_rtr_refcount > 0) {
2135                 lnet_net_unlock(LNET_LOCK_EX);
2136                 CERROR("%s is a router. Cannot be deleted\n",
2137                        libcfs_nidstr(prim_nid));
2138                 return -EBUSY;
2139         }
2140         lnet_net_unlock(LNET_LOCK_EX);
2141
2142         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2143                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2144                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2145                                libcfs_nidstr(&lp->lp_primary_nid));
2146                         return lnet_reset_peer(lp);
2147                 } else {
2148                         return lnet_peer_del(lp);
2149                 }
2150         }
2151
2152         flags = LNET_PEER_CONFIGURED;
2153         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2154                 flags |= LNET_PEER_MULTI_RAIL;
2155
2156         return lnet_peer_del_nid(lp, nid, flags);
2157 }
2158
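/*
 * kref release handler, called when the last reference on a peer_ni is
 * dropped. Removes the peer_ni from its table's zombie list if it is
 * still linked there, frees any preferred-NID list, frees the peer_ni
 * itself and drops the reference held on its peer_net.
 */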
2159 void
2160 lnet_destroy_peer_ni_locked(struct kref *ref)
2161 {
2162         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2163                                                  lpni_kref);
2164         struct lnet_peer_table *ptable;
2165         struct lnet_peer_net *lpn;
2166
2167         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2168
2169         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2170         LASSERT(list_empty(&lpni->lpni_txq));
2171         LASSERT(lpni->lpni_txqnob == 0);
2172         LASSERT(list_empty(&lpni->lpni_peer_nis));
2173         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2174
2175         lpn = lpni->lpni_peer_net;
2176         lpni->lpni_peer_net = NULL;
2177         lpni->lpni_net = NULL;
2178
2179         if (!list_empty(&lpni->lpni_hashlist)) {
2180                 /* remove the peer ni from the zombie list */
2181                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2182                 spin_lock(&ptable->pt_zombie_lock);
2183                 list_del_init(&lpni->lpni_hashlist);
2184                 ptable->pt_zombies--;
2185                 spin_unlock(&ptable->pt_zombie_lock);
2186         }
2187
2188         if (lpni->lpni_pref_nnids > 1) {
2189                 struct lnet_nid_list *ne, *tmp;
2190
2191                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2192                                          nl_list) {
2193                         list_del_init(&ne->nl_list);
2194                         LIBCFS_FREE(ne, sizeof(*ne));
2195                 }
2196         }
2197         LIBCFS_FREE(lpni, sizeof(*lpni));
2198
2199         if (lpn)
2200                 lnet_peer_net_decref_locked(lpn);
2201 }
2202
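/*
 * Find the peer_ni for @nid, creating it via the traffic path if it
 * does not exist yet. Called with the ln_api_mutex and lnet_net_lock/EX
 * held; the net lock is dropped and retaken around the creation.
 * Returns a peer_ni with a reference held, or an ERR_PTR on failure.
 */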
2203 struct lnet_peer_ni *
2204 lnet_nid2peerni_ex(struct lnet_nid *nid)
2205 __must_hold(&the_lnet.ln_api_mutex)
2206 {
2207         struct lnet_peer_ni *lpni = NULL;
2208
2209         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2210                 return ERR_PTR(-ESHUTDOWN);
2211
2212         /*
2213          * find if a peer_ni already exists.
2214          * If so then just return that.
2215          */
2216         lpni = lnet_peer_ni_find_locked(nid);
2217         if (lpni)
2218                 return lpni;
2219
2220         lnet_net_unlock(LNET_LOCK_EX);
2221
2222         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2223
2224         lnet_net_lock(LNET_LOCK_EX);
2225
2226         return lpni;
2227 }
2228
2229 /*
2230  * Get a peer_ni for the given nid, create it if necessary. Takes a
2231  * hold on the peer_ni.
2232  */
2233 struct lnet_peer_ni *
2234 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2235                         struct lnet_nid *pref, int cpt)
2236 {
2237         struct lnet_peer_ni *lpni = NULL;
2238
2239         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2240                 return ERR_PTR(-ESHUTDOWN);
2241
2242         /*
2243          * find if a peer_ni already exists.
2244          * If so then just return that.
2245          */
2246         lpni = lnet_peer_ni_find_locked(nid);
2247         if (lpni)
2248                 return lpni;
2249
2250         /*
2251          * Slow path:
2252          * use the lnet_api_mutex to serialize the creation of the peer_ni
2253          * and the creation/deletion of the local ni/net. When a local ni is
2254          * created, if there exists a set of peer_nis on that network,
2255          * they need to be traversed and updated. When a local NI is
2256          * deleted, which could result in a network being deleted, then
2257          * all peer nis on that network need to be removed as well.
2258          *
2259          * Creation through traffic should also be serialized with
2260          * creation through DLC.
2261          */
2262         lnet_net_unlock(cpt);
2263         mutex_lock(&the_lnet.ln_api_mutex);
2264         /*
2265          * the_lnet.ln_state is only modified under the ln_api_mutex, so a
2266          * single check here is sufficient.
2267          */
2268         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2269                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2270
2271         mutex_unlock(&the_lnet.ln_api_mutex);
2272         lnet_net_lock(cpt);
2273
2274         /* Lock has been dropped, check again for shutdown. */
2275         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2276                 if (!IS_ERR_OR_NULL(lpni))
2277                         lnet_peer_ni_decref_locked(lpni);
2278                 lpni = ERR_PTR(-ESHUTDOWN);
2279         }
2280
2281         return lpni;
2282 }
2283
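/* Report whether router (gateway) discovery is in progress for this peer. */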
2284 bool
2285 lnet_peer_gw_discovery(struct lnet_peer *lp)
2286 {
2287         bool rc = false;
2288
2289         spin_lock(&lp->lp_lock);
2290         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2291                 rc = true;
2292         spin_unlock(&lp->lp_lock);
2293
2294         return rc;
2295 }
2296
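/* Lock-taking wrapper around lnet_peer_is_uptodate_locked(). */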
2297 bool
2298 lnet_peer_is_uptodate(struct lnet_peer *lp)
2299 {
2300         bool rc;
2301
2302         spin_lock(&lp->lp_lock);
2303         rc = lnet_peer_is_uptodate_locked(lp);
2304         spin_unlock(&lp->lp_lock);
2305         return rc;
2306 }
2307
2308 /*
2309  * Is a peer uptodate from the point of view of discovery?
2310  *
2311  * If it is currently being processed, obviously not.
2312  * A forced Ping or Push is also handled by the discovery thread.
2313  *
2314  * Otherwise look at whether the peer needs rediscovering.
2315  */
2316 bool
2317 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2318 __must_hold(&lp->lp_lock)
2319 {
2320         bool rc;
2321
2322         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2323                             LNET_PEER_FORCE_PING |
2324                             LNET_PEER_FORCE_PUSH)) {
2325                 rc = false;
2326         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2327                 rc = false;
2328         } else if (lnet_peer_needs_push(lp)) {
2329                 rc = false;
2330         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2331                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2332                         rc = true;
2333                 else
2334                         rc = false;
2335         } else {
2336                 rc = false;
2337         }
2338
2339         return rc;
2340 }
2341
2342 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2343 void
2344 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2345 {
2346         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2347          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2348          * when adding to the list and queuing the peer to ensure that we do not
2349          * strand any messages on the lp_dc_pendq. This scheme ensures the
2350          * message will be resent even if the peer is already being discovered.
2351          * Therefore we needn't check the return value of
2352          * lnet_peer_queue_for_discovery(lp).
2353          */
2354         lnet_net_lock(LNET_LOCK_EX);
2355         spin_lock(&lp->lp_lock);
2356         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2357         spin_unlock(&lp->lp_lock);
2358         lnet_peer_queue_for_discovery(lp);
2359         lnet_net_unlock(LNET_LOCK_EX);
2360 }
2361
2362 /*
2363  * Queue a peer for the attention of the discovery thread.  Call with
2364  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2365  * -EALREADY if the peer was already queued.
2366  */
2367 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2368 {
2369         int rc;
2370
2371         spin_lock(&lp->lp_lock);
2372         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2373                 lp->lp_state |= LNET_PEER_DISCOVERING;
2374         spin_unlock(&lp->lp_lock);
2375         if (list_empty(&lp->lp_dc_list)) {
2376                 lnet_peer_addref_locked(lp);
2377                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2378                 wake_up(&the_lnet.ln_dc_waitq);
2379                 rc = 0;
2380         } else {
2381                 rc = -EALREADY;
2382         }
2383
2384         CDEBUG(D_NET, "Queue peer %s: %d\n",
2385                libcfs_nidstr(&lp->lp_primary_nid), rc);
2386
2387         return rc;
2388 }
2389
2390 /*
2391  * Discovery of a peer is complete. Wake all waiters on the peer.
2392  * Call with lnet_net_lock/EX held.
2393  */
2394 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2395 {
2396         struct lnet_msg *msg, *tmp;
2397         int rc = 0;
2398         LIST_HEAD(pending_msgs);
2399
2400         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2401                libcfs_nidstr(&lp->lp_primary_nid));
2402
2403         spin_lock(&lp->lp_lock);
2404         /* Our caller dropped lp_lock which may have allowed another thread to
2405          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2406          * Ensure it is cleared.
2407          */
2408         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2409         if (dc_error) {
2410                 lp->lp_dc_error = dc_error;
2411                 lp->lp_state |= LNET_PEER_REDISCOVER;
2412         }
2413         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2414         spin_unlock(&lp->lp_lock);
2415         list_del_init(&lp->lp_dc_list);
2416         wake_up(&lp->lp_dc_waitq);
2417
2418         if (lp->lp_rtr_refcount > 0)
2419                 lnet_router_discovery_complete(lp);
2420
2421         lnet_net_unlock(LNET_LOCK_EX);
2422
2423         /* iterate through all pending messages and send them again */
2424         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2425                 list_del_init(&msg->msg_list);
2426                 if (dc_error) {
2427                         lnet_finalize(msg, dc_error);
2428                         continue;
2429                 }
2430
2431                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2432                        lnet_msgtyp2str(msg->msg_type),
2433                        libcfs_idstr(&msg->msg_target));
2434                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2435                                &msg->msg_rtr_nid_param);
2436                 if (rc < 0) {
2437                         CNETERR("Error sending %s to %s: %d\n",
2438                                lnet_msgtyp2str(msg->msg_type),
2439                                libcfs_idstr(&msg->msg_target), rc);
2440                         lnet_finalize(msg, rc);
2441                 }
2442         }
2443         lnet_net_lock(LNET_LOCK_EX);
2444         lnet_peer_decref_locked(lp);
2445 }
2446
2447 /*
2448  * Handle inbound push.
2449  * Like any event handler, called with lnet_res_lock/CPT held.
2450  */
2451 void lnet_peer_push_event(struct lnet_event *ev)
2452 {
2453         struct lnet_ping_buffer *pbuf;
2454         struct lnet_peer *lp;
2455         int infobytes;
2456
2457         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2458
2459         /* lnet_find_peer() adds a refcount */
2460         lp = lnet_find_peer(&ev->source.nid);
2461         if (!lp) {
2462                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2463                        libcfs_nidstr(&ev->initiator.nid),
2464                        libcfs_nidstr(&ev->source.nid));
2465                 pbuf->pb_needs_post = true;
2466                 return;
2467         }
2468
2469         /* Ensure peer state remains consistent while we modify it. */
2470         spin_lock(&lp->lp_lock);
2471
2472         /*
2473          * If some kind of error happened the contents of the message
2474          * cannot be used. Clear the NIDS_UPTODATE and set the
2475          * FORCE_PING flag to trigger a ping.
2476          */
2477         if (ev->status) {
2478                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2479                 lp->lp_state |= LNET_PEER_FORCE_PING;
2480                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2481                        ev->status,
2482                        libcfs_nidstr(&lp->lp_primary_nid),
2483                        libcfs_nidstr(&ev->source.nid));
2484                 goto out;
2485         }
2486
2487         /*
2488          * A push with invalid or corrupted info. Clear the UPTODATE
2489          * flag to trigger a ping.
2490          */
2491         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2492                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2493                 lp->lp_state |= LNET_PEER_FORCE_PING;
2494                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2495                        libcfs_nidstr(&lp->lp_primary_nid));
2496                 goto out;
2497         }
2498
2499         /* Make sure we'll allocate the correct size ping buffer when
2500          * pinging the peer.
2501          */
2502         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2503         if (lp->lp_data_bytes < infobytes)
2504                 lp->lp_data_bytes = infobytes;
2505
2506         /*
2507          * A non-Multi-Rail peer is not supposed to be capable of
2508          * sending a push.
2509          */
2510         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2511                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2512                        libcfs_nidstr(&lp->lp_primary_nid));
2513                 goto out;
2514         }
2515
2516         /*
2517          * The peer may have discovery disabled at its end. Set
2518          * NO_DISCOVERY as appropriate.
2519          */
2520         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2521                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2522                        libcfs_nidstr(&lp->lp_primary_nid));
2523                 /*
2524                  * Mark the peer for deletion if we already know about it
2525                  * and it's going from discovery enabled to discovery disabled
2526                  */
2527                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2528                                       LNET_PEER_DISCOVERING)) &&
2529                      lp->lp_state & LNET_PEER_DISCOVERED) {
2530                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2531                                libcfs_nidstr(&lp->lp_primary_nid),
2532                                lp->lp_state);
2533                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2534                 }
2535                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2536         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2537                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2538                        libcfs_nidstr(&lp->lp_primary_nid));
2539                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2540         }
2541
2542         /*
2543          * Update the MULTI_RAIL flag based on the push. If the peer
2544          * was configured with DLC then the setting should match what
2545          * DLC put in.
2546          * NB: We verified above that the MR feature bit is set in pi_features
2547          */
2548         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2549                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2550                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2551         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2552                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2553                       libcfs_nidstr(&lp->lp_primary_nid));
2554         } else if (lnet_peer_discovery_disabled) {
2555                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2556                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2557         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2558                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2559                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2560         } else {
2561                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2562                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2563                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2564                 lnet_peer_clr_non_mr_pref_nids(lp);
2565         }
2566
2567         /* Check for truncation of the Put message. Clear the
2568          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2569          * and tell discovery to allocate a bigger buffer.
2570          */
2571         if (ev->mlength < ev->rlength) {
2572                 if (the_lnet.ln_push_target_nbytes < infobytes)
2573                         the_lnet.ln_push_target_nbytes = infobytes;
2574                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2575                 lp->lp_state |= LNET_PEER_FORCE_PING;
2576                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2577                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2578                 goto out;
2579         }
2580
2581         /* always assume new data */
2582         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2583         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2584
2585         /* If there is data present that hasn't been processed yet,
2586          * we'll replace it if the Put contained newer data and it
2587          * fits. We're racing with a Ping or earlier Push in this
2588          * case.
2589          */
2590         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2591                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2592                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2593                     infobytes <= lp->lp_data->pb_nbytes) {
2594                         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2595                                infobytes, FLEXIBLE_OBJECT);
2596                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2597                               libcfs_nidstr(&lp->lp_primary_nid),
2598                               LNET_PING_BUFFER_SEQNO(pbuf),
2599                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2600                 }
2601                 goto out;
2602         }
2603
2604         /*
2605          * Allocate a buffer to copy the data. On a failure we drop
2606          * the Push and set FORCE_PING to force the discovery
2607          * thread to fix the problem by pinging the peer.
2608          */
2609         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2610         if (!lp->lp_data) {
2611                 lp->lp_state |= LNET_PEER_FORCE_PING;
2612                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2613                        libcfs_nidstr(&lp->lp_primary_nid),
2614                        LNET_PING_BUFFER_SEQNO(pbuf));
2615                 goto out;
2616         }
2617
2618         /* Success */
2619         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2620                       FLEXIBLE_OBJECT);
2621         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2622         CDEBUG(D_NET, "Received Push %s %u\n",
2623                libcfs_nidstr(&lp->lp_primary_nid),
2624                LNET_PING_BUFFER_SEQNO(pbuf));
2625
2626 out:
2627         /* We've processed this buffer. It can be reposted */
2628         pbuf->pb_needs_post = true;
2629
2630         /*
2631          * Queue the peer for discovery if it is not up to date. If the peer
2632          * was already queued, force it onto the request queue and wake the
2633          * discovery thread, because its status changed.
2634          */
2635         spin_unlock(&lp->lp_lock);
2636         lnet_net_lock(LNET_LOCK_EX);
2637         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2638                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2639                 wake_up(&the_lnet.ln_dc_waitq);
2640         }
2641         /* Drop refcount from lookup */
2642         lnet_peer_decref_locked(lp);
2643         lnet_net_unlock(LNET_LOCK_EX);
2644 }
2645
2646 /*
2647  * Clear the discovery error state, unless we're already discovering
2648  * this peer, in which case the error is current.
2649  */
2650 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2651 {
2652         spin_lock(&lp->lp_lock);
2653         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2654                 lp->lp_dc_error = 0;
2655         spin_unlock(&lp->lp_lock);
2656 }
2657
2658 /*
2659  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2660  * dropped/retaken within this function. An lnet_peer_ni is passed in
2661  * because discovery could tear down an lnet_peer.
2662  */
2663 int
2664 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2665 {
2666         DEFINE_WAIT(wait);
2667         struct lnet_peer *lp = NULL;
2668         int rc = 0;
2669         int count = 0;
2670
2671 again:
2672         if (lp)
2673                 lnet_peer_decref_locked(lp);
2674         lnet_net_unlock(cpt);
2675         lnet_net_lock(LNET_LOCK_EX);
2676         lp = lpni->lpni_peer_net->lpn_peer;
2677         lnet_peer_clear_discovery_error(lp);
2678
2679         /*
2680          * We're willing to be interrupted. The lpni can become a
2681          * zombie if we race with DLC, so we must check for that.
2682          */
2683         for (;;) {
2684                 /* Keep lp alive when the lnet_net_lock is unlocked */
2685                 lnet_peer_addref_locked(lp);
2686                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2687                 if (signal_pending(current))
2688                         break;
2689                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2690                         break;
2691                 /*
2692                  * Don't repeat discovery if discovery is disabled. This is
2693                  * done to ensure we can use discovery as a standard ping as
2694                  * well for backwards compatibility with routers which do not
2695                  * have discovery or have discovery disabled.
2696                  */
2697                 if (lnet_is_discovery_disabled(lp) && count > 0)
2698                         break;
2699                 if (lp->lp_dc_error)
2700                         break;
2701                 if (lnet_peer_is_uptodate(lp))
2702                         break;
2703                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2704                         break;
2705                 lnet_peer_queue_for_discovery(lp);
2706                 count++;
2707                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2708
2709                 /*
2710                  * If caller requested a non-blocking operation then
2711                  * return immediately. Once discovery is complete any
2712                  * pending messages that were stopped due to discovery
2713                  * will be transmitted.
2714                  */
2715                 if (!block)
2716                         break;
2717
2718                 lnet_net_unlock(LNET_LOCK_EX);
2719                 schedule();
2720                 finish_wait(&lp->lp_dc_waitq, &wait);
2721                 lnet_net_lock(LNET_LOCK_EX);
2722                 lnet_peer_decref_locked(lp);
2723                 /* Peer may have changed */
2724                 lp = lpni->lpni_peer_net->lpn_peer;
2725         }
2726         finish_wait(&lp->lp_dc_waitq, &wait);
2727
2728         lnet_net_unlock(LNET_LOCK_EX);
2729         lnet_net_lock(cpt);
2730         /*
2731          * The peer may have changed, so re-check and rediscover if that turns
2732          * out to have been the case. The reference count on lp ensured that
2733          * even if it was unlinked from lpni the memory could not be recycled.
2734          * Thus the check below is sufficient to determine whether the peer
2735          * changed. If the peer changed, then lp must not be dereferenced.
2736          */
2737         if (lp != lpni->lpni_peer_net->lpn_peer)
2738                 goto again;
2739
2740         if (signal_pending(current))
2741                 rc = -EINTR;
2742         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2743                 rc = -ESHUTDOWN;
2744         else if (lp->lp_dc_error)
2745                 rc = lp->lp_dc_error;
2746         else if (!block)
2747                 CDEBUG(D_NET, "non-blocking discovery\n");
2748         else if (!lnet_peer_is_uptodate(lp) &&
2749                  !(lnet_is_discovery_disabled(lp) ||
2750                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2751                 goto again;
2752
2753         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2754                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2755                libcfs_nidstr(&lpni->lpni_nid), rc,
2756                (!block) ? "pending discovery" : "discovery complete");
2757         lnet_peer_decref_locked(lp);
2758
2759         return rc;
2760 }
2761
2762 /* Handle an incoming ack for a push. */
2763 static void
2764 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2765 {
2766         struct lnet_ping_buffer *pbuf;
2767
2768         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2769         spin_lock(&lp->lp_lock);
2770         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2771         lp->lp_push_error = ev->status;
2772         if (ev->status)
2773                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2774         else
2775                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2776         spin_unlock(&lp->lp_lock);
2777
2778         CDEBUG(D_NET, "peer %s ev->status %d\n",
2779                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2780 }
2781
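/*
 * Extract the peer's primary NID from a ping buffer into @nid.
 * If the buffer advertises LNET_PING_FEAT_PRIMARY_LARGE, the first
 * large-address NID is the primary; otherwise pi_ni[1] is used.
 * Returns false if no primary NID could be determined.
 */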
2782 static bool find_primary(struct lnet_nid *nid,
2783                          struct lnet_ping_buffer *pbuf)
2784 {
2785         struct lnet_ping_info *pi = &pbuf->pb_info;
2786         struct lnet_ping_iter piter;
2787         __u32 *stp;
2788
2789         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2790                 /* First large nid is primary */
2791                 for (stp = ping_iter_first(&piter, pbuf, nid);
2792                      stp;
2793                      stp = ping_iter_next(&piter, nid)) {
2794                         if (nid_is_nid4(nid))
2795                                 continue;
2796                         /* nid has already been copied in */
2797                         return true;
2798                 }
2799                 /* no large nids ... weird ... ignore the flag
2800                  * and use first nid.
2801                  */
2802         }
2803         /* pi_ni[1] is primary */
2804         if (pi->pi_nnis < 2)
2805                 return false;
2806         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2807         return true;
2808 }
2809
2810 /* Handle a Reply message. This is the reply to a Ping message. */
2811 static void
2812 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2813 {
2814         struct lnet_ping_buffer *pbuf;
2815         struct lnet_nid primary;
2816         int infobytes;
2817         int rc;
2818         bool ping_feat_disc;
2819
2820         spin_lock(&lp->lp_lock);
2821
2822         lp->lp_disc_src_nid = ev->target.nid;
2823         lp->lp_disc_dst_nid = ev->source.nid;
2824
2825         /*
2826          * If some kind of error happened the contents of message
2827          * cannot be used. Set PING_FAILED to trigger a retry.
2828          */
2829         if (ev->status) {
2830                 lp->lp_state |= LNET_PEER_PING_FAILED;
2831                 lp->lp_ping_error = ev->status;
2832                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2833                        ev->status,
2834                        libcfs_nidstr(&lp->lp_primary_nid),
2835                        libcfs_nidstr(&ev->source.nid));
2836                 goto out;
2837         }
2838
2839         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2840         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2841                 lnet_swap_pinginfo(pbuf);
2842
2843         /*
2844          * A reply with invalid or corrupted info. Set PING_FAILED to
2845          * trigger a retry.
2846          */
2847         rc = lnet_ping_info_validate(&pbuf->pb_info);
2848         if (rc) {
2849                 lp->lp_state |= LNET_PEER_PING_FAILED;
2850                 lp->lp_ping_error = 0;
2851                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2852                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2853                 goto out;
2854         }
2855
2856         /*
2857          * The peer may have discovery disabled at its end. Set
2858          * NO_DISCOVERY as appropriate.
2859          */
2860         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2861         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2862                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2863                        libcfs_nidstr(&lp->lp_primary_nid),
2864                        ping_feat_disc ? "enabled" : "disabled",
2865                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2866
2867                 /* Detect whether this peer has toggled discovery from on to
2868                  * off and whether we can delete and re-create the peer. Peers
2869                  * that were manually configured cannot be deleted by discovery.
2870                  * We need to delete this peer and re-create it if the peer was
2871                  * not configured manually, is currently considered DD capable,
2872                  * and either:
2873                  * 1. We've already discovered the peer (the peer has toggled
2874                  *    the discovery feature from on to off), or
2875                  * 2. The peer is considered MR, but it was not user configured
2876                  *    (this was a "temporary" peer created via the kernel APIs
2877                  *     that we're discovering for the first time)
2878                  */
2879                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2880                                       LNET_PEER_NO_DISCOVERY)) &&
2881                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2882                                      LNET_PEER_MULTI_RAIL))) {
2883                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2884                                libcfs_nidstr(&lp->lp_primary_nid),
2885                                lp->lp_state);
2886                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2887                 }
2888                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2889         } else {
2890                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2891                        libcfs_nidstr(&lp->lp_primary_nid));
2892                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2893         }
2894
2895         /*
2896          * Update the MULTI_RAIL flag based on the reply. If the peer
2897          * was configured with DLC then the setting should match what
2898          * DLC put in.
2899          */
2900         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2901                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2902                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2903                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2904                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2905                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2906                               libcfs_nidstr(&lp->lp_primary_nid));
2907                 } else if (lnet_peer_discovery_disabled) {
2908                         CDEBUG(D_NET,
2909                                "peer %s(%p) not MR: DD disabled locally\n",
2910                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2911                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2912                         CDEBUG(D_NET,
2913                                "peer %s(%p) not MR: DD disabled remotely\n",
2914                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2915                 } else {
2916                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2917                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2918                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2919                         lnet_peer_clr_non_mr_pref_nids(lp);
2920                 }
2921         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2922                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2923                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2924                               libcfs_nidstr(&lp->lp_primary_nid));
2925                 } else {
2926                         CERROR("Multi-Rail state vanished from %s\n",
2927                                libcfs_nidstr(&lp->lp_primary_nid));
2928                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2929                 }
2930         }
2931
2932         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2933         /*
2934          * Make sure we'll allocate the correct size ping buffer when
2935          * pinging the peer.
2936          */
2937         if (lp->lp_data_bytes < infobytes)
2938                 lp->lp_data_bytes = infobytes;
2939
2940         /* Check for truncation of the Reply. Clear PING_SENT and set
2941          * PING_FAILED to trigger a retry.
2942          */
2943         if (pbuf->pb_nbytes < infobytes) {
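                /* Record the larger size so the push target can be resized
                 * to accommodate this peer's ping info.
                 */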
2944                 if (the_lnet.ln_push_target_nbytes < infobytes)
2945                         the_lnet.ln_push_target_nbytes = infobytes;
2946                 lp->lp_state |= LNET_PEER_PING_FAILED;
2947                 lp->lp_ping_error = 0;
2948                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2949                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2950                 goto out;
2951         }
2952
2953         /*
2954          * Check the sequence numbers in the reply. These are only
2955          * available if the reply came from a Multi-Rail peer.
2956          */
2957         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2958             find_primary(&primary, pbuf) &&
2959             nid_same(&lp->lp_primary_nid, &primary)) {
2960                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2961                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2962                                 libcfs_nidstr(&lp->lp_primary_nid),
2963                                 LNET_PING_BUFFER_SEQNO(pbuf),
2964                                 lp->lp_peer_seqno);
2965
2966                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2967         }
2968
2969         /* We're happy with the state of the data in the buffer. */
2970         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2971                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2972                lp->lp_state);
2973         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2974                 lnet_ping_buffer_decref(lp->lp_data);
2975         else
2976                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2977         lnet_ping_buffer_addref(pbuf);
2978         lp->lp_data = pbuf;
2979 out:
2980         lp->lp_state &= ~LNET_PEER_PING_SENT;
2981         spin_unlock(&lp->lp_lock);
2982 }
2983
2984 /*
2985  * Send event handling. Only matters for error cases, where we clean
2986  * up state on the peer and peer_ni that would otherwise be updated in
2987  * the REPLY event handler for a successful Ping, and the ACK event
2988  * handler for a successful Push.
2989  */
2990 static int
2991 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2992 {
2993         int rc = 0;
2994
2995         if (!ev->status)
2996                 goto out;
2997
2998         spin_lock(&lp->lp_lock);
2999         if (ev->msg_type == LNET_MSG_GET) {
3000                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3001                 lp->lp_state |= LNET_PEER_PING_FAILED;
3002                 lp->lp_ping_error = ev->status;
3003         } else { /* ev->msg_type == LNET_MSG_PUT */
3004                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3005                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3006                 lp->lp_push_error = ev->status;
3007         }
3008         spin_unlock(&lp->lp_lock);
3009         rc = LNET_REDISCOVER_PEER;
3010 out:
3011         CDEBUG(D_NET, "%s Send to %s: %d\n",
3012                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3013                 libcfs_nidstr(&ev->target.nid), rc);
3014         return rc;
3015 }
3016
3017 /*
3018  * Unlink event handling. This event is only seen if a call to
3019  * LNetMDUnlink() caused the event to be unlinked. If this call was
3020  * made after the event was set up in LNetGet() or LNetPut() then we
3021  * assume the Ping or Push timed out.
3022  */
3023 static void
3024 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3025 {
3026         spin_lock(&lp->lp_lock);
3027         /* We've passed through LNetGet() */
3028         if (lp->lp_state & LNET_PEER_PING_SENT) {
3029                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3030                 lp->lp_state |= LNET_PEER_PING_FAILED;
3031                 lp->lp_ping_error = -ETIMEDOUT;
3032                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3033                         libcfs_nidstr(&lp->lp_primary_nid));
3034         }
3035         /* We've passed through LNetPut() */
3036         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3037                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3038                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3039                 lp->lp_push_error = -ETIMEDOUT;
3040                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3041                         libcfs_nidstr(&lp->lp_primary_nid));
3042         }
3043         spin_unlock(&lp->lp_lock);
3044 }
3045
3046 /*
3047  * Event handler for the discovery EQ.
3048  *
3049  * Called with lnet_res_lock(cpt) held. The cpt is the
3050  * lnet_cpt_of_cookie() of the md handle cookie.
3051  */
3052 static void lnet_discovery_event_handler(struct lnet_event *event)
3053 {
3054         struct lnet_peer *lp = event->md_user_ptr;
3055         struct lnet_ping_buffer *pbuf;
3056         int rc;
3057
3058         /* discovery needs to take another look */
3059         rc = LNET_REDISCOVER_PEER;
3060
3061         CDEBUG(D_NET, "Received event: %d\n", event->type);
3062
3063         switch (event->type) {
3064         case LNET_EVENT_ACK:
3065                 lnet_discovery_event_ack(lp, event);
3066                 break;
3067         case LNET_EVENT_REPLY:
3068                 lnet_discovery_event_reply(lp, event);
3069                 break;
3070         case LNET_EVENT_SEND:
3071                 /* Only send failure triggers a retry. */
3072                 rc = lnet_discovery_event_send(lp, event);
3073                 break;
3074         case LNET_EVENT_UNLINK:
3075                 /* LNetMDUnlink() was called */
3076                 lnet_discovery_event_unlink(lp, event);
3077                 break;
3078         default:
3079                 /* Invalid events. */
3080                 LBUG();
3081         }
3082         lnet_net_lock(LNET_LOCK_EX);
3083
3084         /* put peer back at end of request queue, if discovery not already
3085          * done */
3086         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3087             lnet_peer_queue_for_discovery(lp)) {
3088                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3089                 wake_up(&the_lnet.ln_dc_waitq);
3090         }
3091         if (event->unlinked) {
3092                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3093                 lnet_ping_buffer_decref(pbuf);
3094                 lnet_peer_decref_locked(lp);
3095         }
3096         lnet_net_unlock(LNET_LOCK_EX);
3097 }
3098
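/* Begin iterating over the NI status entries in a ping buffer. Returns a
 * pointer to the status of the first entry and, if @nid is non-NULL, copies
 * that entry's NID into it. lnet_ping_info_validate() guarantees at least
 * one entry is present. Use ping_iter_next() to walk the remaining entries.
 */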
3099 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3100                      struct lnet_ping_buffer *pbuf,
3101                      struct lnet_nid *nid)
3102 {
3103         pi->pinfo = &pbuf->pb_info;
3104         pi->pos = &pbuf->pb_info.pi_ni;
3105         pi->end = (void *)pi->pinfo +
3106                   min_t(int, pbuf->pb_nbytes,
3107                         lnet_ping_info_size(pi->pinfo));
3108         /* lnet_ping_info_validate ensures there will be one
3109          * lnet_ni_status at the start
3110          */
3111         if (nid)
3112                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3113
3114         pi->pos += sizeof(struct lnet_ni_status);
3115         return &pbuf->pb_info.pi_ni[0].ns_status;
3116 }
3117
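/* Advance the iterator to the next NI status entry. The fixed-size (nid4)
 * entries in pi_ni[] are walked first; once those are exhausted, large-address
 * entries follow if LNET_PING_FEAT_LARGE_ADDR is set. Entries whose NID is too
 * large to represent in a struct lnet_nid are skipped. Returns NULL at the end
 * of the buffer.
 */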
3118 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3119 {
3120         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3121
3122         if (pi->pos < ((void *)pi->pinfo + off)) {
3123                 struct lnet_ni_status *ns = pi->pos;
3124
3125                 pi->pos = ns + 1;
3126                 if (pi->pos > pi->end)
3127                         return NULL;
3128                 if (nid)
3129                         lnet_nid4_to_nid(ns->ns_nid, nid);
3130                 return &ns->ns_status;
3131         }
3132
3133         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3134                 struct lnet_ni_large_status *lns = pi->pos;
3135
3136                 if (pi->pos + 8 > pi->end)
3137                         /* Not safe to examine next */
3138                         return NULL;
3139                 pi->pos = lnet_ping_sts_next(lns);
3140                 if (pi->pos > pi->end)
3141                         return NULL;
3142                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3143                         continue;
3144                 if (nid)
3145                         *nid = lns->ns_nid;
3146                 return &lns->ns_status;
3147         }
3148         return NULL;
3149 }
3150
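/* Count the NI status entries (both nid4 and large-address) in a ping buffer. */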
3151 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3152 {
3153         struct lnet_ping_iter pi;
3154         u32 *st;
3155         int nnis = 0;
3156
3157         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3158              st = ping_iter_next(&pi, NULL))
3159                 nnis += 1;
3160
3161         return nnis;
3162 }
3163
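/* Update a peer NI's health from the status reported in a ping or push: a NI
 * reported down is handled as a remote failure, while a NI reported up that we
 * have not otherwise heard from has its health reset to the maximum.
 */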
3164 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3165 {
3166         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN) {
3167                 lnet_net_lock(0);
3168                 lnet_handle_remote_failure_locked(lpni);
3169                 lnet_net_unlock(0);
3170         } else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3171                  !lpni->lpni_last_alive)
3172                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3173 }
3174
3175 /*
3176  * Build a peer from incoming data.
3177  *
3178  * The NIDs in the incoming data are supposed to be structured as follows:
3179  *  - loopback
3180  *  - primary NID
3181  *  - other NIDs in same net
3182  *  - NIDs in second net
3183  *  - NIDs in third net
3184  *  - ...
3185  * This is due to the way the list of NIDs in the data is created.
3186  *
3187  * Note that this function will mark the peer uptodate unless an
3188  * ENOMEM is encountered. All other errors are due to a conflict
3189  * between the DLC configuration and what discovery sees. We treat DLC
3190  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3191  * peer from becoming stuck in discovery.
3192  */
3193 static int lnet_peer_merge_data(struct lnet_peer *lp,
3194                                 struct lnet_ping_buffer *pbuf)
3195 {
3196         struct lnet_peer_net *lpn;
3197         struct lnet_peer_ni *lpni;
3198         struct lnet_nid *curnis = NULL;
3199         struct lnet_ni_large_status *addnis = NULL;
3200         struct lnet_nid *delnis = NULL;
3201         struct lnet_ping_iter pi;
3202         struct lnet_nid nid;
3203         u32 *stp;
3204         struct lnet_nid primary = {};
3205         bool want_large_primary;
3206         unsigned int flags;
3207         int ncurnis;
3208         int naddnis;
3209         int ndelnis;
3210         int nnis = 0;
3211         int i;
3212         int j;
3213         int rc;
3214         __u32 old_st;
3215
3216         flags = LNET_PEER_DISCOVERED;
3217         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3218                 flags |= LNET_PEER_MULTI_RAIL;
3219
3220         /*
3221          * Cache the routing feature for the peer; whether it is enabled
3222          * or disabled, as reported by the remote peer.
3223          */
3224         spin_lock(&lp->lp_lock);
3225         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3226                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3227         else
3228                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3229         spin_unlock(&lp->lp_lock);
3230
3231         nnis = ping_info_count_entries(pbuf);
3232         nnis = max_t(int, lp->lp_nnis, nnis);
3233         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3234         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3235         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3236         if (!curnis || !addnis || !delnis) {
3237                 rc = -ENOMEM;
3238                 goto out;
3239         }
3240         ncurnis = 0;
3241         naddnis = 0;
3242         ndelnis = 0;
3243
3244         /* Construct the list of NIDs present in peer. */
3245         lpni = NULL;
3246         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3247                 curnis[ncurnis++] = lpni->lpni_nid;
3248
3249         /* Check for NIDs in pbuf not present in curnis[].
3250          * Skip the first, which is loop-back.  Take second as
3251          * primary, unless a large primary is found.
3252          */
3253         ping_iter_first(&pi, pbuf, NULL);
3254         stp = ping_iter_next(&pi, &nid);
3255         if (stp)
3256                 primary = nid;
3257         want_large_primary = (pbuf->pb_info.pi_features &
3258                               LNET_PING_FEAT_PRIMARY_LARGE);
3259         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3260                 for (j = 0; j < ncurnis; j++)
3261                         if (nid_same(&nid, &curnis[j]))
3262                                 break;
3263                 if (j == ncurnis) {
3264                         addnis[naddnis].ns_nid = nid;
3265                         addnis[naddnis].ns_status = *stp;
3266                         naddnis += 1;
3267                 }
3268                 if (want_large_primary && nid.nid_size) {
3269                         primary = nid;
3270                         want_large_primary = false;
3271                 }
3272         }
3273         /*
3274          * Check for NIDs in curnis[] not present in pbuf.
3275          * The inner iteration skips the first (loopback) entry in pbuf.
3276          *
3277          * The loopback NID is also never added to delnis[]: if it is
3278          * present in curnis[] then this peer is for this node.
3279          */
3280         for (i = 0; i < ncurnis; i++) {
3281                 if (nid_is_lo0(&curnis[i]))
3282                         continue;
3283                 ping_iter_first(&pi, pbuf, NULL);
3284                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3285                         if (nid_same(&curnis[i], &nid)) {
3286                                 /*
3287                                  * update the information we cache for the
3288                                  * peer with the latest information we
3289                                  * received
3290                                  */
3291                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3292                                 if (lpni) {
3293                                         old_st = lpni->lpni_ns_status;
3294                                         lpni->lpni_ns_status = *stp;
3295                                         if (old_st != lpni->lpni_ns_status)
3296                                                 handle_disc_lpni_health(lpni);
3297                                         lnet_peer_ni_decref_locked(lpni);
3298                                 }
3299                                 break;
3300                         }
3301                 }
3302                 if (!stp)
3303                         delnis[ndelnis++] = curnis[i];
3304         }
3305
3306         /*
3307          * If we get here and discovery is disabled then we don't want
3308          * to add or delete any NIs. We have just updated the status of
3309          * the NIs we have information on, and we are done.
3310          */
3311         rc = 0;
3312         if (lnet_is_discovery_disabled(lp))
3313                 goto out;
3314
3315         for (i = 0; i < naddnis; i++) {
3316                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3317                 if (rc) {
3318                         CERROR("Error adding NID %s to peer %s: %d\n",
3319                                libcfs_nidstr(&addnis[i].ns_nid),
3320                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3321                         if (rc == -ENOMEM)
3322                                 goto out;
3323                 }
3324                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3325                 if (lpni) {
3326                         lpni->lpni_ns_status = addnis[i].ns_status;
3327                         handle_disc_lpni_health(lpni);
3328                         lnet_peer_ni_decref_locked(lpni);
3329                 }
3330         }
3331
3332         for (i = 0; i < ndelnis; i++) {
3333                 /*
3334                  * for routers it's okay to delete the primary_nid because
3335                  * the upper layers don't really rely on it. So if we're
3336                  * being told that the router changed its primary_nid
3337                  * then it's okay to delete it.
3338                  */
3339                 if (lp->lp_rtr_refcount > 0)
3340                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3341                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3342                 if (rc) {
3343                         CERROR("Error deleting NID %s from peer %s: %d\n",
3344                                libcfs_nidstr(&delnis[i]),
3345                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3346                         if (rc == -ENOMEM)
3347                                 goto out;
3348                 }
3349         }
3350
3351         /* The peer net for the primary NID should be the first entry in the
3352          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3353          * be the first entry in its peer net's lpn_peer_nis list.
3354          */
3355         find_primary(&nid, pbuf);
3356         lpni = lnet_peer_ni_find_locked(&nid);
3357         if (!lpni) {
3358                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3359                        libcfs_nidstr(&nid));
3360                 goto out;
3361         }
3362
3363         lpn = lpni->lpni_peer_net;
3364         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3365                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3366
3367         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3368                 list_move(&lpni->lpni_peer_nis,
3369                           &lpni->lpni_peer_net->lpn_peer_nis);
3370
3371         lnet_peer_ni_decref_locked(lpni);
3372         /*
3373          * Errors other than -ENOMEM are due to peers having been
3374          * configured with DLC. Ignore these because DLC overrides
3375          * Discovery.
3376          */
3377         rc = 0;
3378 out:
3379         /* If this peer is a gateway, invoke the routing callback to update
3380          * the associated route status
3381          */
3382         if (lp->lp_rtr_refcount > 0)
3383                 lnet_router_discovery_ping_reply(lp, pbuf);
3384
3385         CFS_FREE_PTR_ARRAY(curnis, nnis);
3386         CFS_FREE_PTR_ARRAY(addnis, nnis);
3387         CFS_FREE_PTR_ARRAY(delnis, nnis);
3388         lnet_ping_buffer_decref(pbuf);
3389         CDEBUG(D_NET, "peer %s (%p): %d\n",
3390                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3391
3392         if (rc) {
3393                 spin_lock(&lp->lp_lock);
3394                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3395                 lp->lp_state |= LNET_PEER_FORCE_PING;
3396                 spin_unlock(&lp->lp_lock);
3397         }
3398         return rc;
3399 }
3400
3401 /*
3402  * The data in pbuf says lp is its primary peer, but the data was
3403  * received by a different peer. Try to update lp with the data.
3404  */
3405 static int
3406 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3407 {
3408         struct lnet_handle_md mdh;
3409
3410         /* Queue lp for discovery, and force it on the request queue. */
3411         lnet_net_lock(LNET_LOCK_EX);
3412         if (lnet_peer_queue_for_discovery(lp))
3413                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3414         lnet_net_unlock(LNET_LOCK_EX);
3415
3416         LNetInvalidateMDHandle(&mdh);
3417
3418         /*
3419          * Decide whether we can move the peer to the DATA_PRESENT state.
3420          *
3421          * We replace stale data for a multi-rail peer, repair PING_FAILED
3422          * status, and preempt FORCE_PING.
3423          *
3424          * If after that we have DATA_PRESENT, we merge it into this peer.
3425          */
3426         spin_lock(&lp->lp_lock);
3427         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3428                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3429                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3430                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3431                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3432                         lnet_ping_buffer_decref(pbuf);
3433                         pbuf = lp->lp_data;
3434                         lp->lp_data = NULL;
3435                 }
3436         }
3437         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3438                 lnet_ping_buffer_decref(lp->lp_data);
3439                 lp->lp_data = NULL;
3440                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3441         }
3442         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3443                 mdh = lp->lp_ping_mdh;
3444                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3445                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3446                 lp->lp_ping_error = 0;
3447         }
3448         if (lp->lp_state & LNET_PEER_FORCE_PING)
3449                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3450         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3451         spin_unlock(&lp->lp_lock);
3452
3453         if (!LNetMDHandleIsInvalid(mdh))
3454                 LNetMDUnlink(mdh);
3455
3456         if (pbuf)
3457                 return lnet_peer_merge_data(lp, pbuf);
3458
3459         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3460         return 0;
3461 }
3462
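/* Return true if @nid appears among the NI status entries in @pbuf. */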
3463 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3464                                      struct lnet_ping_buffer *pbuf)
3465 {
3466         struct lnet_ping_iter pi;
3467         struct lnet_nid pnid;
3468         u32 *st;
3469
3470         for (st = ping_iter_first(&pi, pbuf, &pnid);
3471              st;
3472              st = ping_iter_next(&pi, &pnid))
3473                 if (nid_same(nid, &pnid))
3474                         return true;
3475         return false;
3476 }
3477
3478 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3479  * to the discovery queue a reference was taken that will prevent the peer from
3480  * actually being freed by this function. After this function exits the
3481  * discovery thread should call lnet_peer_discovery_complete() which will
3482  * drop that reference as well as wake any waiters that may also be holding a
3483  * ref on the peer
3484  */
3485 static int lnet_peer_deletion(struct lnet_peer *lp)
3486 __must_hold(&lp->lp_lock)
3487 {
3488         struct list_head rlist;
3489         struct lnet_route *route, *tmp;
3490         int sensitivity = lp->lp_health_sensitivity;
3491         int rc = 0;
3492
3493         INIT_LIST_HEAD(&rlist);
3494
3495         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3496                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3497
3498         /* no-op if lnet_peer_del() has already been called on this peer */
3499         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3500                 goto clear_discovering;
3501
3502         spin_unlock(&lp->lp_lock);
3503
3504         mutex_lock(&the_lnet.ln_api_mutex);
3505         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3506             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3507                 mutex_unlock(&the_lnet.ln_api_mutex);
3508                 spin_lock(&lp->lp_lock);
3509                 rc = -ESHUTDOWN;
3510                 goto clear_discovering;
3511         }
3512
3513         lnet_peer_cancel_discovery(lp);
3514         lnet_net_lock(LNET_LOCK_EX);
3515         list_for_each_entry_safe(route, tmp,
3516                                  &lp->lp_routes,
3517                                  lr_gwlist)
3518                 lnet_move_route(route, NULL, &rlist);
3519
3520         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3521         rc = lnet_peer_del_locked(lp);
3522         if (rc)
3523                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3524                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3525
3526         lnet_net_unlock(LNET_LOCK_EX);
3527
3528         list_for_each_entry_safe(route, tmp,
3529                                  &rlist, lr_list) {
3530                 /* re-add these routes */
3531                 lnet_add_route(route->lr_net,
3532                                route->lr_hops,
3533                                &route->lr_nid,
3534                                route->lr_priority,
3535                                sensitivity);
3536                 LIBCFS_FREE(route, sizeof(*route));
3537         }
3538
3539         mutex_unlock(&the_lnet.ln_api_mutex);
3540
3541         spin_lock(&lp->lp_lock);
3542
3543         rc = 0;
3544
3545 clear_discovering:
3546         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3547                           LNET_PEER_FORCE_PUSH);
3548
3549         return rc;
3550 }
3551
3552 /*
3553  * Update a peer using the data received.
3554  */
3555 static int lnet_peer_data_present(struct lnet_peer *lp)
3556 __must_hold(&lp->lp_lock)
3557 {
3558         struct lnet_ping_buffer *pbuf;
3559         struct lnet_peer_ni *lpni;
3560         struct lnet_nid nid;
3561         unsigned int flags;
3562         int rc = 0;
3563
3564         pbuf = lp->lp_data;
3565         lp->lp_data = NULL;
3566         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3567         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3568         spin_unlock(&lp->lp_lock);
3569
3570         /*
3571          * Modifications of peer structures are done while holding the
3572          * ln_api_mutex. A global lock is required because we may be
3573          * modifying multiple peer structures, and a mutex greatly
3574          * simplifies memory management.
3575          *
3576          * The actual changes to the data structures must also protect
3577          * against concurrent lookups, for which the lnet_net_lock in
3578          * LNET_LOCK_EX mode is used.
3579          */
3580         mutex_lock(&the_lnet.ln_api_mutex);
3581         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3582                 lnet_ping_buffer_decref(pbuf);
3583                 rc = -ESHUTDOWN;
3584                 goto out;
3585         }
3586
3587         /*
3588          * If this peer is not on the peer list then it is being torn
3589          * down, and our reference count may be all that is keeping it
3590          * alive. Don't do any work on it.
3591          */
3592         if (list_empty(&lp->lp_peer_list)) {
3593                 lnet_ping_buffer_decref(pbuf);
3594                 goto out;
3595         }
3596
3597         flags = LNET_PEER_DISCOVERED;
3598         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3599                 flags |= LNET_PEER_MULTI_RAIL;
3600
3601         /*
3602          * Check whether the primary NID in the message matches the
3603          * primary NID of the peer. If it does, update the peer; if
3604          * it does not, check whether there is already a peer with
3605          * that primary NID. If no such peer exists, try to update
3606          * the primary NID of the current peer (allowed if it was
3607          * created due to message traffic) and complete the update.
3608          * If the peer did exist, hand off the data to it.
3609          *
3610          * The peer for the loopback interface is a special case: this
3611          * is the peer for the local node, and we want to set its
3612          * primary NID to the correct value here. Moreover, this peer
3613          * can show up with only the loopback NID in the ping buffer.
3614          */
3615         if (!find_primary(&nid, pbuf)) {
3616                 lnet_ping_buffer_decref(pbuf);
3617                 goto out;
3618         }
3619         if (nid_is_lo0(&lp->lp_primary_nid)) {
3620                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3621                 if (rc)
3622                         lnet_ping_buffer_decref(pbuf);
3623                 else
3624                         rc = lnet_peer_merge_data(lp, pbuf);
3625         /*
3626          * If the primary NID we have cached for the peer appears in the
3627          * ping info returned by the peer, but it is not the primary NID
3628          * the peer reports, and discovery is disabled, then we don't
3629          * want to update our local peer info by adding or removing
3630          * NIDs; we just want to update the status of the NIDs that we
3631          * currently have recorded for that peer.
3632          */
3633         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3634                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3635                     lnet_is_discovery_disabled(lp))) {
3636                 rc = lnet_peer_merge_data(lp, pbuf);
3637         } else {
3638                 lpni = lnet_peer_ni_find_locked(&nid);
3639                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3640                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3641                         if (rc) {
3642                                 CERROR("Primary NID error %s versus %s: %d\n",
3643                                        libcfs_nidstr(&lp->lp_primary_nid),
3644                                        libcfs_nidstr(&nid), rc);
3645                                 lnet_ping_buffer_decref(pbuf);
3646                         } else {
3647                                 rc = lnet_peer_merge_data(lp, pbuf);
3648                         }
3649                         if (lpni)
3650                                 lnet_peer_ni_decref_locked(lpni);
3651                 } else {
3652                         struct lnet_peer *new_lp;
3653                         new_lp = lpni->lpni_peer_net->lpn_peer;
3654                         /*
3655                          * if lp has discovery/MR enabled that means new_lp
3656                          * should have discovery/MR enabled as well, since
3657                          * it's the same peer, which we're about to merge
3658                          */
3659                         spin_lock(&lp->lp_lock);
3660                         spin_lock(&new_lp->lp_lock);
3661                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3662                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3663                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3664                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3665                         /* If we're processing a ping reply then we may be
3666                          * about to send a push to the peer that we ping'd.
3667                          * Since the ping reply that we're processing was
3668                          * received by lp, we need to set the discovery source
3669                          * NID for new_lp to the NID stored in lp.
3670                          */
3671                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3672                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3673                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3674                         }
3675                         spin_unlock(&new_lp->lp_lock);
3676                         spin_unlock(&lp->lp_lock);
3677
3678                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3679                         lnet_consolidate_routes_locked(lp, new_lp);
3680                         lnet_peer_ni_decref_locked(lpni);
3681                 }
3682         }
3683 out:
3684         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3685                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3686                lp->lp_state);
3687         mutex_unlock(&the_lnet.ln_api_mutex);
3688
3689         spin_lock(&lp->lp_lock);
3690         /* Tell discovery to re-check the peer immediately. */
3691         if (!rc)
3692                 rc = LNET_REDISCOVER_PEER;
3693         return rc;
3694 }
3695
3696 /*
3697  * A ping failed. Clear the PING_FAILED state and set the
3698  * FORCE_PING state, to ensure a retry even if discovery is
3699  * disabled. This avoids being left with incorrect state.
3700  */
3701 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3702 __must_hold(&lp->lp_lock)
3703 {
3704         struct lnet_handle_md mdh;
3705         int rc;
3706
3707         mdh = lp->lp_ping_mdh;
3708         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3709         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3710         lp->lp_state |= LNET_PEER_FORCE_PING;
3711         rc = lp->lp_ping_error;
3712         lp->lp_ping_error = 0;
3713         spin_unlock(&lp->lp_lock);
3714
3715         if (!LNetMDHandleIsInvalid(mdh))
3716                 LNetMDUnlink(mdh);
3717
3718         CDEBUG(D_NET, "peer %s:%d\n",
3719                libcfs_nidstr(&lp->lp_primary_nid), rc);
3720
3721         spin_lock(&lp->lp_lock);
3722         return rc ? rc : LNET_REDISCOVER_PEER;
3723 }
3724
3725 /* Active side of ping. */
3726 static int lnet_peer_send_ping(struct lnet_peer *lp)
3727 __must_hold(&lp->lp_lock)
3728 {
3729         int bytes;
3730         int rc;
3731         int cpt;
3732
3733         lp->lp_state |= LNET_PEER_PING_SENT;
3734         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3735         spin_unlock(&lp->lp_lock);
3736
3737         cpt = lnet_net_lock_current();
3738         /* Refcount for MD. */
3739         lnet_peer_addref_locked(lp);
3740         lnet_net_unlock(cpt);
3741
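        /* lp_data_bytes tracks the size of the peer's last ping reply (see
         * lnet_discovery_event_reply()), so the buffer is made at least that
         * large, and never smaller than the minimum ping info size.
         */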
3742         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3743
3744         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3745                             the_lnet.ln_dc_handler, false);
3746         /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3747          * refcount on the peer, otherwise LNetMDUnlink will be called
3748          * which will eventually do that.
3749          */
3750         if (rc > 0) {
3751                 lnet_net_lock(cpt);
3752                 lnet_peer_decref_locked(lp);
3753                 lnet_net_unlock(cpt);
3754                 rc = -rc; /* change the rc to negative value */
3755                 goto fail_error;
3756         } else if (rc < 0) {
3757                 goto fail_error;
3758         }
3759
3760         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3761
3762         spin_lock(&lp->lp_lock);
3763         return 0;
3764
3765 fail_error:
3766         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3767         /*
3768          * The errors that get us here are considered hard errors and
3769          * cause Discovery to terminate. So we clear PING_SENT, but do
3770          * not set either PING_FAILED or FORCE_PING. In fact we need
3771          * to clear PING_FAILED, because the unlink event handler will
3772          * have set it if we called LNetMDUnlink() above.
3773          */
3774         spin_lock(&lp->lp_lock);
3775         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3776         return rc;
3777 }
3778
3779 /*
3780  * This function exists because you cannot call LNetMDUnlink() from an
3781  * event handler.
3782  */
3783 static int lnet_peer_push_failed(struct lnet_peer *lp)
3784 __must_hold(&lp->lp_lock)
3785 {
3786         struct lnet_handle_md mdh;
3787         int rc;
3788
3789         mdh = lp->lp_push_mdh;
3790         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3791         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3792         rc = lp->lp_push_error;
3793         lp->lp_push_error = 0;
3794         spin_unlock(&lp->lp_lock);
3795
3796         if (!LNetMDHandleIsInvalid(mdh))
3797                 LNetMDUnlink(mdh);
3798
3799         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3800         spin_lock(&lp->lp_lock);
3801         return rc ? rc : LNET_REDISCOVER_PEER;
3802 }
3803
3804 /*
3805  * Mark the peer as discovered.
3806  */
3807 static int lnet_peer_discovered(struct lnet_peer *lp)
3808 __must_hold(&lp->lp_lock)
3809 {
3810         lp->lp_state |= LNET_PEER_DISCOVERED;
3811         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3812                           LNET_PEER_REDISCOVER);
3813
3814         lp->lp_dc_error = 0;
3815
3816         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3817
3818         return 0;
3819 }
3820
3821 /* Active side of push. */
3822 static int lnet_peer_send_push(struct lnet_peer *lp)
3823 __must_hold(&lp->lp_lock)
3824 {
3825         struct lnet_ping_buffer *pbuf;
3826         struct lnet_processid id;
3827         struct lnet_md md;
3828         int cpt;
3829         int rc;
3830
3831         /* Don't push to a non-multi-rail peer. */
3832         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3833                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3834                 /* if peer's NIDs are uptodate then peer is discovered */
3835                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3836                         rc = lnet_peer_discovered(lp);
3837                         return rc;
3838                 }
3839
3840                 return 0;
3841         }
3842
3843         lp->lp_state |= LNET_PEER_PUSH_SENT;
3844         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3845         spin_unlock(&lp->lp_lock);
3846
3847         cpt = lnet_net_lock_current();
3848         pbuf = the_lnet.ln_ping_target;
3849         lnet_ping_buffer_addref(pbuf);
3850         lnet_net_unlock(cpt);
3851
3852         /* Push source MD */
3853         md.start     = &pbuf->pb_info;
3854         md.length    = pbuf->pb_nbytes;
3855         md.threshold = 2; /* Put/Ack */
3856         md.max_size  = 0;
3857         md.options   = LNET_MD_TRACK_RESPONSE;
3858         md.handler   = the_lnet.ln_dc_handler;
3859         md.user_ptr  = lp;
3860
3861         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3862         if (rc) {
3863                 lnet_ping_buffer_decref(pbuf);
3864                 CERROR("Can't bind push source MD: %d\n", rc);
3865                 goto fail_error;
3866         }
3867
3868         cpt = lnet_net_lock_current();
3869         /* Refcount for MD. */
3870         lnet_peer_addref_locked(lp);
3871         id.pid = LNET_PID_LUSTRE;
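        /* Address the push to the NID this discovery exchange has been using,
         * if one was recorded; otherwise use the peer's primary NID.
         */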
3872         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3873                 id.nid = lp->lp_disc_dst_nid;
3874         else
3875                 id.nid = lp->lp_primary_nid;
3876         lnet_net_unlock(cpt);
3877
3878         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3879                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3880                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3881
3882         /*
3883          * Reset the discovery NIDs. There is no need to restrict sending
3884          * from that source if we call lnet_push_update_to_peers(). They
3885          * will be set to specific NIDs again if we initiate discovery
3886          * from scratch.
3887          */
3888         lp->lp_disc_src_nid = LNET_ANY_NID;
3889         lp->lp_disc_dst_nid = LNET_ANY_NID;
3890
3891         if (rc)
3892                 goto fail_unlink;
3893
3894         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3895
3896         spin_lock(&lp->lp_lock);
3897         return 0;
3898
3899 fail_unlink:
3900         LNetMDUnlink(lp->lp_push_mdh);
3901         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3902 fail_error:
3903         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3904                lp, rc);
3905         /*
3906          * The errors that get us here are considered hard errors and
3907          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3908          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3909          * because the unlink event handler will have set it if we
3910          * called LNetMDUnlink() above.
3911          */
3912         spin_lock(&lp->lp_lock);
3913         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3914         return rc;
3915 }
3916
3917 /*
3918  * Wait for work to be queued or some other change that must be
3919  * attended to. Returns non-zero if the discovery thread should shut
3920  * down.
3921  */
3922 static int lnet_peer_discovery_wait_for_work(void)
3923 {
3924         int cpt;
3925         int rc = 0;
3926
3927         DEFINE_WAIT(wait);
3928
3929         cpt = lnet_net_lock_current();
3930         for (;;) {
3931                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3932                                 TASK_INTERRUPTIBLE);
3933                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3934                         break;
3935                 if (lnet_push_target_resize_needed() ||
3936                     the_lnet.ln_push_target->pb_needs_post)
3937                         break;
3938                 if (!list_empty(&the_lnet.ln_dc_request))
3939                         break;
3940                 if (!list_empty(&the_lnet.ln_msg_resend))
3941                         break;
3942                 lnet_net_unlock(cpt);
3943
3944                 /*
3945                  * Wake up at most once per second to check whether any
3946                  * peers have been stuck on the working queue for longer
3947                  * than the peer timeout.
3948                  */
3949                 schedule_timeout(cfs_time_seconds(1));
3950                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3951                 cpt = lnet_net_lock_current();
3952         }
3953         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3954
3955         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3956                 rc = -ESHUTDOWN;
3957
3958         lnet_net_unlock(cpt);
3959
3960         CDEBUG(D_NET, "woken: %d\n", rc);
3961
3962         return rc;
3963 }
3964
3965 /*
3966  * Messages that were pending on a destroyed peer will be put on a global
3967  * resend list. The resend list is checked by the discovery thread when
3968  * it wakes up, and the messages on it are resent. These messages can
3969  * still be sendable if the lpni which was the initial cause of the
3970  * re-queue has been transferred to another peer.
3971  *
3972  * It is possible that LNet could be shut down while we're iterating
3973  * through the list. lnet_shutdown_lndnets() will attempt to access the
3974  * resend list, but will have to wait until the spinlock is released, by
3975  * which time there shouldn't be any more messages on the resend list.
3976  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3977  * for the messages so they can be released. The other case is that
3978  * lnet_shutdown_lndnets() can finalize all the messages before this
3979  * function can visit the resend list, in which case this function will be
3980  * a no-op.
3981  */
3982 static void lnet_resend_msgs(void)
3983 {
3984         struct lnet_msg *msg, *tmp;
3985         LIST_HEAD(resend);
3986         int rc;
3987
3988         spin_lock(&the_lnet.ln_msg_resend_lock);
3989         list_splice(&the_lnet.ln_msg_resend, &resend);
3990         spin_unlock(&the_lnet.ln_msg_resend_lock);
3991
3992         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3993                 list_del_init(&msg->msg_list);
3994                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3995                                &msg->msg_rtr_nid_param);
3996                 if (rc < 0) {
3997                         CNETERR("Error sending %s to %s: %d\n",
3998                                lnet_msgtyp2str(msg->msg_type),
3999                                libcfs_idstr(&msg->msg_target), rc);
4000                         lnet_finalize(msg, rc);
4001                 }
4002         }
4003 }
4004
4005 /* The discovery thread. */
4006 static int lnet_peer_discovery(void *arg)
4007 {
4008         struct lnet_peer *lp;
4009         int retry = 3;
4010         int rc;
4011
4012         wait_for_completion(&the_lnet.ln_started);
4013
4014         CDEBUG(D_NET, "started\n");
4015
4016         for (;;) {
4017                 if (lnet_peer_discovery_wait_for_work())
4018                         break;
4019
4020                 if (lnet_push_target_resize_needed())
4021                         lnet_push_target_resize();
4022                 else if (the_lnet.ln_push_target->pb_needs_post)
4023                         lnet_push_target_post(the_lnet.ln_push_target,
4024                                               &the_lnet.ln_push_target_md);
4025
4026                 lnet_resend_msgs();
4027
4028                 lnet_net_lock(LNET_LOCK_EX);
4029                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4030                         lnet_net_unlock(LNET_LOCK_EX);
4031                         break;
4032                 }
4033
4034                 /*
4035                  * Process all incoming discovery work requests.  When
4036                  * discovery must wait on a peer to change state, it
4037                  * is added to the tail of the ln_dc_working queue. A
4038                  * timestamp keeps track of when the peer was added,
4039                  * so we can time out discovery requests that take too
4040                  * long.
4041                  */
4042                 while (!list_empty(&the_lnet.ln_dc_request)) {
4043                         lp = list_first_entry(&the_lnet.ln_dc_request,
4044                                               struct lnet_peer, lp_dc_list);
4045                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4046                         /*
4047                          * set the time the peer was put on the dc_working
4048                          * queue. It shouldn't remain on the queue
4049                          * forever, in case the GET message (for ping)
4050                          * doesn't get a REPLY or the PUT message (for
4051                          * push) doesn't get an ACK.
4052                          */
4053                         lp->lp_last_queued = ktime_get_real_seconds();
4054                         lnet_net_unlock(LNET_LOCK_EX);
4055
4056                         if (lnet_push_target_resize_needed())
4057                                 lnet_push_target_resize();
4058                         else if (the_lnet.ln_push_target->pb_needs_post)
4059                                 lnet_push_target_post(the_lnet.ln_push_target,
4060                                                       &the_lnet.ln_push_target_md);
4061
4062                         /*
4063                          * Select an action depending on the state of
4064                          * the peer and whether discovery is disabled.
4065                          * The check whether discovery is disabled is
4066                          * done after the code that handles processing
4067                          * for arrived data, cleanup for failures, and
4068                          * forcing a Ping or Push.
4069                          */
4070                         spin_lock(&lp->lp_lock);
4071                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4072                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4073                                 lp->lp_state);
4074                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4075                                             LNET_PEER_MARK_DELETED))
4076                                 rc = lnet_peer_deletion(lp);
4077                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4078                                 rc = lnet_peer_data_present(lp);
4079                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4080                                 rc = lnet_peer_ping_failed(lp);
4081                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4082                                 rc = lnet_peer_push_failed(lp);
4083                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4084                                 rc = lnet_peer_send_ping(lp);
4085                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4086                                 rc = lnet_peer_send_push(lp);
4087                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4088                                 rc = lnet_peer_send_ping(lp);
4089                         else if (lnet_peer_needs_push(lp))
4090                                 rc = lnet_peer_send_push(lp);
4091                         else
4092                                 rc = lnet_peer_discovered(lp);
4093                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4094                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4095                                 lp->lp_state, rc);
4096
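                        /* A peer that needs another look goes back on the
                         * request queue; a hard error, or a peer that is no
                         * longer DISCOVERING, completes discovery; otherwise
                         * the peer stays on the working queue waiting for an
                         * event.
                         */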
4097                         if (rc == LNET_REDISCOVER_PEER) {
4098                                 spin_unlock(&lp->lp_lock);
4099                                 lnet_net_lock(LNET_LOCK_EX);
4100                                 list_move(&lp->lp_dc_list,
4101                                           &the_lnet.ln_dc_request);
4102                         } else if (rc ||
4103                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4104                                 spin_unlock(&lp->lp_lock);
4105                                 lnet_net_lock(LNET_LOCK_EX);
4106                                 lnet_peer_discovery_complete(lp, rc);
4107                         } else {
4108                                 spin_unlock(&lp->lp_lock);
4109                                 lnet_net_lock(LNET_LOCK_EX);
4110                         }
4111
4112                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4113                                 break;
4114
4115                 }
4116
4117                 lnet_net_unlock(LNET_LOCK_EX);
4118         }
4119
4120 cleanup:
4121         CDEBUG(D_NET, "stopping\n");
4122         /*
4123          * Clean up before telling lnet_peer_discovery_stop() that
4124          * we're done. Use wake_up() below to somewhat reduce the
4125          * size of the thundering herd if there are multiple threads
4126          * waiting on discovery of a single peer.
4127          */
4128
4129         /* Queue cleanup 1: stop all pending pings and pushes. */
4130         lnet_net_lock(LNET_LOCK_EX);
4131         while (!list_empty(&the_lnet.ln_dc_working)) {
4132                 lp = list_first_entry(&the_lnet.ln_dc_working,
4133                                       struct lnet_peer, lp_dc_list);
4134                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4135                 lnet_net_unlock(LNET_LOCK_EX);
4136                 lnet_peer_cancel_discovery(lp);
4137                 lnet_net_lock(LNET_LOCK_EX);
4138         }
4139         lnet_net_unlock(LNET_LOCK_EX);
4140
4141         /* Queue cleanup 2: wait for the expired queue to clear. */
4142         while (!list_empty(&the_lnet.ln_dc_expired))
4143                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4144
4145         /* Queue cleanup 3: clear the request queue. */
4146         lnet_net_lock(LNET_LOCK_EX);
4147         while (!list_empty(&the_lnet.ln_dc_request)) {
4148                 lp = list_first_entry(&the_lnet.ln_dc_request,
4149                                       struct lnet_peer, lp_dc_list);
4150                 lnet_net_unlock(LNET_LOCK_EX);
4151                 spin_lock(&lp->lp_lock);
4152                 if (lp->lp_state & LNET_PEER_PING_FAILED)
4153                         (void)lnet_peer_ping_failed(lp);
4154                 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4155                         (void)lnet_peer_push_failed(lp);
4156                 spin_unlock(&lp->lp_lock);
4157                 lnet_net_lock(LNET_LOCK_EX);
4158                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4159         }
4160         lnet_net_unlock(LNET_LOCK_EX);
4161
4162         if (lnet_assert_handler_unused(the_lnet.ln_dc_handler, --retry <= 0))
4163                 goto cleanup;
4164
4165         the_lnet.ln_dc_handler = NULL;
4166
4167         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4168         wake_up(&the_lnet.ln_dc_waitq);
4169
4170         CDEBUG(D_NET, "stopped\n");
4171
4172         return 0;
4173 }
4174
4175 /* ln_api_mutex is held on entry. */
4176 int lnet_peer_discovery_start(void)
4177 {
4178         struct task_struct *task;
4179         int rc = 0;
4180
4181         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4182                 return -EALREADY;
4183
4184         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4185         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4186         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4187         if (IS_ERR(task)) {
4188                 rc = PTR_ERR(task);
4189                 CERROR("Can't start peer discovery thread: %d\n", rc);
4190
4191                 the_lnet.ln_dc_handler = NULL;
4192
4193                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4194         }
4195
4196         CDEBUG(D_NET, "discovery start: %d\n", rc);
4197
4198         return rc;
4199 }
4200
4201 /* ln_api_mutex is held on entry. */
4202 void lnet_peer_discovery_stop(void)
4203 {
4204         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4205                 return;
4206
4207         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4208         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4209
4210         /* In the LNetNIInit() path we may be stopping discovery before it
4211          * entered its work loop
4212          */
4213         if (!completion_done(&the_lnet.ln_started))
4214                 complete(&the_lnet.ln_started);
4215         else
4216                 wake_up(&the_lnet.ln_dc_waitq);
4217
4218         mutex_unlock(&the_lnet.ln_api_mutex);
4219         wait_event(the_lnet.ln_dc_waitq,
4220                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4221         mutex_lock(&the_lnet.ln_api_mutex);
4222
4223         LASSERT(list_empty(&the_lnet.ln_dc_request));
4224         LASSERT(list_empty(&the_lnet.ln_dc_working));
4225         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4226
4227         CDEBUG(D_NET, "discovery stopped\n");
4228 }
4229
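     /*
      * Example (sketch, not part of this file): as the comments above note,
      * discovery is started and stopped with ln_api_mutex held, e.g. in the
      * LNetNIInit()/shutdown paths.  A minimal caller might look like:
      *
      *	mutex_lock(&the_lnet.ln_api_mutex);
      *	rc = lnet_peer_discovery_start();
      *	mutex_unlock(&the_lnet.ln_api_mutex);
      *	...
      *	mutex_lock(&the_lnet.ln_api_mutex);
      *	lnet_peer_discovery_stop();
      *	mutex_unlock(&the_lnet.ln_api_mutex);
      *
      * Note that lnet_peer_discovery_stop() drops and retakes ln_api_mutex
      * internally while waiting for ln_dc_state to reach
      * LNET_DC_STATE_SHUTDOWN, so the mutex must not be held recursively.
      */
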
4230 /* Debugging */
4231
4232 void
4233 lnet_debug_peer(struct lnet_nid *nid)
4234 {
4235         char                    *aliveness = "NA";
4236         struct lnet_peer_ni     *lp;
4237         int                     cpt;
4238
4239         cpt = lnet_nid2cpt(nid, NULL);
4240         lnet_net_lock(cpt);
4241
4242         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4243         if (IS_ERR(lp)) {
4244                 lnet_net_unlock(cpt);
4245                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4246                 return;
4247         }
4248
4249         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4250                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4251
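     	/* Columns: nid, refcount, aliveness, NI peer tx credits,
     	 * rtr credits, min rtr credits, tx credits, min tx credits,
     	 * tx queue bytes (lpni_txqnob).
     	 */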
4252         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4253                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4254                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4255                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4256                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4257
4258         lnet_peer_ni_decref_locked(lp);
4259
4260         lnet_net_unlock(cpt);
4261 }
4262
4263 /* Gathering information for userspace. */
4264
4265 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4266                           char aliveness[LNET_MAX_STR_LEN],
4267                           __u32 *cpt_iter, __u32 *refcount,
4268                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4269                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4270                           __u32 *peer_tx_qnob)
4271 {
4272         struct lnet_peer_table          *peer_table;
4273         struct lnet_peer_ni             *lp;
4274         int                             j;
4275         int                             lncpt;
4276         bool                            found = false;
4277
4278         /* get the number of CPTs */
4279         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4280
4281         /* if the cpt number to be examined is >= the number of cpts in
4282          * the system, then indicate that there are no more cpts to examine
4283          */
4284         if (*cpt_iter >= lncpt)
4285                 return -ENOENT;
4286
4287         /* get the current table */
4288         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4289         /* if the ptable is NULL then there are no more cpts to examine */
4290         if (peer_table == NULL)
4291                 return -ENOENT;
4292
4293         lnet_net_lock(*cpt_iter);
4294
4295         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4296                 struct list_head *peers = &peer_table->pt_hash[j];
4297
4298                 list_for_each_entry(lp, peers, lpni_hashlist) {
4299                         if (!nid_is_nid4(&lp->lpni_nid))
4300                                 continue;
4301                         if (peer_index-- > 0)
4302                                 continue;
4303
4304                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4305                         if (lnet_isrouter(lp) ||
4306                             lnet_peer_aliveness_enabled(lp))
4307                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4308                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4309
4310                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4311                         *refcount = kref_read(&lp->lpni_kref);
4312                         *ni_peer_tx_credits =
4313                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4314                         *peer_tx_credits = lp->lpni_txcredits;
4315                         *peer_rtr_credits = lp->lpni_rtrcredits;
4316                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4317                         *peer_tx_qnob = lp->lpni_txqnob;
4318
4319                         found = true;
4320                 }
4321
4322         }
4323         lnet_net_unlock(*cpt_iter);
4324
4325         *cpt_iter = lncpt;
4326
4327         return found ? 0 : -ENOENT;
4328 }
4329
4330 /* ln_api_mutex is held, which keeps the peer list stable */
4331 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4332 {
4333         struct lnet_ioctl_element_stats *lpni_stats;
4334         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4335         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4336         struct lnet_peer_ni_credit_info *lpni_info;
4337         struct lnet_peer_ni *lpni;
4338         struct lnet_peer *lp;
4339         lnet_nid_t nid4;
4340         struct lnet_nid nid;
4341         __u32 size;
4342         int rc;
4343
4344         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4345         lp = lnet_find_peer(&nid);
4346         if (!lp) {
4347                 rc = -ENOENT;
4348                 goto out;
4349         }
4350
4351         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4352                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4353         size *= lp->lp_nnis;
4354         if (size > cfg->prcfg_size) {
4355                 cfg->prcfg_size = size;
4356                 rc = -E2BIG;
4357                 goto out_lp_decref;
4358         }
4359
4360         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4361         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4362         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4363         cfg->prcfg_count = lp->lp_nnis;
4364         cfg->prcfg_size = size;
4365         cfg->prcfg_state = lp->lp_state;
4366
4367         /* Allocate helper buffers. */
4368         rc = -ENOMEM;
4369         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4370         if (!lpni_info)
4371                 goto out_lp_decref;
4372         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4373         if (!lpni_stats)
4374                 goto out_free_info;
4375         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4376         if (!lpni_msg_stats)
4377                 goto out_free_stats;
4378         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4379         if (!lpni_hstats)
4380                 goto out_free_msg_stats;
4381
4382
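     	/* For each NID4 peer NI a fixed-size record is copied out to the
     	 * userspace bulk buffer: the nid (lnet_nid_t), then credit info,
     	 * aggregate stats, per-type message stats and health stats, in the
     	 * same order as the per-NI size computed above.
     	 */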
4383         lpni = NULL;
4384         rc = -EFAULT;
4385         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4386                 if (!nid_is_nid4(&lpni->lpni_nid))
4387                         continue;
4388                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4389                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4390                         goto out_free_hstats;
4391                 bulk += sizeof(nid4);
4392
4393                 memset(lpni_info, 0, sizeof(*lpni_info));
4394                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4395                 if (lnet_isrouter(lpni) ||
4396                     lnet_peer_aliveness_enabled(lpni))
4397                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4398                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4399
4400                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4401                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4402                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4403                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4404                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4405                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4406                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4407                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4408                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4409                         goto out_free_hstats;
4410                 bulk += sizeof(*lpni_info);
4411
4412                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4413                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4414                                                             LNET_STATS_TYPE_SEND);
4415                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4416                                                             LNET_STATS_TYPE_RECV);
4417                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4418                                                             LNET_STATS_TYPE_DROP);
4419                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4420                         goto out_free_hstats;
4421                 bulk += sizeof(*lpni_stats);
4422                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4423                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4424                         goto out_free_hstats;
4425                 bulk += sizeof(*lpni_msg_stats);
4426                 lpni_hstats->hlpni_network_timeout =
4427                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4428                 lpni_hstats->hlpni_remote_dropped =
4429                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4430                 lpni_hstats->hlpni_remote_timeout =
4431                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4432                 lpni_hstats->hlpni_remote_error =
4433                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4434                 lpni_hstats->hlpni_health_value =
4435                   atomic_read(&lpni->lpni_healthv);
4436                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4437                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4438                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4439                         goto out_free_hstats;
4440                 bulk += sizeof(*lpni_hstats);
4441         }
4442         rc = 0;
4443
4444 out_free_hstats:
4445         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4446 out_free_msg_stats:
4447         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4448 out_free_stats:
4449         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4450 out_free_info:
4451         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4452 out_lp_decref:
4453         lnet_peer_decref_locked(lp);
4454 out:
4455         return rc;
4456 }
4457
4458 /* must hold net_lock/0 */
4459 void
4460 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4461                                      struct list_head *recovery_queue,
4462                                      time64_t now)
4463 {
4464         /* the monitor thread could have shut down and cleaned up the queues */
4465         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4466                 return;
4467
4468         if (!list_empty(&lpni->lpni_recovery))
4469                 return;
4470
4471         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4472                 return;
4473
4474         if (!lpni->lpni_last_alive) {
4475                 CDEBUG(D_NET,
4476                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4477                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4478                        lpni->lpni_last_alive);
4479                 return;
4480         }
4481
4482         if (lnet_recovery_limit &&
4483             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4484                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4485                        libcfs_nidstr(&lpni->lpni_nid),
4486                        lpni->lpni_last_alive);
4487                 /* Reset the ping count so that if this peer NI is added back to
4488                  * the recovery queue we will send the first ping right away.
4489                  */
4490                 lpni->lpni_ping_count = 0;
4491                 return;
4492         }
4493
4494         /* This peer NI is going on the recovery queue, so take a ref on it */
4495         lnet_peer_ni_addref_locked(lpni);
4496
4497         lnet_peer_ni_set_next_ping(lpni, now);
4498
4499         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4500                libcfs_nidstr(&lpni->lpni_nid),
4501                lpni->lpni_ping_count,
4502                lpni->lpni_next_ping,
4503                lpni->lpni_last_alive,
4504                atomic_read(&lpni->lpni_healthv));
4505
4506         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4507 }
4508
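     /*
      * Example (sketch): the caller pattern used by lnet_peer_ni_set_healthv()
      * below: take the net lock, queue the peer NI, drop the lock.
      *
      *	lnet_net_lock(LNET_LOCK_EX);
      *	lnet_peer_ni_add_to_recoveryq_locked(lpni,
      *					     &the_lnet.ln_mt_peerNIRecovq,
      *					     ktime_get_seconds());
      *	lnet_net_unlock(LNET_LOCK_EX);
      *
      * The function only queues eligible NIs: it returns early if the
      * monitor thread is not running, the NI is already queued, it is at
      * full health, it has never been seen alive, or it has aged past
      * lnet_recovery_limit.
      */
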
4509 /* Call with the ln_api_mutex held */
4510 void
4511 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4512 {
4513         struct lnet_peer_table *ptable;
4514         struct lnet_peer *lp;
4515         struct lnet_peer_net *lpn;
4516         struct lnet_peer_ni *lpni;
4517         int lncpt;
4518         int cpt;
4519         time64_t now;
4520
4521         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4522                 return;
4523
4524         now = ktime_get_seconds();
4525
4526         if (!all) {
4527                 lnet_net_lock(LNET_LOCK_EX);
4528                 lpni = lnet_peer_ni_find_locked(nid);
4529                 if (!lpni) {
4530                         lnet_net_unlock(LNET_LOCK_EX);
4531                         return;
4532                 }
4533                 lnet_set_lpni_healthv_locked(lpni, value);
4534                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4535                                              &the_lnet.ln_mt_peerNIRecovq, now);
4536                 lnet_peer_ni_decref_locked(lpni);
4537                 lnet_net_unlock(LNET_LOCK_EX);
4538                 return;
4539         }
4540
4541         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4542
4543         /*
4544          * Walk all the peers and reset the health value for each one to the
4545          * specified value.
4546          */
4547         lnet_net_lock(LNET_LOCK_EX);
4548         for (cpt = 0; cpt < lncpt; cpt++) {
4549                 ptable = the_lnet.ln_peer_tables[cpt];
4550                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4551                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4552                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4553                                                     lpni_peer_nis) {
4554                                         lnet_set_lpni_healthv_locked(lpni,
4555                                                                      value);
4556                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4557                                              &the_lnet.ln_mt_peerNIRecovq, now);
4558                                 }
4559                         }
4560                 }
4561         }
4562         lnet_net_unlock(LNET_LOCK_EX);
4563 }
4564
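     /*
      * Example (sketch): with ln_api_mutex held, reset every peer NI back to
      * full health.  When 'all' is true the nid argument is not consulted by
      * the code above, so NULL is acceptable here:
      *
      *	mutex_lock(&the_lnet.ln_api_mutex);
      *	lnet_peer_ni_set_healthv(NULL, LNET_MAX_HEALTH_VALUE, true);
      *	mutex_unlock(&the_lnet.ln_api_mutex);
      *
      * lnet_peer_ni_add_to_recoveryq_locked() skips NIs that are already at
      * full health, so nothing is queued for recovery in this case; a lower
      * value would also queue each NI for re-pinging by the monitor thread.
      */
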