// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */

/* This file is part of Lustre, http://www.lustre.org/ */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER	(1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
                            unsigned int flags);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}

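/*
 * Usage sketch (illustrative only): the peer tables are created once
 * during LNet startup and torn down from lnet_peer_uninit() after all
 * peer_nis have been deleted:
 *
 *	rc = lnet_peer_tables_create();
 *	if (rc)
 *		return rc;	(-ENOMEM: tables or hash allocation failed)
 *	...
 *	lnet_peer_uninit();	(deletes peer_nis, then destroys the tables)
 */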
static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = *nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
        lpni->lpni_notified = false;

        net = lnet_get_net_locked(LNET_NID_NET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                kref_get(&lpni->lpni_kref);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

        return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = *nid;
        lp->lp_disc_src_nid = LNET_ANY_NID;
        lp->lp_disc_dst_nid = LNET_ANY_NID;
        lp->lp_merge_primary_nid = LNET_ANY_NID;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * All peers created on a router should have health checking
         * enabled, if it is not already enabled.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for the loopback peer. If you're creating
         * a peer for the loopback interface then that was initiated when
         * we attempted to send a message over the loopback. There is no
         * need to ever use a different interface when sending messages
         * to myself.
         */
        if (nid_is_lo0(nid))
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                kref_put(&lp->lp_data->pb_refcnt, lnet_ping_buffer_free);

        /*
         * If there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected that lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on it.
         * We cannot resend them now because we're holding the cpt lock,
         * and releasing that lock could leave us in an inconsistent state.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nidstr(&lp->lp_primary_nid),
                libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Can not delete it\n",
                       libcfs_nidstr(&lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* decrement the ref count on the peer table */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni on the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

        spin_lock(&peer->lp_lock);
        peer->lp_state |= LNET_PEER_MARK_DELETED;
        spin_unlock(&peer->lp_lock);

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
        struct lnet_handle_md ping_mdh;
        struct lnet_handle_md push_mdh;

        LNetInvalidateMDHandle(&ping_mdh);
        LNetInvalidateMDHandle(&push_mdh);

        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_PING_SENT) {
                ping_mdh = lp->lp_ping_mdh;
                LNetInvalidateMDHandle(&lp->lp_ping_mdh);
        }
        if (lp->lp_state & LNET_PEER_PUSH_SENT) {
                push_mdh = lp->lp_push_mdh;
                LNetInvalidateMDHandle(&lp->lp_push_mdh);
        }
        spin_unlock(&lp->lp_lock);

        if (!LNetMDHandleIsInvalid(ping_mdh))
                LNetMDUnlink(ping_mdh);
        if (!LNetMDHandleIsInvalid(push_mdh))
                LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        int rc;

        lnet_peer_cancel_discovery(peer);
        lnet_net_lock(LNET_LOCK_EX);
        rc = lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
                  unsigned int flags)
{
        struct lnet_peer_ni *lpni;
        struct lnet_nid primary_nid = lp->lp_primary_nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /* If we're asked to lock down the primary NID we shouldn't be
         * deleting it
         */
        if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
            nid_same(&primary_nid, nid)) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_peer_ni_find_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                lnet_peer_ni_decref_locked(lpni);
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                lnet_peer_ni_decref_locked(lpni);
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lpni2->lpni_nid;
        }
        rc = lnet_peer_ni_del_locked(lpni, force);
        lnet_peer_ni_decref_locked(lpni);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
               flags, rc);

        return rc;
}

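/*
 * Caller sketch (illustrative only): the error codes above let a
 * configuration path distinguish why a delete was refused, e.g.:
 *
 *	rc = lnet_peer_del_nid(lp, &nid, flags);
 *	if (rc == -EBUSY)
 *		(nid is the primary and other NIDs remain)
 *	else if (rc == -ECHILD)
 *		(nid exists but belongs to a different peer)
 */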
static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (!nid_same(&peer->lp_primary_nid,
                                       &lpni->lpni_nid)) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        struct lnet_nid         gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, &gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (nid_same(&lp->lpni_nid, nid)) {
                        kref_get(&lp->lpni_kref);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (nid_same(&lpni->lpni_nid, nid))
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}

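/*
 * Usage sketch (illustrative only): lnet_find_peer() returns the peer
 * with a reference held, which the caller must drop when done, as
 * LNetAddPeer() does further down in this file:
 *
 *	lp = lnet_find_peer(&nid);
 *	if (lp) {
 *		(... inspect or update lp ...)
 *		lnet_peer_decref_locked(lp);
 *	}
 */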
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list, loop back
                         * to the beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_first_entry(&peer->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);
                }
                lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
                                        lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if we reached the end of the peer ni list and the peer
                 * net is specified then there are no more peer nis in
                 * that net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net's ni list. move to the
                 * next net
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
                                       struct lnet_peer_net,
                                       lpn_peer_nets);
                /* get the ni on it */
                lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
                                        lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_first_entry(&prev->lpni_peer_nis,
                                struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}

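/*
 * Iteration sketch: passing a NULL peer_net walks every NI of the peer
 * across all of its nets, which is how callers in this file iterate
 * (e.g. lnet_peer_clr_non_mr_pref_nids()):
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		(... visit lpni ...)
 */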
/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (!nid_is_nid4(&lp->lp_primary_nid))
                                continue;
                        if (i >= count)
                                goto done;
                        id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}

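/*
 * Caller sketch (illustrative only): on -E2BIG the required buffer size
 * is returned in *sizep, so a caller can size the buffer in two passes:
 *
 *	rc = lnet_get_peer_list(&count, &size, ids);
 *	if (rc == -E2BIG)
 *		(reallocate 'ids' to 'size' bytes and call again)
 */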
/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
                return;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *	false: the list is empty, or the NID is not in the list
 *	true: the NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             struct lnet_nid *gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nidstr(&lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nidstr(&ne->nl_nid),
                       libcfs_nidstr(gw_nid));
                if (nid_same(&ne->nl_nid, gw_nid))
                        return true;
        }

        return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       struct lnet_nid *gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with the api_mutex held. While the
         * api_mutex is held the list cannot be modified, as it is only
         * modified as a result of applying a UDSP, and that happens
         * under the api_mutex lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (nid_same(&ne->nl_nid, gw_nid))
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = *gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, i.e., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return nid_same(&lpni->lpni_pref.nid, nid);
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (nid_same(&ne->nl_nid, nid))
                        return true;
        }
        return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
                                  struct lnet_nid *nid)
{
        int rc = 0;

        if (!nid)
                return -EINVAL;
        spin_lock(&lpni->lpni_lock);
        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
static int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
static void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        struct lnet_nid *tmp_nid = NULL;
        int rc = 0;

        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 &&
            nid_same(&lpni->lpni_pref.nid, nid)) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = &lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne1->nl_nid, nid)) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = *tmp_nid;
                }
                ne1->nl_nid = *nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}

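/*
 * Note on the encoding used above (sketch): lpni_pref acts as a union
 * discriminated by lpni_pref_nnids, so a single preferred NID is stored
 * inline and only converted to a list when a second NID is added:
 *
 *	lpni_pref_nnids == 0: no preference recorded
 *	lpni_pref_nnids == 1: lpni->lpni_pref.nid holds the NID inline
 *	lpni_pref_nnids >= 2: lpni->lpni_pref.nids is a list of
 *			      struct lnet_nid_list entries
 */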
int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (!nid_same(&lpni->lpni_pref.nid, nid)) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne->nl_nid, nid))
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
        struct lnet_peer_ni *lpni;

        *result = *nid;
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

static void
lnet_discover_peer_nid(struct lnet_nid *nid)
{
        int cpt = lnet_net_lock_current();
        struct lnet_peer_ni *lpni = lnet_peer_ni_find_locked(nid);

        if (lpni) {
                lnet_discover_peer_locked(lpni, cpt, false);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);
}

int
LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
{
        struct lnet_nid pnid = LNET_ANY_NID;
        bool mr;
        int i, rc;
        int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;

        if (!nids || num_nids < 1)
                return -EINVAL;

        rc = LNetNIInit(LNET_PID_ANY);
        if (rc < 0)
                return rc;

        mutex_lock(&the_lnet.ln_api_mutex);

        mr = lnet_peer_discovery_disabled == 0;

        rc = 0;

        for (i = 0; i < num_nids; i++) {
                if (nid_is_lo0(&nids[i]))
                        continue;

                if (LNET_NID_IS_ANY(&pnid)) {
                        pnid = nids[i];
                        rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
                        if (rc == -EALREADY) {
                                struct lnet_peer *lp;

                                CDEBUG(D_NET, "A peer exists for NID %s\n",
                                       libcfs_nidstr(&pnid));
                                rc = 0;
                                /* Adds a refcount */
                                lp = lnet_find_peer(&pnid);
                                LASSERT(lp);
                                pnid = lp->lp_primary_nid;
                                /* Drop refcount from lookup */
                                lnet_peer_decref_locked(lp);
                        } else if (mr && !rc) {
                                lnet_discover_peer_nid(&pnid);
                        }
                } else if (lnet_peer_discovery_disabled) {
                        rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
                                              flags);
                } else if (!nid_same(&pnid, &nids[i])) {
                        rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID,
                                              mr, 0);
                        if (!rc) {
                                if (lock_prim_nid) {
                                        struct lnet_peer *lp;

                                        lp = lnet_find_peer(&nids[i]);
                                        if (lp) {
                                                lp->lp_merge_primary_nid = pnid;
                                                lnet_peer_decref_locked(lp);
                                        }
                                }
                                lnet_discover_peer_nid(&nids[i]);
                        }
                }

                if (rc && rc != -EEXIST)
                        goto unlock;
        }

unlock:
        mutex_unlock(&the_lnet.ln_api_mutex);

        LNetNIFini();

        return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

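/*
 * Example (sketch, illustrative only): a caller can pass every NID it
 * knows for one server; the first non-loopback NID is treated as the
 * primary and the remaining NIDs are merged into that peer:
 *
 *	struct lnet_nid nids[2];	(filled in by the caller)
 *
 *	rc = LNetAddPeer(nids, 2);
 *	if (rc)
 *		(creation failed; -EEXIST is already folded into 0)
 */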
void LNetPrimaryNID(struct lnet_nid *nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        struct lnet_nid orig;
        int rc = 0;
        int cpt;

        if (!nid || nid_is_lo0(nid))
                return;
        orig = *nid;

        cpt = lnet_net_lock_current();
        lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        /* If discovery is disabled locally then we needn't bother running
         * discovery here, because discovery will not modify whatever
         * primary NID is currently set for this peer. If the specified
         * peer is down then this discovery can introduce long delays into
         * the mount process, so skip it if it isn't necessary.
         */
again:
        spin_lock(&lp->lp_lock);
        /* DD disabled, nothing to do */
        if (lnet_peer_discovery_disabled) {
                *nid = lp->lp_primary_nid;
                spin_unlock(&lp->lp_lock);
                goto out_decref;
        }

        /* Peer already up to date, nothing to do */
        if (lnet_peer_is_uptodate_locked(lp)) {
                *nid = lp->lp_primary_nid;
                spin_unlock(&lp->lp_lock);
                goto out_decref;
        }
        spin_unlock(&lp->lp_lock);

        /* If primary nid locking is enabled, discovery is performed
         * in the background.
         * If primary nid locking is disabled, discovery blocks here.
         * Messages to the peer will not go through until the discovery is
         * complete.
         */
        if (lock_prim_nid && lp->lp_state & LNET_PEER_LOCK_PRIMARY)
                rc = lnet_discover_peer_locked(lpni, cpt, false);
        else
                rc = lnet_discover_peer_locked(lpni, cpt, true);
        if (rc)
                goto out_decref;

        /* The lpni (or lp) for this NID may have changed and our ref is
         * the only thing keeping the old one around. Release the ref
         * and look up the lpni again.
         */
        lnet_peer_ni_decref_locked(lpni);
        lpni = lnet_peer_ni_find_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
                goto again;
        *nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
               libcfs_nidstr(nid), rc);
}
EXPORT_SYMBOL(LNetPrimaryNID);

int LNetPeerDiscovered(struct lnet_nid *nid)
{
        int rc;
        struct lnet_peer *lp;

        if (nid_is_lo0(nid))
                return 1;

        lp = lnet_find_peer(nid);
        if (!lp) {
                CDEBUG(D_NET, "No peer for NID %s, can't discover\n",
                       libcfs_nidstr(nid));
                return -EHOSTUNREACH;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_NO_DISCOVERY ||
            (lp->lp_state & LNET_PEER_DISCOVERED &&
             lp->lp_state & LNET_PEER_NIDS_UPTODATE))
                rc = 1;
        else if (lp->lp_state & LNET_PEER_PING_FAILED)
                rc = -EHOSTUNREACH;
        else if (lp->lp_state & LNET_PEER_DISCOVERING)
                rc = -EALREADY;
        else
                rc = -EAGAIN;
        spin_unlock(&lp->lp_lock);

        if (rc == -EAGAIN)
                lnet_peer_queue_for_discovery(lp);

        /* Drop refcount from lookup */
        lnet_peer_decref_locked(lp);
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "Peer NID %s is %sdiscovered: rc = %d\n",
               libcfs_nidstr(nid), rc > 0 ? "" : "not ", rc);
        return rc;
}
EXPORT_SYMBOL(LNetPeerDiscovered);

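/*
 * Polling sketch (illustrative only): -EAGAIN queues the peer for
 * discovery and -EALREADY means discovery is in progress, so a caller
 * can poll (with a delay between attempts) until the peer is discovered
 * or pinging it fails:
 *
 *	do {
 *		rc = LNetPeerDiscovered(&nid);
 *	} while (rc == -EALREADY || rc == -EAGAIN);
 *	if (rc < 0)
 *		(peer unreachable: -EHOSTUNREACH)
 */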
1546 struct lnet_peer_net *
1547 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1548 {
1549         struct lnet_peer_net *peer_net;
1550         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1551                 if (peer_net->lpn_net_id == net_id)
1552                         return peer_net;
1553         }
1554         return NULL;
1555 }
1556
1557 /*
1558  * Attach a peer_ni to a peer_net and peer. This function assumes
1559  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1560  * may be attached to a different peer, in which case it will be
1561  * properly detached first. The whole operation is done atomically.
1562  *
1563  * This function consumes the reference on lpni and always returns 0.
1564  * This is the last function called from functions that do return an
1565  * int, so returning 0 here allows the compiler to do a tail call.
1566  */
1567 static int
1568 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1569                          struct lnet_peer_net *lpn,
1570                          struct lnet_peer_ni *lpni,
1571                          unsigned flags)
1572 {
1573         struct lnet_peer_table *ptable;
1574         bool new_lpn = false;
1575         int rc;
1576
1577         /* Install the new peer_ni */
1578         lnet_net_lock(LNET_LOCK_EX);
1579         /* Add peer_ni to global peer table hash, if necessary. */
1580         if (list_empty(&lpni->lpni_hashlist)) {
1581                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1582
1583                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1584                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1585                 ptable->pt_version++;
1586                 kref_get(&lpni->lpni_kref);
1587         }
1588
1589         /* Detach the peer_ni from an existing peer, if necessary. */
1590         if (lpni->lpni_peer_net) {
1591                 LASSERT(lpni->lpni_peer_net != lpn);
1592                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1593                 lnet_peer_detach_peer_ni_locked(lpni);
1594                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1595                 lpni->lpni_peer_net = NULL;
1596         }
1597
1598         /* Add peer_ni to peer_net */
1599         lpni->lpni_peer_net = lpn;
1600         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1601                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1602         else
1603                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1604         lnet_update_peer_net_healthv(lpni);
1605         lnet_peer_net_addref_locked(lpn);
1606
1607         /* Add peer_net to peer */
1608         if (!lpn->lpn_peer) {
1609                 new_lpn = true;
1610                 lpn->lpn_peer = lp;
1611                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1612                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1613                 else
1614                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1615                 lnet_peer_addref_locked(lp);
1616         }
1617
1618         /* Add peer to global peer list, if necessary */
1619         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1620         if (list_empty(&lp->lp_peer_list)) {
1621                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1622                 ptable->pt_peers++;
1623         }
1624
1625         /* Update peer state */
1626         spin_lock(&lp->lp_lock);
1627         if (flags & LNET_PEER_CONFIGURED) {
1628                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1629                         lp->lp_state |= LNET_PEER_CONFIGURED;
1630         }
1631         if (flags & LNET_PEER_MULTI_RAIL) {
1632                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1633                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1634                         lnet_peer_clr_non_mr_pref_nids(lp);
1635                 }
1636         }
1637         if (flags & LNET_PEER_LOCK_PRIMARY) {
1638                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1639                 lp->lp_prim_lock_ts = ktime_get_ns();
1640         }
1641         spin_unlock(&lp->lp_lock);
1642
1643         lp->lp_nnis++;
1644
1645         /* apply UDSPs */
1646         if (new_lpn) {
1647                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1648                 if (rc)
1649                         CERROR("Failed to apply UDSPs on lpn %s\n",
1650                                libcfs_net2str(lpn->lpn_net_id));
1651         }
1652         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1653         if (rc)
1654                 CERROR("Failed to apply UDSPs on lpni %s\n",
1655                        libcfs_nidstr(&lpni->lpni_nid));
1656
1657         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1658                libcfs_nidstr(&lp->lp_primary_nid),
1659                libcfs_nidstr(&lpni->lpni_nid), flags);
1660         lnet_peer_ni_decref_locked(lpni);
1661         lnet_net_unlock(LNET_LOCK_EX);
1662
1663         return 0;
1664 }
1665
1666 /*
1667  * Create a new peer, with nid as its primary nid.
1668  *
1669  * Call with the lnet_api_mutex held.
1670  */
1671 static int
1672 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1673 {
1674         struct lnet_peer *lp;
1675         struct lnet_peer_net *lpn;
1676         struct lnet_peer_ni *lpni;
1677         int rc = 0;
1678
1679         LASSERT(nid);
1680
1681         /*
1682          * No need for the lnet_net_lock here, because the
1683          * lnet_api_mutex is held.
1684          */
1685         lpni = lnet_peer_ni_find_locked(nid);
1686         if (lpni) {
1687                 /* A peer with this NID already exists. */
1688                 lp = lpni->lpni_peer_net->lpn_peer;
1689                 lnet_peer_ni_decref_locked(lpni);
1690                 /*
1691                  * This is an error if the peer was configured and the
1692                  * primary NID differs or an attempt is made to change
1693                  * the Multi-Rail flag. Otherwise the assumption is
1694                  * that an existing peer is being modified.
1695                  */
1696                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1697                         if (!nid_same(&lp->lp_primary_nid, nid))
1698                                 rc = -EEXIST;
1699                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1700                                 rc = -EPERM;
1701                         goto out;
1702                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1703                         if (nid_same(&lp->lp_primary_nid, nid))
1704                                 rc = -EEXIST;
1705                         /* we're trying to recreate an existing peer
1706                          * whose primary NID has already been locked.
1707                          * This is likely due to two servers existing
1708                          * on the same node, so we'll just refer to
1709                          * that node with the primary NID which was
1710                          * first added by Lustre.
1711                          */
1712                         else
1713                                 rc = -EALREADY;
1714                         goto out;
1715                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1716                         /* if not recreating peer as configured and
1717                          * not locking primary nid, no need to
1718                          * do anything if primary nid is not being changed
1719                          */
1720                         if (nid_same(&lp->lp_primary_nid, nid)) {
1721                                 rc = -EEXIST;
1722                                 goto out;
1723                         }
1724                 }
1725                 /* Delete and recreate the peer.
1726                  * We can get here:
1727                  * 1. if the peer is being recreated as a configured NID
1728                  * 2. if there already exists a peer which was discovered
1729                  *    manually, but is being recreated via Lustre with
1730                  *    LNET_PEER_LOCK_PRIMARY
1731                  */
1732                 rc = lnet_peer_del(lp);
1733                 if (rc)
1734                         goto out;
1735         }
1736
1737         /* Create peer, peer_net, and peer_ni. */
1738         rc = -ENOMEM;
1739         lp = lnet_peer_alloc(nid);
1740         if (!lp)
1741                 goto out;
1742         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1743         if (!lpn)
1744                 goto out_free_lp;
1745         lpni = lnet_peer_ni_alloc(nid);
1746         if (!lpni)
1747                 goto out_free_lpn;
1748
1749         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1750
1751 out_free_lpn:
1752         LIBCFS_FREE(lpn, sizeof(*lpn));
1753 out_free_lp:
1754         LIBCFS_FREE(lp, sizeof(*lp));
1755 out:
1756         CDEBUG(D_NET, "peer %s flags %#x: %d\n",
1757                libcfs_nidstr(nid), flags, rc);
1758         return rc;
1759 }
1760
1761 /*
1762  * Add a NID to a peer. Call with ln_api_mutex held.
1763  *
1764  * Error codes:
1765  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1766  *  -EEXIST:   The NID was configured by DLC for a different peer.
1767  *  -ENOMEM:   Out of memory.
1768  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1769  *             non-multi-rail peer.
1770  */
1771 static int
1772 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1773                   unsigned int flags)
1774 {
1775         struct lnet_peer_net *lpn;
1776         struct lnet_peer_ni *lpni;
1777         int rc = 0;
1778
1779         LASSERT(lp);
1780         LASSERT(nid);
1781
1782         /* A configured peer can only be updated through configuration. */
1783         if (!(flags & LNET_PEER_CONFIGURED)) {
1784                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1785                         rc = -EPERM;
1786                         goto out;
1787                 }
1788         }
1789
1790         /*
1791          * The MULTI_RAIL flag can be set but not cleared, because
1792          * that would leave the peer struct in an invalid state.
1793          */
1794         if (flags & LNET_PEER_MULTI_RAIL) {
1795                 spin_lock(&lp->lp_lock);
1796                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1797                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1798                         lnet_peer_clr_non_mr_pref_nids(lp);
1799                 }
1800                 spin_unlock(&lp->lp_lock);
1801         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1802                 rc = -EPERM;
1803                 goto out;
1804         }
1805
1806         lpni = lnet_peer_ni_find_locked(nid);
1807         if (lpni) {
1808                 /*
1809                  * A peer_ni already exists. This is only a problem if
1810                  * it is not connected to this peer and was configured
1811                  * by DLC.
1812                  */
1813                 if (lpni->lpni_peer_net->lpn_peer == lp)
1814                         goto out_free_lpni;
1815                 if (lnet_peer_ni_is_configured(lpni)) {
1816                         rc = -EEXIST;
1817                         goto out_free_lpni;
1818                 }
1819                 /* If this is the primary NID, destroy the peer. */
1820                 if (lnet_peer_ni_is_primary(lpni)) {
1821                         struct lnet_peer *lp2 =
1822                                 lpni->lpni_peer_net->lpn_peer;
1823                         int rtr_refcount = lp2->lp_rtr_refcount;
1824                         unsigned int peer2_state;
1825                         __u64 peer2_prim_lock_ts;
1826
1827                         /* If there's another peer that this NID belongs to
1828                          * and the primary NID for that peer is locked,
1829                          * then, unless it is the only NID, we don't want
1830                          * to mess with it.
1831                          * But the configuration is wrong at this point,
1832                          * so we should flag both of these peers as being
1833                          * in a bad state.
1834                          */
1835                         spin_lock(&lp2->lp_lock);
1836                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1837                             lp2->lp_nnis > 1) {
1838                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1839                                 spin_unlock(&lp2->lp_lock);
1840                                 spin_lock(&lp->lp_lock);
1841                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1842                                 spin_unlock(&lp->lp_lock);
1843                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1844                                         libcfs_nidstr(&lp->lp_primary_nid),
1845                                         libcfs_nidstr(nid),
1846                                         libcfs_nidstr(&lp2->lp_primary_nid));
1847                                 goto out_free_lpni;
1848                         }
1849                         peer2_state = lp2->lp_state;
1850                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1851                         spin_unlock(&lp2->lp_lock);
1852
1853                         /* The NID which got locked the earliest should
1854                          * be kept as primary. If the peers were created
1855                          * by Lustre, this allows the first listed NID to
1856                          * stay primary as intended for the purpose of
1857                          * communicating with Lustre, even if peer
1858                          * discovery succeeded using a different NID of
1859                          * the MR peer.
1860                          */
1861                         spin_lock(&lp->lp_lock);
1862                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1863                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1864                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1865                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1866                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1867                                 lp->lp_primary_nid = *nid;
1868                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1869                         }
1870                         spin_unlock(&lp->lp_lock);
1871                         /*
1872                          * If we're trying to delete a router, it means
1873                          * we're moving this peer NI to a new peer, so we
1874                          * must transfer router properties to the new peer.
1875                          */
1876                         if (rtr_refcount > 0) {
1877                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1878                                 lnet_rtr_transfer_to_peer(lp2, lp);
1879                         }
1880                         lnet_peer_del(lp2);
1881                         lnet_peer_ni_decref_locked(lpni);
1882                         lpni = lnet_peer_ni_alloc(nid);
1883                         if (!lpni) {
1884                                 rc = -ENOMEM;
1885                                 goto out_free_lpni;
1886                         }
1887                 }
1888         } else {
1889                 lpni = lnet_peer_ni_alloc(nid);
1890                 if (!lpni) {
1891                         rc = -ENOMEM;
1892                         goto out_free_lpni;
1893                 }
1894         }
1895
1896         /*
1897          * Get the peer_net. Check that we're not adding a second
1898          * peer_ni on a peer_net of a non-multi-rail peer.
1899          */
1900         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1901         if (!lpn) {
1902                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1903                 if (!lpn) {
1904                         rc = -ENOMEM;
1905                         goto out_free_lpni;
1906                 }
1907         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1908                 rc = -ENOTUNIQ;
1909                 goto out_free_lpni;
1910         }
1911
1912         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1913
1914 out_free_lpni:
1915         lnet_peer_ni_decref_locked(lpni);
1916 out:
1917         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1918                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1919                flags, rc);
1920         return rc;
1921 }
1922
1923 /*
1924  * Update the primary NID of a peer, if possible.
1925  *
1926  * Call with the lnet_api_mutex held.
1927  */
1928 static int
1929 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1930                           unsigned int flags)
1931 {
1932         struct lnet_nid old = lp->lp_primary_nid;
1933         int rc = 0;
1934
1935         if (nid_same(&lp->lp_primary_nid, nid))
1936                 goto out;
1937
1938         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1939                 lp->lp_primary_nid = *nid;
1940
1941         rc = lnet_peer_add_nid(lp, nid, flags);
1942         if (rc) {
1943                 lp->lp_primary_nid = old;
1944                 goto out;
1945         }
1946 out:
1947         /* if this is a configured peer or the primary for that peer has
1948          * been locked, then we don't want to flag this scenario as
1949          * a failure
1950          */
1951         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1952             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1953                 return 0;
1954
1955         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1956                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1957
1958         return rc;
1959 }
1960
1961 /*
1962  * lpni creation initiated due to traffic, either sending or receiving.
1963  * Callers must hold ln_api_mutex.
1964  * A ref is taken on the lnet_peer_ni returned by this function.
1965  */
1966 static struct lnet_peer_ni *
1967 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1968 __must_hold(&the_lnet.ln_api_mutex)
1969 {
1970         struct lnet_peer *lp = NULL;
1971         struct lnet_peer_net *lpn = NULL;
1972         struct lnet_peer_ni *lpni;
1973         unsigned flags = 0;
1974         int rc = 0;
1975
1976         if (LNET_NID_IS_ANY(nid)) {
1977                 rc = -EINVAL;
1978                 goto out_err;
1979         }
1980
1981         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1982         lpni = lnet_peer_ni_find_locked(nid);
1983         if (lpni) {
1984                 /*
1985                  * We must have raced with another thread. Since we
1986                  * know next to nothing about a peer_ni created by
1987                  * traffic, we just assume everything is ok and
1988                  * return.
1989                  */
1990                 goto out;
1991         }
1992
1993         /* Create peer, peer_net, and peer_ni. */
1994         rc = -ENOMEM;
1995         lp = lnet_peer_alloc(nid);
1996         if (!lp)
1997                 goto out_err;
1998         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1999         if (!lpn)
2000                 goto out_err;
2001         lpni = lnet_peer_ni_alloc(nid);
2002         if (!lpni)
2003                 goto out_err;
2004         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
2005
2006         /* lnet_peer_attach_peer_ni() always returns 0 */
2007         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
2008
2009         kref_get(&lpni->lpni_kref);
2010
2011 out_err:
2012         if (rc) {
2013                 LIBCFS_FREE(lpn, sizeof(*lpn));
2014                 LIBCFS_FREE(lp, sizeof(*lp));
2015                 lpni = ERR_PTR(rc);
2016         }
2017 out:
2018         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
2019         return lpni;
2020 }
2021
2022 /*
2023  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
2024  *
2025  * This API handles the following combinations:
2026  *   Create a peer with its primary NI if only the prim_nid is provided.
2027  *   Add a NID to a peer identified by the prim_nid. The peer identified
2028  *   by the prim_nid must already exist.
2029  *   The peer being created may be non-MR.
2030  *
2031  * The caller must hold ln_api_mutex. This prevents the peer from
2032  * being created/modified/deleted by a different thread.
2033  */
2034 static int
2035 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
2036                  unsigned int flags)
2037 __must_hold(&the_lnet.ln_api_mutex)
2038 {
2039         struct lnet_peer *lp = NULL;
2040         struct lnet_peer_ni *lpni;
2041
2042         /* The prim_nid must always be specified */
2043         if (LNET_NID_IS_ANY(prim_nid))
2044                 return -EINVAL;
2045
2046         if (mr)
2047                 flags |= LNET_PEER_MULTI_RAIL;
2048
2049         /*
2050          * If nid isn't specified, we must create a new peer with
2051          * prim_nid as its primary nid.
2052          */
2053         if (LNET_NID_IS_ANY(nid))
2054                 return lnet_peer_add(prim_nid, flags);
2055
2056         /* Look up the prim_nid, which must exist. */
2057         lpni = lnet_peer_ni_find_locked(prim_nid);
2058         if (!lpni)
2059                 return -ENOENT;
2060         lp = lpni->lpni_peer_net->lpn_peer;
2061         lnet_peer_ni_decref_locked(lpni);
2062
2063         /* Peer must have been configured. */
2064         if ((flags & LNET_PEER_CONFIGURED) &&
2065             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2066                 CDEBUG(D_NET, "peer %s was not configured\n",
2067                        libcfs_nidstr(prim_nid));
2068                 return -ENOENT;
2069         }
2070
2071         /* Primary NID must match */
2072         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2073                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2074                        libcfs_nidstr(prim_nid),
2075                        libcfs_nidstr(&lp->lp_primary_nid));
2076                 return -ENODEV;
2077         }
2078
2079         /* Multi-Rail flag must match. */
2080         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2081                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2082                        libcfs_nidstr(prim_nid));
2083                 return -EPERM;
2084         }
2085
2086         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2087                 CDEBUG(D_NET,
2088                        "Don't add temporary peer NI for uptodate peer %s\n",
2089                        libcfs_nidstr(&lp->lp_primary_nid));
2090                 return -EINVAL;
2091         }
2092
2093         return lnet_peer_add_nid(lp, nid, flags);
2094 }
2095
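     /*
      * User-space (DLC) entry point for peer addition. The peer is
      * always flagged LNET_PEER_CONFIGURED; lock_prim additionally
      * sets LNET_PEER_LOCK_PRIMARY so that discovery will not change
      * the primary NID.
      */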
2096 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2097                           bool mr, bool lock_prim)
2098 {
2099         int fl = LNET_PEER_CONFIGURED | (lock_prim ? LNET_PEER_LOCK_PRIMARY : 0);
2100
2101         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2102 }
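     /*
      * Illustrative usage sketch (hypothetical caller, e.g. an ioctl
      * handler), assuming prim and nid were parsed beforehand:
      *
      *    mutex_lock(&the_lnet.ln_api_mutex);
      *    rc = lnet_user_add_peer_ni(&prim, &nid, true, false);
      *    mutex_unlock(&the_lnet.ln_api_mutex);
      */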
2103
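     /*
      * Remove every NID from a peer except its primary NID and clear
      * NIDS_UPTODATE so the peer is rediscovered on next use. Used
      * when deleting a peer whose primary NID must be preserved.
      */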
2104 static int
2105 lnet_reset_peer(struct lnet_peer *lp)
2106 {
2107         struct lnet_peer_net *lpn, *lpntmp;
2108         struct lnet_peer_ni *lpni, *lpnitmp;
2109         unsigned int flags;
2110         int rc;
2111
2112         lnet_peer_cancel_discovery(lp);
2113
2114         flags = LNET_PEER_CONFIGURED;
2115         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2116                 flags |= LNET_PEER_MULTI_RAIL;
2117
2118         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2119                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2120                                          lpni_peer_nis) {
2121                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2122                                 continue;
2123
2124                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2125                         if (rc) {
2126                                 CERROR("Failed to delete %s from peer %s\n",
2127                                        libcfs_nidstr(&lpni->lpni_nid),
2128                                        libcfs_nidstr(&lp->lp_primary_nid));
2129                         }
2130                 }
2131         }
2132
2133         /* mark it for discovery the next time we use it */
2134         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2135         return 0;
2136 }
2137
2138 /*
2139  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2140  *
2141  * This API handles the following combinations:
2142  *   Delete a NI from a peer if both prim_nid and nid are provided.
2143  *   Delete a peer if only prim_nid is provided.
2144  *   Delete a peer if the nid given is its primary nid.
2145  *
2146  * The caller must hold ln_api_mutex. This prevents the peer from
2147  * being modified/deleted by a different thread.
2148  */
2149 int
2150 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2151                  int force)
2152 {
2153         struct lnet_peer *lp;
2154         struct lnet_peer_ni *lpni;
2155         unsigned int flags;
2156
2157         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2158                 return -EINVAL;
2159
2160         lpni = lnet_peer_ni_find_locked(prim_nid);
2161         if (!lpni)
2162                 return -ENOENT;
2163         lp = lpni->lpni_peer_net->lpn_peer;
2164         lnet_peer_ni_decref_locked(lpni);
2165
2166         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2167                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2168                        libcfs_nidstr(prim_nid),
2169                        libcfs_nidstr(&lp->lp_primary_nid));
2170                 return -ENODEV;
2171         }
2172
2173         lnet_net_lock(LNET_LOCK_EX);
2174         if (lp->lp_rtr_refcount > 0) {
2175                 lnet_net_unlock(LNET_LOCK_EX);
2176                 CERROR("%s is a router. Can not be deleted\n",
2177                        libcfs_nidstr(prim_nid));
2178                 return -EBUSY;
2179         }
2180         lnet_net_unlock(LNET_LOCK_EX);
2181
2182         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2183                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2184                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2185                                libcfs_nidstr(&lp->lp_primary_nid));
2186                         return lnet_reset_peer(lp);
2187                 } else {
2188                         return lnet_peer_del(lp);
2189                 }
2190         }
2191
2192         flags = LNET_PEER_CONFIGURED;
2193         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2194                 flags |= LNET_PEER_MULTI_RAIL;
2195
2196         return lnet_peer_del_nid(lp, nid, flags);
2197 }
2198
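     /*
      * Release function for lpni_kref. Unlinks the peer_ni from the
      * zombie list, frees any preferred-NID list entries, and drops
      * its reference on the parent peer_net.
      */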
2199 void
2200 lnet_destroy_peer_ni_locked(struct kref *ref)
2201 {
2202         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2203                                                  lpni_kref);
2204         struct lnet_peer_table *ptable;
2205         struct lnet_peer_net *lpn;
2206
2207         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2208
2209         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2210         LASSERT(list_empty(&lpni->lpni_txq));
2211         LASSERT(lpni->lpni_txqnob == 0);
2212         LASSERT(list_empty(&lpni->lpni_peer_nis));
2213         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2214
2215         lpn = lpni->lpni_peer_net;
2216         lpni->lpni_peer_net = NULL;
2217         lpni->lpni_net = NULL;
2218
2219         if (!list_empty(&lpni->lpni_hashlist)) {
2220                 /* remove the peer ni from the zombie list */
2221                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2222                 spin_lock(&ptable->pt_zombie_lock);
2223                 list_del_init(&lpni->lpni_hashlist);
2224                 ptable->pt_zombies--;
2225                 spin_unlock(&ptable->pt_zombie_lock);
2226         }
2227
2228         if (lpni->lpni_pref_nnids > 1) {
2229                 struct lnet_nid_list *ne, *tmp;
2230
2231                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2232                                          nl_list) {
2233                         list_del_init(&ne->nl_list);
2234                         LIBCFS_FREE(ne, sizeof(*ne));
2235                 }
2236         }
2237         LIBCFS_FREE(lpni, sizeof(*lpni));
2238
2239         if (lpn)
2240                 lnet_peer_net_decref_locked(lpn);
2241 }
2242
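     /*
      * Find the peer_ni for the given nid, creating it via the
      * traffic path if it does not exist. Drops and re-takes
      * lnet_net_lock/EX around the creation; requires ln_api_mutex
      * (see __must_hold below).
      */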
2243 struct lnet_peer_ni *
2244 lnet_nid2peerni_ex(struct lnet_nid *nid)
2245 __must_hold(&the_lnet.ln_api_mutex)
2246 {
2247         struct lnet_peer_ni *lpni = NULL;
2248
2249         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2250                 return ERR_PTR(-ESHUTDOWN);
2251
2252         /*
2253          * find if a peer_ni already exists.
2254          * If so then just return that.
2255          */
2256         lpni = lnet_peer_ni_find_locked(nid);
2257         if (lpni)
2258                 return lpni;
2259
2260         lnet_net_unlock(LNET_LOCK_EX);
2261
2262         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2263
2264         lnet_net_lock(LNET_LOCK_EX);
2265
2266         return lpni;
2267 }
2268
2269 /*
2270  * Get a peer_ni for the given nid, create it if necessary. Takes a
2271  * hold on the peer_ni.
2272  */
2273 struct lnet_peer_ni *
2274 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2275                         struct lnet_nid *pref, int cpt)
2276 {
2277         struct lnet_peer_ni *lpni = NULL;
2278
2279         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2280                 return ERR_PTR(-ESHUTDOWN);
2281
2282         /*
2283          * find if a peer_ni already exists.
2284          * If so then just return that.
2285          */
2286         lpni = lnet_peer_ni_find_locked(nid);
2287         if (lpni)
2288                 return lpni;
2289
2290         /*
2291          * Slow path:
2292          * use the lnet_api_mutex to serialize the creation of the peer_ni
2293          * and the creation/deletion of the local ni/net. When a local ni is
2294          * created, if there exists a set of peer_nis on that network,
2295          * they need to be traversed and updated. When a local NI is
2296          * deleted, which could result in a network being deleted, then
2297          * all peer nis on that network need to be removed as well.
2298          *
2299          * Creation through traffic should also be serialized with
2300          * creation through DLC.
2301          */
2302         lnet_net_unlock(cpt);
2303         mutex_lock(&the_lnet.ln_api_mutex);
2304         /*
2305          * the_lnet.ln_state is only modified under the ln_api_mutex, so a
2306          * single check here is sufficient.
2307          */
2308         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2309                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2310
2311         mutex_unlock(&the_lnet.ln_api_mutex);
2312         lnet_net_lock(cpt);
2313
2314         /* Lock has been dropped, check again for shutdown. */
2315         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2316                 if (!IS_ERR_OR_NULL(lpni))
2317                         lnet_peer_ni_decref_locked(lpni);
2318                 lpni = ERR_PTR(-ESHUTDOWN);
2319         }
2320
2321         return lpni;
2322 }
2323
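     /* Report whether router (gateway) discovery is running on this peer. */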
2324 bool
2325 lnet_peer_gw_discovery(struct lnet_peer *lp)
2326 {
2327         bool rc = false;
2328
2329         spin_lock(&lp->lp_lock);
2330         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2331                 rc = true;
2332         spin_unlock(&lp->lp_lock);
2333
2334         return rc;
2335 }
2336
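     /* Locking wrapper: takes lp_lock around lnet_peer_is_uptodate_locked(). */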
2337 bool
2338 lnet_peer_is_uptodate(struct lnet_peer *lp)
2339 {
2340         bool rc;
2341
2342         spin_lock(&lp->lp_lock);
2343         rc = lnet_peer_is_uptodate_locked(lp);
2344         spin_unlock(&lp->lp_lock);
2345         return rc;
2346 }
2347
2348 /*
2349  * Is a peer uptodate from the point of view of discovery?
2350  *
2351  * If it is currently being processed, obviously not.
2352  * A forced Ping or Push is also handled by the discovery thread.
2353  *
2354  * Otherwise look at whether the peer needs rediscovering.
2355  */
2356 bool
2357 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2358 __must_hold(&lp->lp_lock)
2359 {
2360         bool rc;
2361
2362         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2363                             LNET_PEER_FORCE_PING |
2364                             LNET_PEER_FORCE_PUSH)) {
2365                 rc = false;
2366         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2367                 rc = false;
2368         } else if (lnet_peer_needs_push(lp)) {
2369                 rc = false;
2370         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2371                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2372                         rc = true;
2373                 else
2374                         rc = false;
2375         } else {
2376                 rc = false;
2377         }
2378
2379         return rc;
2380 }
2381
2382 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2383 void
2384 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2385 {
2386         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2387          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2388          * when adding to the list and queuing the peer to ensure that we do not
2389          * strand any messages on the lp_dc_pendq. This scheme ensures the
2390          * message will be resent even if the peer is already being discovered.
2391          * Therefore we needn't check the return value of
2392          * lnet_peer_queue_for_discovery(lp).
2393          */
2394         lnet_net_lock(LNET_LOCK_EX);
2395         spin_lock(&lp->lp_lock);
2396         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2397         spin_unlock(&lp->lp_lock);
2398         lnet_peer_queue_for_discovery(lp);
2399         lnet_net_unlock(LNET_LOCK_EX);
2400 }
2401
2402 /*
2403  * Queue a peer for the attention of the discovery thread.  Call with
2404  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2405  * -EALREADY if the peer was already queued.
2406  */
2407 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2408 {
2409         int rc;
2410
2411         spin_lock(&lp->lp_lock);
2412         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2413                 lp->lp_state |= LNET_PEER_DISCOVERING;
2414         spin_unlock(&lp->lp_lock);
2415         if (list_empty(&lp->lp_dc_list)) {
2416                 lnet_peer_addref_locked(lp);
2417                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2418                 wake_up(&the_lnet.ln_dc_waitq);
2419                 rc = 0;
2420         } else {
2421                 rc = -EALREADY;
2422         }
2423
2424         CDEBUG(D_NET, "Queue peer %s: %d\n",
2425                libcfs_nidstr(&lp->lp_primary_nid), rc);
2426
2427         return rc;
2428 }
2429
2430 /*
2431  * Discovery of a peer is complete. Wake all waiters on the peer.
2432  * Call with lnet_net_lock/EX held.
2433  */
2434 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2435 {
2436         struct lnet_msg *msg, *tmp;
2437         int rc = 0;
2438         LIST_HEAD(pending_msgs);
2439
2440         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2441                libcfs_nidstr(&lp->lp_primary_nid));
2442
2443         spin_lock(&lp->lp_lock);
2444         /* Our caller dropped lp_lock which may have allowed another thread to
2445          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2446          * Ensure it is cleared.
2447          */
2448         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2449         if (dc_error) {
2450                 lp->lp_dc_error = dc_error;
2451                 lp->lp_state |= LNET_PEER_REDISCOVER;
2452         }
2453         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2454         spin_unlock(&lp->lp_lock);
2455         list_del_init(&lp->lp_dc_list);
2456         wake_up(&lp->lp_dc_waitq);
2457
2458         if (lp->lp_rtr_refcount > 0)
2459                 lnet_router_discovery_complete(lp);
2460
2461         lnet_net_unlock(LNET_LOCK_EX);
2462
2463         /* iterate through all pending messages and send them again */
2464         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2465                 list_del_init(&msg->msg_list);
2466                 if (dc_error) {
2467                         lnet_finalize(msg, dc_error);
2468                         continue;
2469                 }
2470
2471                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2472                        lnet_msgtyp2str(msg->msg_type),
2473                        libcfs_idstr(&msg->msg_target));
2474                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2475                                &msg->msg_rtr_nid_param);
2476                 if (rc < 0) {
2477                         CNETERR("Error sending %s to %s: %d\n",
2478                                lnet_msgtyp2str(msg->msg_type),
2479                                libcfs_idstr(&msg->msg_target), rc);
2480                         lnet_finalize(msg, rc);
2481                 }
2482         }
2483         lnet_net_lock(LNET_LOCK_EX);
2484         lnet_peer_decref_locked(lp);
2485 }
2486
2487 /*
2488  * Handle inbound push.
2489  * Like any event handler, called with lnet_res_lock/CPT held.
2490  */
2491 void lnet_peer_push_event(struct lnet_event *ev)
2492 {
2493         struct lnet_ping_buffer *pbuf;
2494         struct lnet_peer *lp;
2495         int infobytes;
2496
2497         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2498
2499         /* lnet_find_peer() adds a refcount */
2500         lp = lnet_find_peer(&ev->source.nid);
2501         if (!lp) {
2502                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2503                        libcfs_nidstr(&ev->initiator.nid),
2504                        libcfs_nidstr(&ev->source.nid));
2505                 pbuf->pb_needs_post = true;
2506                 return;
2507         }
2508
2509         /* Ensure peer state remains consistent while we modify it. */
2510         spin_lock(&lp->lp_lock);
2511
2512         /*
2513          * If some kind of error happened the contents of the message
2514          * cannot be used. Clear the NIDS_UPTODATE and set the
2515          * FORCE_PING flag to trigger a ping.
2516          */
2517         if (ev->status) {
2518                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2519                 lp->lp_state |= LNET_PEER_FORCE_PING;
2520                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2521                        ev->status,
2522                        libcfs_nidstr(&lp->lp_primary_nid),
2523                        libcfs_nidstr(&ev->source.nid));
2524                 goto out;
2525         }
2526
2527         /*
2528          * A push with invalid or corrupted info. Clear the UPTODATE
2529          * flag to trigger a ping.
2530          */
2531         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2532                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2533                 lp->lp_state |= LNET_PEER_FORCE_PING;
2534                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2535                        libcfs_nidstr(&lp->lp_primary_nid));
2536                 goto out;
2537         }
2538
2539         /* Make sure we'll allocate a correctly sized ping buffer when
2540          * pinging the peer.
2541          */
2542         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2543         if (lp->lp_data_bytes < infobytes)
2544                 lp->lp_data_bytes = infobytes;
2545
2546         /*
2547          * A non-Multi-Rail peer is not supposed to be capable of
2548          * sending a push.
2549          */
2550         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2551                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2552                        libcfs_nidstr(&lp->lp_primary_nid));
2553                 goto out;
2554         }
2555
2556         /*
2557          * The peer may have discovery disabled at its end. Set
2558          * NO_DISCOVERY as appropriate.
2559          */
2560         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2561                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2562                        libcfs_nidstr(&lp->lp_primary_nid));
2563                 /*
2564                  * Mark the peer for deletion if we already know about it
2565                  * and it's toggling discovery from enabled to disabled.
2566                  */
2567                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2568                                       LNET_PEER_DISCOVERING)) &&
2569                      lp->lp_state & LNET_PEER_DISCOVERED) {
2570                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2571                                libcfs_nidstr(&lp->lp_primary_nid),
2572                                lp->lp_state);
2573                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2574                 }
2575                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2576         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2577                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2578                        libcfs_nidstr(&lp->lp_primary_nid));
2579                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2580         }
2581
2582         /*
2583          * Update the MULTI_RAIL flag based on the push. If the peer
2584          * was configured with DLC then the setting should match what
2585          * DLC put in.
2586          * NB: We verified above that the MR feature bit is set in pi_features
2587          */
2588         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2589                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2590                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2591         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2592                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2593                       libcfs_nidstr(&lp->lp_primary_nid));
2594         } else if (lnet_peer_discovery_disabled) {
2595                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2596                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2597         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2598                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2599                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2600         } else {
2601                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2602                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2603                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2604                 lnet_peer_clr_non_mr_pref_nids(lp);
2605         }
2606
2607         /* Check for truncation of the Put message. Clear the
2608          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2609          * and tell discovery to allocate a bigger buffer.
2610          */
2611         if (ev->mlength < ev->rlength) {
2612                 if (the_lnet.ln_push_target_nbytes < infobytes)
2613                         the_lnet.ln_push_target_nbytes = infobytes;
2614                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2615                 lp->lp_state |= LNET_PEER_FORCE_PING;
2616                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2617                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2618                 goto out;
2619         }
2620
2621         /* always assume new data */
2622         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2623         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2624
2625         /* If there is data present that hasn't been processed yet,
2626          * we'll replace it if the Put contained newer data and it
2627          * fits. We're racing with a Ping or earlier Push in this
2628          * case.
2629          */
2630         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2631                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2632                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2633                     infobytes <= lp->lp_data->pb_nbytes) {
2634                         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2635                                infobytes, FLEXIBLE_OBJECT);
2636                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2637                               libcfs_nidstr(&lp->lp_primary_nid),
2638                               LNET_PING_BUFFER_SEQNO(pbuf),
2639                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2640                 }
2641                 goto out;
2642         }
2643
2644         /*
2645          * Allocate a buffer to copy the data. On a failure we drop
2646          * the Push and set FORCE_PING to force the discovery
2647          * thread to fix the problem by pinging the peer.
2648          */
2649         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2650         if (!lp->lp_data) {
2651                 lp->lp_state |= LNET_PEER_FORCE_PING;
2652                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2653                        libcfs_nidstr(&lp->lp_primary_nid),
2654                        LNET_PING_BUFFER_SEQNO(pbuf));
2655                 goto out;
2656         }
2657
2658         /* Success */
2659         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2660                       FLEXIBLE_OBJECT);
2661         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2662         CDEBUG(D_NET, "Received Push %s %u\n",
2663                libcfs_nidstr(&lp->lp_primary_nid),
2664                LNET_PING_BUFFER_SEQNO(pbuf));
2665
2666 out:
2667         /* We've processed this buffer. It can be reposted */
2668         pbuf->pb_needs_post = true;
2669
2670         /*
2671          * Queue the peer for discovery if it is not up to date. If the
2672          * peer was already queued, force it back onto the request queue
2673          * and wake the discovery thread, because its status changed.
2674          */
2675         spin_unlock(&lp->lp_lock);
2676         lnet_net_lock(LNET_LOCK_EX);
2677         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2678                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2679                 wake_up(&the_lnet.ln_dc_waitq);
2680         }
2681         /* Drop refcount from lookup */
2682         lnet_peer_decref_locked(lp);
2683         lnet_net_unlock(LNET_LOCK_EX);
2684 }
2685
2686 /*
2687  * Clear the discovery error state, unless we're already discovering
2688  * this peer, in which case the error is current.
2689  */
2690 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2691 {
2692         spin_lock(&lp->lp_lock);
2693         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2694                 lp->lp_dc_error = 0;
2695         spin_unlock(&lp->lp_lock);
2696 }
2697
2698 /*
2699  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2700  * dropped/retaken within this function. An lnet_peer_ni is passed in
2701  * because discovery could tear down an lnet_peer.
2702  */
2703 int
2704 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2705 {
2706         DEFINE_WAIT(wait);
2707         struct lnet_peer *lp = NULL;
2708         int rc = 0;
2709         int count = 0;
2710
2711 again:
2712         if (lp)
2713                 lnet_peer_decref_locked(lp);
2714         lnet_net_unlock(cpt);
2715         lnet_net_lock(LNET_LOCK_EX);
2716         lp = lpni->lpni_peer_net->lpn_peer;
2717         lnet_peer_clear_discovery_error(lp);
2718
2719         /*
2720          * We're willing to be interrupted. The lpni can become a
2721          * zombie if we race with DLC, so we must check for that.
2722          */
2723         for (;;) {
2724                 /* Keep lp alive when the lnet_net_lock is unlocked */
2725                 lnet_peer_addref_locked(lp);
2726                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2727                 if (signal_pending(current))
2728                         break;
2729                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2730                         break;
2731                 /*
2732                  * Don't repeat discovery if discovery is disabled. This is
2733                  * done to ensure we can use discovery as a standard ping as
2734                  * well, for backwards compatibility with routers which do
2735                  * not have discovery or have discovery disabled.
2736                  */
2737                 if (lnet_is_discovery_disabled(lp) && count > 0)
2738                         break;
2739                 if (lp->lp_dc_error)
2740                         break;
2741                 if (lnet_peer_is_uptodate(lp))
2742                         break;
2743                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2744                         break;
2745                 lnet_peer_queue_for_discovery(lp);
2746                 count++;
2747                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2748
2749                 /*
2750                  * If caller requested a non-blocking operation then
2751                  * return immediately. Once discovery is complete any
2752                  * pending messages that were stopped due to discovery
2753                  * will be transmitted.
2754                  */
2755                 if (!block)
2756                         break;
2757
2758                 lnet_net_unlock(LNET_LOCK_EX);
2759                 schedule();
2760                 finish_wait(&lp->lp_dc_waitq, &wait);
2761                 lnet_net_lock(LNET_LOCK_EX);
2762                 lnet_peer_decref_locked(lp);
2763                 /* Peer may have changed */
2764                 lp = lpni->lpni_peer_net->lpn_peer;
2765         }
2766         finish_wait(&lp->lp_dc_waitq, &wait);
2767
2768         lnet_net_unlock(LNET_LOCK_EX);
2769         lnet_net_lock(cpt);
2770         /*
2771          * The peer may have changed, so re-check and rediscover if that turns
2772          * out to have been the case. The reference count on lp ensured that
2773          * even if it was unlinked from lpni the memory could not be recycled.
2774          * Thus the check below is sufficient to determine whether the peer
2775          * changed. If the peer changed, then lp must not be dereferenced.
2776          */
2777         if (lp != lpni->lpni_peer_net->lpn_peer)
2778                 goto again;
2779
2780         if (signal_pending(current))
2781                 rc = -EINTR;
2782         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2783                 rc = -ESHUTDOWN;
2784         else if (lp->lp_dc_error)
2785                 rc = lp->lp_dc_error;
2786         else if (!block)
2787                 CDEBUG(D_NET, "non-blocking discovery\n");
2788         else if (!lnet_peer_is_uptodate(lp) &&
2789                  !(lnet_is_discovery_disabled(lp) ||
2790                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2791                 goto again;
2792
2793         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2794                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2795                libcfs_nidstr(&lpni->lpni_nid), rc,
2796                (!block) ? "pending discovery" : "discovery complete");
2797         lnet_peer_decref_locked(lp);
2798
2799         return rc;
2800 }
2801
2802 /* Handle an incoming ack for a push. */
2803 static void
2804 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2805 {
2806         struct lnet_ping_buffer *pbuf;
2807
2808         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2809         spin_lock(&lp->lp_lock);
2810         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2811         lp->lp_push_error = ev->status;
2812         if (ev->status)
2813                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2814         else
2815                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2816         spin_unlock(&lp->lp_lock);
2817
2818         CDEBUG(D_NET, "peer %s ev->status %d\n",
2819                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2820 }
2821
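     /*
      * Extract the peer's primary NID from a ping buffer into @nid:
      * the first large NID if LNET_PING_FEAT_PRIMARY_LARGE is set,
      * otherwise pi_ni[1]. Returns false if no primary NID can be
      * determined.
      */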
2822 static bool find_primary(struct lnet_nid *nid,
2823                          struct lnet_ping_buffer *pbuf)
2824 {
2825         struct lnet_ping_info *pi = &pbuf->pb_info;
2826         struct lnet_ping_iter piter;
2827         __u32 *stp;
2828
2829         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2830                 /* First large nid is primary */
2831                 for (stp = ping_iter_first(&piter, pbuf, nid);
2832                      stp;
2833                      stp = ping_iter_next(&piter, nid)) {
2834                         if (nid_is_nid4(nid))
2835                                 continue;
2836                         /* nid has already been copied in */
2837                         return true;
2838                 }
2839                 /* no large nids ... weird ... ignore the flag
2840                  * and use first nid.
2841                  */
2842         }
2843         /* pi_ni[1] is primary */
2844         if (pi->pi_nnis < 2)
2845                 return false;
2846         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2847         return true;
2848 }
2849
2850 /* Handle a Reply message. This is the reply to a Ping message. */
2851 static void
2852 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2853 {
2854         struct lnet_ping_buffer *pbuf;
2855         struct lnet_nid primary;
2856         int infobytes;
2857         int rc;
2858         bool ping_feat_disc;
2859
2860         spin_lock(&lp->lp_lock);
2861
2862         lp->lp_disc_src_nid = ev->target.nid;
2863         lp->lp_disc_dst_nid = ev->source.nid;
2864
2865         /*
2866          * If some kind of error happened the contents of message
2867          * cannot be used. Set PING_FAILED to trigger a retry.
2868          */
2869         if (ev->status) {
2870                 lp->lp_state |= LNET_PEER_PING_FAILED;
2871                 lp->lp_ping_error = ev->status;
2872                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2873                        ev->status,
2874                        libcfs_nidstr(&lp->lp_primary_nid),
2875                        libcfs_nidstr(&ev->source.nid));
2876                 goto out;
2877         }
2878
2879         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2880         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2881                 lnet_swap_pinginfo(pbuf);
2882
2883         /*
2884          * A reply with invalid or corrupted info. Set PING_FAILED to
2885          * trigger a retry.
2886          */
2887         rc = lnet_ping_info_validate(&pbuf->pb_info);
2888         if (rc) {
2889                 lp->lp_state |= LNET_PEER_PING_FAILED;
2890                 lp->lp_ping_error = 0;
2891                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2892                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2893                 goto out;
2894         }
2895
2896         /*
2897          * The peer may have discovery disabled at its end. Set
2898          * NO_DISCOVERY as appropriate.
2899          */
2900         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2901         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2902                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2903                        libcfs_nidstr(&lp->lp_primary_nid),
2904                        ping_feat_disc ? "enabled" : "disabled",
2905                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2906
2907                 /* Detect whether this peer has toggled discovery from on to
2908                  * off and whether we can delete and re-create the peer. Peers
2909                  * that were manually configured cannot be deleted by discovery.
2910                  * We need to delete this peer and re-create it if the peer was
2911                  * not configured manually, is currently considered DD capable,
2912                  * and either:
2913                  * 1. We've already discovered the peer (the peer has toggled
2914                  *    the discovery feature from on to off), or
2915                  * 2. The peer is considered MR, but it was not user configured
2916                  *    (this was a "temporary" peer created via the kernel APIs
2917                  *     that we're discovering for the first time)
2918                  */
2919                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2920                                       LNET_PEER_NO_DISCOVERY)) &&
2921                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2922                                      LNET_PEER_MULTI_RAIL))) {
2923                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2924                                libcfs_nidstr(&lp->lp_primary_nid),
2925                                lp->lp_state);
2926                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2927                 }
2928                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2929         } else {
2930                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2931                        libcfs_nidstr(&lp->lp_primary_nid));
2932                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2933         }
2934
2935         /*
2936          * Update the MULTI_RAIL flag based on the reply. If the peer
2937          * was configured with DLC then the setting should match what
2938          * DLC put in.
2939          */
2940         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2941                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2942                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2943                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2944                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2945                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2946                               libcfs_nidstr(&lp->lp_primary_nid));
2947                 } else if (lnet_peer_discovery_disabled) {
2948                         CDEBUG(D_NET,
2949                                "peer %s(%p) not MR: DD disabled locally\n",
2950                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2951                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2952                         CDEBUG(D_NET,
2953                                "peer %s(%p) not MR: DD disabled remotely\n",
2954                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2955                 } else {
2956                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2957                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2958                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2959                         lnet_peer_clr_non_mr_pref_nids(lp);
2960                 }
2961         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2962                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2963                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2964                               libcfs_nidstr(&lp->lp_primary_nid));
2965                 } else {
2966                         CERROR("Multi-Rail state vanished from %s\n",
2967                                libcfs_nidstr(&lp->lp_primary_nid));
2968                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2969                 }
2970         }
2971
2972         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2973         /*
2974          * Make sure we'll allocate the correct size ping buffer when
2975          * pinging the peer.
2976          */
2977         if (lp->lp_data_bytes < infobytes)
2978                 lp->lp_data_bytes = infobytes;
2979
2980         /* Check for truncation of the Reply. Clear PING_SENT and set
2981          * PING_FAILED to trigger a retry.
2982          */
2983         if (pbuf->pb_nbytes < infobytes) {
2984                 if (the_lnet.ln_push_target_nbytes < infobytes)
2985                         the_lnet.ln_push_target_nbytes = infobytes;
2986                 lp->lp_state |= LNET_PEER_PING_FAILED;
2987                 lp->lp_ping_error = 0;
2988                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2989                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2990                 goto out;
2991         }
2992
2993         /*
2994          * Check the sequence numbers in the reply. These are only
2995          * available if the reply came from a Multi-Rail peer.
2996          */
2997         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2998             find_primary(&primary, pbuf) &&
2999             nid_same(&lp->lp_primary_nid, &primary)) {
3000                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
3001                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
3002                                 libcfs_nidstr(&lp->lp_primary_nid),
3003                                 LNET_PING_BUFFER_SEQNO(pbuf),
3004                                 lp->lp_peer_seqno);
3005
3006                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3007         }
3008
3009         /* We're happy with the state of the data in the buffer. */
3010         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
3011                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
3012                lp->lp_state);
3013         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3014                 kref_put(&lp->lp_data->pb_refcnt, lnet_ping_buffer_free);
3015         else
3016                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
3017         kref_get(&pbuf->pb_refcnt);
3018         lp->lp_data = pbuf;
3019 out:
3020         lp->lp_state &= ~LNET_PEER_PING_SENT;
3021         spin_unlock(&lp->lp_lock);
3022 }
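
/* Informal summary of the outcomes of lnet_discovery_event_reply():
 *
 *   ev->status != 0, corrupted info, or truncated reply
 *       -> LNET_PEER_PING_FAILED is set and discovery will retry.
 *   valid reply
 *       -> feature flags are folded into lp_state (NO_DISCOVERY,
 *          MULTI_RAIL, possibly MARK_DELETION), the sequence number is
 *          recorded, and the ping buffer is stashed in lp_data with
 *          LNET_PEER_DATA_PRESENT set.
 *
 * In all cases LNET_PEER_PING_SENT is cleared before returning.
 */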
3023
3024 /*
3025  * Send event handling. Only matters for error cases, where we clean
3026  * up state on the peer and peer_ni that would otherwise be updated in
3027  * the REPLY event handler for a successful Ping, and the ACK event
3028  * handler for a successful Push.
3029  */
3030 static int
3031 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
3032 {
3033         int rc = 0;
3034
3035         if (!ev->status)
3036                 goto out;
3037
3038         spin_lock(&lp->lp_lock);
3039         if (ev->msg_type == LNET_MSG_GET) {
3040                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3041                 lp->lp_state |= LNET_PEER_PING_FAILED;
3042                 lp->lp_ping_error = ev->status;
3043         } else { /* ev->msg_type == LNET_MSG_PUT */
3044                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3045                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3046                 lp->lp_push_error = ev->status;
3047         }
3048         spin_unlock(&lp->lp_lock);
3049         rc = LNET_REDISCOVER_PEER;
3050 out:
3051         CDEBUG(D_NET, "%s Send to %s: %d\n",
3052                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3053                 libcfs_nidstr(&ev->target.nid), rc);
3054         return rc;
3055 }
3056
3057 /*
3058  * Unlink event handling. This event is only seen if a call to
3059  * LNetMDUnlink() caused the MD to be unlinked. If this call was
3060  * made after the MD was set up in LNetGet() or LNetPut() then we
3061  * assume the Ping or Push timed out.
3062  */
3063 static void
3064 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3065 {
3066         spin_lock(&lp->lp_lock);
3067         /* We've passed through LNetGet() */
3068         if (lp->lp_state & LNET_PEER_PING_SENT) {
3069                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3070                 lp->lp_state |= LNET_PEER_PING_FAILED;
3071                 lp->lp_ping_error = -ETIMEDOUT;
3072                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3073                         libcfs_nidstr(&lp->lp_primary_nid));
3074         }
3075         /* We've passed through LNetPut() */
3076         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3077                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3078                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3079                 lp->lp_push_error = -ETIMEDOUT;
3080                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3081                         libcfs_nidstr(&lp->lp_primary_nid));
3082         }
3083         spin_unlock(&lp->lp_lock);
3084 }
3085
3086 /*
3087  * Event handler for the discovery EQ.
3088  *
3089  * Called with lnet_res_lock(cpt) held. The cpt is the
3090  * lnet_cpt_of_cookie() of the md handle cookie.
3091  */
3092 static void lnet_discovery_event_handler(struct lnet_event *event)
3093 {
3094         struct lnet_peer *lp = event->md_user_ptr;
3095         struct lnet_ping_buffer *pbuf;
3096         int rc;
3097
3098         /* discovery needs to take another look */
3099         rc = LNET_REDISCOVER_PEER;
3100
3101         CDEBUG(D_NET, "Received event: %d\n", event->type);
3102
3103         switch (event->type) {
3104         case LNET_EVENT_ACK:
3105                 lnet_discovery_event_ack(lp, event);
3106                 break;
3107         case LNET_EVENT_REPLY:
3108                 lnet_discovery_event_reply(lp, event);
3109                 break;
3110         case LNET_EVENT_SEND:
3111                 /* Only send failure triggers a retry. */
3112                 rc = lnet_discovery_event_send(lp, event);
3113                 break;
3114         case LNET_EVENT_UNLINK:
3115                 /* LNetMDUnlink() was called */
3116                 lnet_discovery_event_unlink(lp, event);
3117                 break;
3118         default:
3119                 /* Invalid events. */
3120                 LBUG();
3121         }
3122         lnet_net_lock(LNET_LOCK_EX);
3123
3124         /* Put the peer back at the end of the request queue if its
3125          * discovery is not already complete. */
3126         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3127             lnet_peer_queue_for_discovery(lp)) {
3128                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3129                 wake_up(&the_lnet.ln_dc_waitq);
3130         }
3131         if (event->unlinked) {
3132                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3133                 kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3134                 lnet_peer_decref_locked(lp);
3135         }
3136         lnet_net_unlock(LNET_LOCK_EX);
3137 }
3138
3139 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3140                      struct lnet_ping_buffer *pbuf,
3141                      struct lnet_nid *nid)
3142 {
3143         pi->pinfo = &pbuf->pb_info;
3144         pi->pos = &pbuf->pb_info.pi_ni;
3145         pi->end = (void *)pi->pinfo +
3146                   min_t(int, pbuf->pb_nbytes,
3147                         lnet_ping_info_size(pi->pinfo));
3148         /* lnet_ping_info_validate() ensures there is at least one
3149          * lnet_ni_status entry at the start.
3150          */
3151         if (nid)
3152                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3153
3154         pi->pos += sizeof(struct lnet_ni_status);
3155         return &pbuf->pb_info.pi_ni[0].ns_status;
3156 }
3157
3158 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3159 {
3160         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3161
3162         if (pi->pos < ((void *)pi->pinfo + off)) {
3163                 struct lnet_ni_status *ns = pi->pos;
3164
3165                 pi->pos = ns + 1;
3166                 if (pi->pos > pi->end)
3167                         return NULL;
3168                 if (nid)
3169                         lnet_nid4_to_nid(ns->ns_nid, nid);
3170                 return &ns->ns_status;
3171         }
3172
3173         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3174                 struct lnet_ni_large_status *lns = pi->pos;
3175
3176                 if (pi->pos + 8 > pi->end)
3177                         /* Not safe to examine next */
3178                         return NULL;
3179                 pi->pos = lnet_ping_sts_next(lns);
3180                 if (pi->pos > pi->end)
3181                         return NULL;
3182                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3183                         continue;
3184                 if (nid)
3185                         *nid = lns->ns_nid;
3186                 return &lns->ns_status;
3187         }
3188         return NULL;
3189 }
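
/* Typical use of the iterator pair above (sketch only; do_something()
 * is a hypothetical consumer): walk every NID and status in a ping
 * buffer, whether the entries are nid4 or large-address format.
 * ping_info_count_entries() below is the simplest real example.
 *
 *     struct lnet_ping_iter pi;
 *     struct lnet_nid nid;
 *     u32 *st;
 *
 *     for (st = ping_iter_first(&pi, pbuf, &nid); st;
 *          st = ping_iter_next(&pi, &nid))
 *             do_something(&nid, *st);
 */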
3190
3191 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3192 {
3193         struct lnet_ping_iter pi;
3194         u32 *st;
3195         int nnis = 0;
3196
3197         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3198              st = ping_iter_next(&pi, NULL))
3199                 nnis += 1;
3200
3201         return nnis;
3202 }
3203
3204 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni,
3205                                            __u32 new_status)
3206 {
3207         __u32 old_status;
3208
3209         spin_lock(&lpni->lpni_lock);
3210         old_status = lpni->lpni_ns_status;
3211         lpni->lpni_ns_status = new_status;
3212         spin_unlock(&lpni->lpni_lock);
3213
3214         /* Decrement health when transitioning from UP to DOWN */
3215         if (old_status != new_status && new_status == LNET_NI_STATUS_DOWN) {
3216                 lnet_net_lock(0);
3217                 lnet_handle_remote_failure_locked(lpni);
3218                 lnet_net_unlock(0);
3219         } else if (new_status == LNET_NI_STATUS_UP && !lpni->lpni_last_alive) {
3220                 /* Set health to max if the initial status is UP */
3221                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3222         }
3223 }
3224
3225 /*
3226  * Build a peer from incoming data.
3227  *
3228  * The NIDs in the incoming data are supposed to be structured as follows:
3229  *  - loopback
3230  *  - primary NID
3231  *  - other NIDs in same net
3232  *  - NIDs in second net
3233  *  - NIDs in third net
3234  *  - ...
3235  * This is due to the way the list of NIDs in the data is created.
3236  *
3237  * Note that this function will mark the peer up to date unless an
3238  * ENOMEM is encountered. All other errors are due to a conflict
3239  * between the DLC configuration and what discovery sees. We treat DLC
3240  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3241  * peer from becoming stuck in discovery.
3242  */
3243 static int lnet_peer_merge_data(struct lnet_peer *lp,
3244                                 struct lnet_ping_buffer *pbuf)
3245 {
3246         struct lnet_peer_net *lpn;
3247         struct lnet_peer_ni *lpni;
3248         struct lnet_nid *curnis = NULL;
3249         struct lnet_ni_large_status *addnis = NULL;
3250         struct lnet_nid *delnis = NULL;
3251         struct lnet_ping_iter pi;
3252         struct lnet_nid nid;
3253         u32 *stp;
3254         unsigned int flags;
3255         int ncurnis;
3256         int naddnis;
3257         int ndelnis;
3258         int nnis = 0;
3259         int i;
3260         int j;
3261         int rc;
3262
3263         flags = LNET_PEER_DISCOVERED;
3264         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3265                 flags |= LNET_PEER_MULTI_RAIL;
3266
3267         /*
3268          * Cache the routing feature for the peer: whether it is enabled
3269          * or disabled as reported by the remote peer.
3270          */
3271         spin_lock(&lp->lp_lock);
3272         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3273                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3274         else
3275                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3276         spin_unlock(&lp->lp_lock);
3277
3278         nnis = ping_info_count_entries(pbuf);
3279         nnis = max_t(int, lp->lp_nnis, nnis);
3280         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3281         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3282         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3283         if (!curnis || !addnis || !delnis) {
3284                 rc = -ENOMEM;
3285                 goto out;
3286         }
3287         ncurnis = 0;
3288         naddnis = 0;
3289         ndelnis = 0;
3290
3291         /* Construct the list of NIDs present in peer. */
3292         lpni = NULL;
3293         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3294                 curnis[ncurnis++] = lpni->lpni_nid;
3295
3296         /* Check for NIDs in pbuf not present in curnis[].
3297          * Skip the first, which is loop-back.  Take second as
3298          * primary, unless a large primary is found.
3299          */
3300         ping_iter_first(&pi, pbuf, NULL);
3301         stp = ping_iter_next(&pi, &nid);
3302         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3303                 for (j = 0; j < ncurnis; j++)
3304                         if (nid_same(&nid, &curnis[j]))
3305                                 break;
3306                 if (j == ncurnis) {
3307                         addnis[naddnis].ns_nid = nid;
3308                         addnis[naddnis].ns_status = *stp;
3309                         naddnis += 1;
3310                 }
3311         }
3312         /*
3313          * Check for NIDs in curnis[] not present in pbuf. The inner
3314          * loop skips the first ping buffer entry (the loopback NID).
3315          *
3316          * But never add the loopback NID to delnis[]: if it is
3317          * present in curnis[] then this peer is for this node.
3318          */
3319         for (i = 0; i < ncurnis; i++) {
3320                 if (nid_is_lo0(&curnis[i]))
3321                         continue;
3322                 ping_iter_first(&pi, pbuf, NULL);
3323                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3324                         if (nid_same(&curnis[i], &nid)) {
3325                                 /*
3326                                  * update the information we cache for the
3327                                  * peer with the latest information we
3328                                  * received
3329                                  */
3330                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3331                                 if (lpni) {
3332                                         handle_disc_lpni_health(lpni, *stp);
3333                                         lnet_peer_ni_decref_locked(lpni);
3334                                 }
3335                                 break;
3336                         }
3337                 }
3338                 if (!stp)
3339                         delnis[ndelnis++] = curnis[i];
3340         }
3341
3342         /*
3343          * If we get here and discovery is disabled then we don't want
3344          * to add or delete any NIs. We have just updated the ones we
3345          * have information on; call it a day.
3346          */
3347         rc = 0;
3348         if (lnet_is_discovery_disabled(lp))
3349                 goto out;
3350
3351         for (i = 0; i < naddnis; i++) {
3352                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3353                 if (rc) {
3354                         CERROR("Error adding NID %s to peer %s: %d\n",
3355                                libcfs_nidstr(&addnis[i].ns_nid),
3356                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3357                         if (rc == -ENOMEM)
3358                                 goto out;
3359                 }
3360                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3361                 if (lpni) {
3362                         handle_disc_lpni_health(lpni, addnis[i].ns_status);
3363                         lnet_peer_ni_decref_locked(lpni);
3364                 }
3365         }
3366
3367         for (i = 0; i < ndelnis; i++) {
3368                 /*
3369                  * for routers it's okay to delete the primary_nid because
3370                  * the upper layers don't really rely on it. So if we're
3371                  * being told that the router changed its primary_nid
3372                  * then it's okay to delete it.
3373                  */
3374                 if (lp->lp_rtr_refcount > 0)
3375                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3376                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3377                 if (rc) {
3378                         CERROR("Error deleting NID %s from peer %s: %d\n",
3379                                libcfs_nidstr(&delnis[i]),
3380                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3381                         if (rc == -ENOMEM)
3382                                 goto out;
3383                 }
3384         }
3385
3386         /* The peer net for the primary NID should be the first entry in the
3387          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3388          * be the first entry in its peer net's lpn_peer_nis list.
3389          */
3390         find_primary(&nid, pbuf);
3391         lpni = lnet_peer_ni_find_locked(&nid);
3392         if (!lpni) {
3393                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3394                        libcfs_nidstr(&nid));
3395                 goto out;
3396         }
3397
3398         lpn = lpni->lpni_peer_net;
3399         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3400                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3401
3402         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3403                 list_move(&lpni->lpni_peer_nis,
3404                           &lpni->lpni_peer_net->lpn_peer_nis);
3405
3406         lnet_peer_ni_decref_locked(lpni);
3407         /*
3408          * Errors other than -ENOMEM are due to peers having been
3409          * configured with DLC. Ignore these because DLC overrides
3410          * Discovery.
3411          */
3412         rc = 0;
3413 out:
3414         /* If this peer is a gateway, invoke the routing callback to update
3415          * the associated route status
3416          */
3417         if (lp->lp_rtr_refcount > 0)
3418                 lnet_router_discovery_ping_reply(lp, pbuf);
3419
3420         CFS_FREE_PTR_ARRAY(curnis, nnis);
3421         CFS_FREE_PTR_ARRAY(addnis, nnis);
3422         CFS_FREE_PTR_ARRAY(delnis, nnis);
3423         kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3424         CDEBUG(D_NET, "peer %s (%p): %d\n",
3425                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3426
3427         if (rc) {
3428                 spin_lock(&lp->lp_lock);
3429                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3430                 lp->lp_state |= LNET_PEER_FORCE_PING;
3431                 spin_unlock(&lp->lp_lock);
3432         }
3433         return rc;
3434 }
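
/* Worked example (informal) of the set computation in
 * lnet_peer_merge_data(): suppose the peer currently has
 * curnis[] = { lo0, A, B } while the ping buffer reports { lo0, A, C }.
 * Then addnis[] = { C } (in pbuf, not in curnis[]) and delnis[] = { B }
 * (in curnis[], not in pbuf; lo0 is never deleted). With discovery
 * disabled, neither list is applied and only the status of the NIDs
 * already known is refreshed.
 */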
3435
3436 /*
3437  * The data in pbuf says lp is its primary peer, but the data was
3438  * received by a different peer. Try to update lp with the data.
3439  */
3440 static int
3441 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3442 {
3443         struct lnet_handle_md mdh;
3444
3445         /* Queue lp for discovery, and force it on the request queue. */
3446         lnet_net_lock(LNET_LOCK_EX);
3447         if (lnet_peer_queue_for_discovery(lp))
3448                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3449         lnet_net_unlock(LNET_LOCK_EX);
3450
3451         LNetInvalidateMDHandle(&mdh);
3452
3453         /*
3454          * Decide whether we can move the peer to the DATA_PRESENT state.
3455          *
3456          * We replace stale data for a multi-rail peer, repair PING_FAILED
3457          * status, and preempt FORCE_PING.
3458          *
3459          * If after that we have DATA_PRESENT, we merge it into this peer.
3460          */
3461         spin_lock(&lp->lp_lock);
3462         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3463                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3464                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3465                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3466                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3467                         kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3468                         pbuf = lp->lp_data;
3469                         lp->lp_data = NULL;
3470                 }
3471         }
3472         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3473                 kref_put(&lp->lp_data->pb_refcnt, lnet_ping_buffer_free);
3474                 lp->lp_data = NULL;
3475                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3476         }
3477         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3478                 mdh = lp->lp_ping_mdh;
3479                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3480                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3481                 lp->lp_ping_error = 0;
3482         }
3483         if (lp->lp_state & LNET_PEER_FORCE_PING)
3484                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3485         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3486         spin_unlock(&lp->lp_lock);
3487
3488         if (!LNetMDHandleIsInvalid(mdh))
3489                 LNetMDUnlink(mdh);
3490
3491         if (pbuf)
3492                 return lnet_peer_merge_data(lp, pbuf);
3493
3494         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3495         return 0;
3496 }
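
/* Informal note on the sequence-number arbitration above: for a
 * multi-rail peer a newer incoming sequence number wins and any stored
 * data is discarded before the new buffer is merged; if the incoming
 * buffer is not newer and stored data exists, the incoming buffer is
 * dropped in favour of the stored one before merging.
 */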
3497
3498 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3499                                      struct lnet_ping_buffer *pbuf)
3500 {
3501         struct lnet_ping_iter pi;
3502         struct lnet_nid pnid;
3503         u32 *st;
3504
3505         for (st = ping_iter_first(&pi, pbuf, &pnid);
3506              st;
3507              st = ping_iter_next(&pi, &pnid))
3508                 if (nid_same(nid, &pnid))
3509                         return true;
3510         return false;
3511 }
3512
3513 /* Delete a peer that has been marked for deletion. NB: when this peer was
3514  * added to the discovery queue a reference was taken that prevents the peer
3515  * from actually being freed by this function. After this function exits, the
3516  * discovery thread should call lnet_peer_discovery_complete(), which will
3517  * drop that reference as well as wake any waiters that may also be holding
3518  * a ref on the peer.
3519  */
3520 static int lnet_peer_deletion(struct lnet_peer *lp)
3521 __must_hold(&lp->lp_lock)
3522 {
3523         struct list_head rlist;
3524         struct lnet_route *route, *tmp;
3525         int sensitivity = lp->lp_health_sensitivity;
3526         int rc = 0;
3527
3528         INIT_LIST_HEAD(&rlist);
3529
3530         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3531                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3532
3533         /* no-op if lnet_peer_del() has already been called on this peer */
3534         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3535                 goto clear_discovering;
3536
3537         spin_unlock(&lp->lp_lock);
3538
3539         mutex_lock(&the_lnet.ln_api_mutex);
3540         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3541             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3542                 mutex_unlock(&the_lnet.ln_api_mutex);
3543                 spin_lock(&lp->lp_lock);
3544                 rc = -ESHUTDOWN;
3545                 goto clear_discovering;
3546         }
3547
3548         lnet_peer_cancel_discovery(lp);
3549         lnet_net_lock(LNET_LOCK_EX);
3550         list_for_each_entry_safe(route, tmp,
3551                                  &lp->lp_routes,
3552                                  lr_gwlist)
3553                 lnet_move_route(route, NULL, &rlist);
3554
3555         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3556         rc = lnet_peer_del_locked(lp);
3557         if (rc)
3558                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3559                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3560
3561         lnet_net_unlock(LNET_LOCK_EX);
3562
3563         list_for_each_entry_safe(route, tmp,
3564                                  &rlist, lr_list) {
3565                 /* re-add these routes */
3566                 lnet_add_route(route->lr_net,
3567                                route->lr_hops,
3568                                &route->lr_nid,
3569                                route->lr_priority,
3570                                sensitivity);
3571                 LIBCFS_FREE(route, sizeof(*route));
3572         }
3573
3574         mutex_unlock(&the_lnet.ln_api_mutex);
3575
3576         spin_lock(&lp->lp_lock);
3577
3578         rc = 0;
3579
3580 clear_discovering:
3581         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3582                           LNET_PEER_FORCE_PUSH);
3583
3584         return rc;
3585 }
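
/* Sketch of the route handling above (informal): a gateway peer's
 * routes cannot survive lnet_peer_del_locked(), so they are first moved
 * off the peer onto the private rlist, the peer is deleted, and each
 * saved route is re-added with lnet_add_route() using its original net,
 * hop count, gateway NID and priority, plus the peer's health
 * sensitivity, attaching it to whatever peer the gateway NID now
 * resolves to.
 */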
3586
3587 /*
3588  * Update a peer using the data received.
3589  */
3590 static int lnet_peer_data_present(struct lnet_peer *lp)
3591 __must_hold(&lp->lp_lock)
3592 {
3593         struct lnet_ping_buffer *pbuf;
3594         struct lnet_peer_ni *lpni;
3595         struct lnet_nid nid;
3596         unsigned int flags;
3597         int rc = 0;
3598
3599         pbuf = lp->lp_data;
3600         lp->lp_data = NULL;
3601         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3602         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3603         spin_unlock(&lp->lp_lock);
3604
3605         /*
3606          * Modifications of peer structures are done while holding the
3607          * ln_api_mutex. A global lock is required because we may be
3608          * modifying multiple peer structures, and a mutex greatly
3609          * simplifies memory management.
3610          *
3611          * The actual changes to the data structures must also protect
3612          * against concurrent lookups, for which the lnet_net_lock in
3613          * LNET_LOCK_EX mode is used.
3614          */
3615         mutex_lock(&the_lnet.ln_api_mutex);
3616         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3617                 kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3618                 rc = -ESHUTDOWN;
3619                 goto out;
3620         }
3621
3622         /*
3623          * If this peer is not on the peer list then it is being torn
3624          * down, and our reference count may be all that is keeping it
3625          * alive. Don't do any work on it.
3626          */
3627         if (list_empty(&lp->lp_peer_list)) {
3628                 kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3629                 goto out;
3630         }
3631
3632         flags = LNET_PEER_DISCOVERED;
3633         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3634                 flags |= LNET_PEER_MULTI_RAIL;
3635         /*
3636          * Check whether the primary NID in the message matches the
3637          * primary NID of the peer. If it does, update the peer; if
3638          * it does not, check whether there is already a peer with
3639          * that primary NID. If no such peer exists, try to update
3640          * the primary NID of the current peer (allowed if it was
3641          * created due to message traffic) and complete the update.
3642          * If the peer did exist, hand off the data to it.
3643          *
3644          * The peer for the loopback interface is a special case: this
3645          * is the peer for the local node, and we want to set its
3646          * primary NID to the correct value here. Moreover, this peer
3647          * can show up with only the loopback NID in the ping buffer.
3648          */
3649         if (!find_primary(&nid, pbuf)) {
3650                 kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3651                 goto out;
3652         }
3653         /* If lp_merge_primary_nid is set, assign it as primary,
3654          * which causes the peers to merge.
3655          */
3656         if (!LNET_NID_IS_ANY(&lp->lp_merge_primary_nid)) {
3657
3658                 lnet_peer_set_primary_nid(lp, &lp->lp_merge_primary_nid,
3659                                           flags);
3660                 lp->lp_merge_primary_nid = LNET_ANY_NID;
3661         }
3662
3663         if (nid_is_lo0(&lp->lp_primary_nid)) {
3664                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3665                 if (rc)
3666                         kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3667                 else
3668                         rc = lnet_peer_merge_data(lp, pbuf);
3669         /*
3670          * If the primary NID we have cached for the peer is present in
3671          * the ping info returned from the peer, but it is not the
3672          * primary NID reported there, and discovery is disabled, then we
3673          * don't want to update our local peer info by adding or removing
3674          * NIDs; we just want to update the status of the NIDs that we
3675          * currently have recorded for that peer.
3676          */
3677         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3678                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3679                     lnet_is_discovery_disabled(lp))) {
3680                 rc = lnet_peer_merge_data(lp, pbuf);
3681         } else {
3682                 lpni = lnet_peer_ni_find_locked(&nid);
3683                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3684                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3685                         if (rc) {
3686                                 CERROR("Primary NID error %s versus %s: %d\n",
3687                                        libcfs_nidstr(&lp->lp_primary_nid),
3688                                        libcfs_nidstr(&nid), rc);
3689                                 kref_put(&pbuf->pb_refcnt,
3690                                          lnet_ping_buffer_free);
3691                         } else {
3692                                 rc = lnet_peer_merge_data(lp, pbuf);
3693                         }
3694                         if (lpni)
3695                                 lnet_peer_ni_decref_locked(lpni);
3696                 } else {
3697                         struct lnet_peer *new_lp;
3698                         new_lp = lpni->lpni_peer_net->lpn_peer;
3699                         /*
3700                          * if lp has discovery/MR enabled that means new_lp
3701                          * should have discovery/MR enabled as well, since
3702                          * it's the same peer, which we're about to merge
3703                          */
3704                         spin_lock(&lp->lp_lock);
3705                         spin_lock(&new_lp->lp_lock);
3706                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3707                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3708                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3709                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3710                         /* If we're processing a ping reply then we may be
3711                          * about to send a push to the peer that we ping'd.
3712                          * Since the ping reply that we're processing was
3713                          * received by lp, we need to set the discovery source
3714                          * NID for new_lp to the NID stored in lp.
3715                          */
3716                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3717                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3718                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3719                         }
3720                         spin_unlock(&new_lp->lp_lock);
3721                         spin_unlock(&lp->lp_lock);
3722
3723                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3724                         lnet_consolidate_routes_locked(lp, new_lp);
3725                         lnet_peer_ni_decref_locked(lpni);
3726                 }
3727         }
3728 out:
3729         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3730                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3731                lp->lp_state);
3732         mutex_unlock(&the_lnet.ln_api_mutex);
3733
3734         spin_lock(&lp->lp_lock);
3735         /* Tell discovery to re-check the peer immediately. */
3736         if (!rc)
3737                 rc = LNET_REDISCOVER_PEER;
3738         return rc;
3739 }
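
/* Informal summary of the primary-NID dispatch in
 * lnet_peer_data_present():
 *
 *   cached primary is lo0              -> adopt reported primary, merge.
 *   reported primary == cached one     -> merge directly.
 *   cached primary in pbuf and
 *   discovery disabled                 -> merge (status refresh only).
 *   reported primary owned by no
 *   other peer                         -> set new primary, then merge.
 *   reported primary owned by another
 *   peer                               -> hand the data to that peer via
 *                                         lnet_peer_set_primary_data().
 */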
3740
3741 /*
3742  * A ping failed. Clear the PING_FAILED state and set the
3743  * FORCE_PING state, to ensure a retry even if discovery is
3744  * disabled. This avoids being left with incorrect state.
3745  */
3746 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3747 __must_hold(&lp->lp_lock)
3748 {
3749         struct lnet_handle_md mdh;
3750         int rc;
3751
3752         mdh = lp->lp_ping_mdh;
3753         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3754         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3755         lp->lp_state |= LNET_PEER_FORCE_PING;
3756         rc = lp->lp_ping_error;
3757         lp->lp_ping_error = 0;
3758         spin_unlock(&lp->lp_lock);
3759
3760         if (!LNetMDHandleIsInvalid(mdh))
3761                 LNetMDUnlink(mdh);
3762
3763         CDEBUG(D_NET, "peer %s:%d\n",
3764                libcfs_nidstr(&lp->lp_primary_nid), rc);
3765
3766         spin_lock(&lp->lp_lock);
3767         return rc ? rc : LNET_REDISCOVER_PEER;
3768 }
3769
3770 /* Active side of ping. */
3771 static int lnet_peer_send_ping(struct lnet_peer *lp)
3772 __must_hold(&lp->lp_lock)
3773 {
3774         int bytes;
3775         int rc;
3776         int cpt;
3777
3778         lp->lp_state |= LNET_PEER_PING_SENT;
3779         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3780         spin_unlock(&lp->lp_lock);
3781
3782         cpt = lnet_net_lock_current();
3783         /* Refcount for MD. */
3784         lnet_peer_addref_locked(lp);
3785         lnet_net_unlock(cpt);
3786
3787         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3788
3789         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3790                             the_lnet.ln_dc_handler, false);
3791         /* If LNetMDBind() in lnet_send_ping() fails we need to drop the
3792          * refcount on the peer ourselves; otherwise LNetMDUnlink() will
3793          * be called, which will eventually drop it.
3794          */
3795         if (rc > 0) {
3796                 lnet_net_lock(cpt);
3797                 lnet_peer_decref_locked(lp);
3798                 lnet_net_unlock(cpt);
3799                 rc = -rc; /* change the rc to negative value */
3800                 goto fail_error;
3801         } else if (rc < 0) {
3802                 goto fail_error;
3803         }
3804
3805         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3806
3807         spin_lock(&lp->lp_lock);
3808         return 0;
3809
3810 fail_error:
3811         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3812         /*
3813          * The errors that get us here are considered hard errors and
3814          * cause Discovery to terminate. So we clear PING_SENT, but do
3815          * not set either PING_FAILED or FORCE_PING. In fact we need
3816          * to clear PING_FAILED, because the unlink event handler will
3817          * have set it if we called LNetMDUnlink() above.
3818          */
3819         spin_lock(&lp->lp_lock);
3820         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3821         return rc;
3822 }
3823
3824 /*
3825  * This function exists because you cannot call LNetMDUnlink() from an
3826  * event handler.
3827  */
3828 static int lnet_peer_push_failed(struct lnet_peer *lp)
3829 __must_hold(&lp->lp_lock)
3830 {
3831         struct lnet_handle_md mdh;
3832         int rc;
3833
3834         mdh = lp->lp_push_mdh;
3835         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3836         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3837         rc = lp->lp_push_error;
3838         lp->lp_push_error = 0;
3839         spin_unlock(&lp->lp_lock);
3840
3841         if (!LNetMDHandleIsInvalid(mdh))
3842                 LNetMDUnlink(mdh);
3843
3844         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3845         spin_lock(&lp->lp_lock);
3846         return rc ? rc : LNET_REDISCOVER_PEER;
3847 }
3848
3849 /*
3850  * Mark the peer as discovered.
3851  */
3852 static int lnet_peer_discovered(struct lnet_peer *lp)
3853 __must_hold(&lp->lp_lock)
3854 {
3855         lp->lp_state |= LNET_PEER_DISCOVERED;
3856         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3857                           LNET_PEER_REDISCOVER);
3858
3859         lp->lp_dc_error = 0;
3860
3861         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3862
3863         return 0;
3864 }
3865
3866 /* Active side of push. */
3867 static int lnet_peer_send_push(struct lnet_peer *lp)
3868 __must_hold(&lp->lp_lock)
3869 {
3870         struct lnet_ping_buffer *pbuf;
3871         struct lnet_processid id;
3872         struct lnet_md md;
3873         int cpt;
3874         int rc;
3875
3876         /* Don't push to a non-multi-rail peer. */
3877         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3878                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3879                 /* if the peer's NIDs are up to date then the peer is discovered */
3880                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3881                         rc = lnet_peer_discovered(lp);
3882                         return rc;
3883                 }
3884
3885                 return 0;
3886         }
3887
3888         lp->lp_state |= LNET_PEER_PUSH_SENT;
3889         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3890         spin_unlock(&lp->lp_lock);
3891
3892         cpt = lnet_net_lock_current();
3893         pbuf = the_lnet.ln_ping_target;
3894         kref_get(&pbuf->pb_refcnt);
3895         lnet_net_unlock(cpt);
3896
3897         /* Push source MD */
3898         md.start     = &pbuf->pb_info;
3899         md.length    = pbuf->pb_nbytes;
3900         md.threshold = 2; /* Put/Ack */
3901         md.max_size  = 0;
3902         md.options   = LNET_MD_TRACK_RESPONSE;
3903         md.handler   = the_lnet.ln_dc_handler;
3904         md.user_ptr  = lp;
3905
3906         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3907         if (rc) {
3908                 kref_put(&pbuf->pb_refcnt, lnet_ping_buffer_free);
3909                 CERROR("Can't bind push source MD: %d\n", rc);
3910                 goto fail_error;
3911         }
3912
3913         cpt = lnet_net_lock_current();
3914         /* Refcount for MD. */
3915         lnet_peer_addref_locked(lp);
3916         id.pid = LNET_PID_LUSTRE;
3917         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3918                 id.nid = lp->lp_disc_dst_nid;
3919         else
3920                 id.nid = lp->lp_primary_nid;
3921         lnet_net_unlock(cpt);
3922
3923         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3924                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3925                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3926
3927         /*
3928          * Reset the discovery NIDs. There is no need to restrict sending
3929          * from that source if we call lnet_push_update_to_peers(). They
3930          * will be set to specific NIDs if we initiate discovery from
3931          * scratch.
3932          */
3933         lp->lp_disc_src_nid = LNET_ANY_NID;
3934         lp->lp_disc_dst_nid = LNET_ANY_NID;
3935
3936         if (rc)
3937                 goto fail_unlink;
3938
3939         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3940
3941         spin_lock(&lp->lp_lock);
3942         return 0;
3943
3944 fail_unlink:
3945         LNetMDUnlink(lp->lp_push_mdh);
3946         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3947 fail_error:
3948         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3949                lp, rc);
3950         /*
3951          * The errors that get us here are considered hard errors and
3952          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3953          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3954          * because the unlink event handler will have set it if we
3955          * called LNetMDUnlink() above.
3956          */
3957         spin_lock(&lp->lp_lock);
3958         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3959         return rc;
3960 }
3961
3962 /*
3963  * Wait for work to be queued or some other change that must be
3964  * attended to. Returns non-zero if the discovery thread should shut
3965  * down.
3966  */
3967 static int lnet_peer_discovery_wait_for_work(void)
3968 {
3969         int cpt;
3970         int rc = 0;
3971
3972         DEFINE_WAIT(wait);
3973
3974         cpt = lnet_net_lock_current();
3975         for (;;) {
3976                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3977                                 TASK_INTERRUPTIBLE);
3978                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3979                         break;
3980                 if (lnet_push_target_resize_needed() ||
3981                     the_lnet.ln_push_target->pb_needs_post)
3982                         break;
3983                 if (!list_empty(&the_lnet.ln_dc_request))
3984                         break;
3985                 if (!list_empty(&the_lnet.ln_msg_resend))
3986                         break;
3987                 lnet_net_unlock(cpt);
3988
3989                 /*
3990                  * Wake up at least once per second to check for peers
3991                  * that have been stuck on the working queue for longer
3992                  * than the peer timeout.
3993                  */
3994                 schedule_timeout(cfs_time_seconds(1));
3995                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3996                 cpt = lnet_net_lock_current();
3997         }
3998         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3999
4000         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4001                 rc = -ESHUTDOWN;
4002
4003         lnet_net_unlock(cpt);
4004
4005         CDEBUG(D_NET, "woken: %d\n", rc);
4006
4007         return rc;
4008 }
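
/* The wait above is cut short by any of: discovery shutting down, the
 * push target needing a resize or repost, new entries on the discovery
 * request queue, or messages queued for resend. Absent all of these,
 * the thread still wakes about once per second (see the comment in the
 * loop).
 */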
4009
4010 /*
4011  * Messages that were pending on a destroyed peer will be put on a global
4012  * resend list. The message resend list will be checked by
4013  * the discovery thread when it wakes up, and will resend messages. These
4014  * messages can still be sendable in the case the lpni which was the initial
4015  * cause of the message re-queue was transferred to another peer.
4016  *
4017  * It is possible that LNet could be shut down while we're iterating
4018  * through the list. lnet_shutdown_lndnets() will attempt to access the
4019  * resend list, but will have to wait until the spinlock is released, by
4020  * which time there shouldn't be any more messages on the resend list.
4021  * During shutdown lnet_send() will fail and lnet_finalize() will be called
4022  * for the messages so they can be released. The other case is that
4023  * lnet_shutdown_lndnets() finalizes all the messages before this
4024  * function can visit the resend list, in which case this function will be
4025  * a no-op.
4026  */
4027 static void lnet_resend_msgs(void)
4028 {
4029         struct lnet_msg *msg, *tmp;
4030         LIST_HEAD(resend);
4031         int rc;
4032
4033         spin_lock(&the_lnet.ln_msg_resend_lock);
4034         list_splice(&the_lnet.ln_msg_resend, &resend);
4035         spin_unlock(&the_lnet.ln_msg_resend_lock);
4036
4037         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
4038                 list_del_init(&msg->msg_list);
4039                 rc = lnet_send(&msg->msg_src_nid_param, msg,
4040                                &msg->msg_rtr_nid_param);
4041                 if (rc < 0) {
4042                         CNETERR("Error sending %s to %s: %d\n",
4043                                lnet_msgtyp2str(msg->msg_type),
4044                                libcfs_idstr(&msg->msg_target), rc);
4045                         lnet_finalize(msg, rc);
4046                 }
4047         }
4048 }
4049
4050 /* The discovery thread. */
4051 static int lnet_peer_discovery(void *arg)
4052 {
4053         struct lnet_peer *lp;
4054         int rc;
4055
4056         wait_for_completion(&the_lnet.ln_started);
4057
4058         CDEBUG(D_NET, "started\n");
4059
4060         for (;;) {
4061                 if (lnet_peer_discovery_wait_for_work())
4062                         break;
4063
4064                 if (lnet_push_target_resize_needed())
4065                         lnet_push_target_resize();
4066                 else if (the_lnet.ln_push_target->pb_needs_post)
4067                         lnet_push_target_post(the_lnet.ln_push_target,
4068                                               &the_lnet.ln_push_target_md);
4069
4070                 lnet_resend_msgs();
4071
4072                 lnet_net_lock(LNET_LOCK_EX);
4073                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4074                         lnet_net_unlock(LNET_LOCK_EX);
4075                         break;
4076                 }
4077
4078                 /*
4079                  * Process all incoming discovery work requests.  When
4080                  * discovery must wait on a peer to change state, it
4081                  * is added to the tail of the ln_dc_working queue. A
4082                  * timestamp keeps track of when the peer was added,
4083                  * so we can time out discovery requests that take too
4084                  * long.
4085                  */
4086                 while (!list_empty(&the_lnet.ln_dc_request)) {
4087                         lp = list_first_entry(&the_lnet.ln_dc_request,
4088                                               struct lnet_peer, lp_dc_list);
4089                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4090                         /*
4091                          * Record the time the peer was put on the
4092                          * dc_working queue so it cannot sit there
4093                          * forever if the GET message (for ping) never
4094                          * gets a REPLY or the PUT message (for push)
4095                          * never gets an ACK.
4096                          */
4097                         lp->lp_last_queued = ktime_get_real_seconds();
4098                         lnet_net_unlock(LNET_LOCK_EX);
4099
4100                         if (lnet_push_target_resize_needed())
4101                                 lnet_push_target_resize();
4102                         else if (the_lnet.ln_push_target->pb_needs_post)
4103                                 lnet_push_target_post(the_lnet.ln_push_target,
4104                                                       &the_lnet.ln_push_target_md);
4105
4106                         /*
4107                          * Select an action depending on the state of
4108                          * the peer and whether discovery is disabled.
4109                          * The check whether discovery is disabled is
4110                          * done after the code that handles processing
4111                          * for arrived data, cleanup for failures, and
4112                          * forcing a Ping or Push.
4113                          */
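                        /* Precedence of the checks below, highest
                         * first: deletion, arrived ping data, failed
                         * ping, failed push, forced ping, forced push,
                         * NIDs not up to date (ping), peer needs push;
                         * otherwise the peer is marked discovered.
                         */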
4114                         spin_lock(&lp->lp_lock);
4115                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4116                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4117                                 lp->lp_state);
4118                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4119                                             LNET_PEER_MARK_DELETED))
4120                                 rc = lnet_peer_deletion(lp);
4121                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4122                                 rc = lnet_peer_data_present(lp);
4123                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4124                                 rc = lnet_peer_ping_failed(lp);
4125                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4126                                 rc = lnet_peer_push_failed(lp);
4127                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4128                                 rc = lnet_peer_send_ping(lp);
4129                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4130                                 rc = lnet_peer_send_push(lp);
4131                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4132                                 rc = lnet_peer_send_ping(lp);
4133                         else if (lnet_peer_needs_push(lp))
4134                                 rc = lnet_peer_send_push(lp);
4135                         else
4136                                 rc = lnet_peer_discovered(lp);
4137                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4138                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4139                                 lp->lp_state, rc);
4140
4141                         if (rc == LNET_REDISCOVER_PEER) {
4142                                 spin_unlock(&lp->lp_lock);
4143                                 lnet_net_lock(LNET_LOCK_EX);
4144                                 list_move(&lp->lp_dc_list,
4145                                           &the_lnet.ln_dc_request);
4146                         } else if (rc ||
4147                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4148                                 spin_unlock(&lp->lp_lock);
4149                                 lnet_net_lock(LNET_LOCK_EX);
4150                                 lnet_peer_discovery_complete(lp, rc);
4151                         } else {
4152                                 spin_unlock(&lp->lp_lock);
4153                                 lnet_net_lock(LNET_LOCK_EX);
4154                         }
4155
4156                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4157                                 break;
4158
4159                 }
4160
4161                 lnet_net_unlock(LNET_LOCK_EX);
4162         }
4163
4164         CDEBUG(D_NET, "stopping\n");
4165         /*
4166          * Clean up before telling lnet_peer_discovery_stop() that
4167          * we're done. Use wake_up() below to somewhat reduce the
4168          * size of the thundering herd if there are multiple threads
4169          * waiting on discovery of a single peer.
4170          */
4171
4172         /* Queue cleanup 1: stop all pending pings and pushes. */
4173         lnet_net_lock(LNET_LOCK_EX);
4174         while (!list_empty(&the_lnet.ln_dc_working)) {
4175                 lp = list_first_entry(&the_lnet.ln_dc_working,
4176                                       struct lnet_peer, lp_dc_list);
4177                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4178                 lnet_net_unlock(LNET_LOCK_EX);
4179                 lnet_peer_cancel_discovery(lp);
4180                 lnet_net_lock(LNET_LOCK_EX);
4181         }
4182         lnet_net_unlock(LNET_LOCK_EX);
4183
4184         /* Queue cleanup 2: wait for the expired queue to clear. */
4185         while (!list_empty(&the_lnet.ln_dc_expired))
4186                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4187
4188         /* Queue cleanup 3: clear the request queue. */
4189         lnet_net_lock(LNET_LOCK_EX);
4190         while (!list_empty(&the_lnet.ln_dc_request)) {
4191                 lp = list_first_entry(&the_lnet.ln_dc_request,
4192                                       struct lnet_peer, lp_dc_list);
4193                 lnet_net_unlock(LNET_LOCK_EX);
4194                 spin_lock(&lp->lp_lock);
4195                 if (lp->lp_state & LNET_PEER_PING_FAILED)
4196                         (void)lnet_peer_ping_failed(lp);
4197                 if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4198                         (void)lnet_peer_push_failed(lp);
4199                 spin_unlock(&lp->lp_lock);
4200                 lnet_net_lock(LNET_LOCK_EX);
4201                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4202         }
4203         lnet_net_unlock(LNET_LOCK_EX);
4204
4205         the_lnet.ln_dc_handler = NULL;
4206
4207         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4208         wake_up(&the_lnet.ln_dc_waitq);
4209
4210         CDEBUG(D_NET, "stopped\n");
4211
4212         return 0;
4213 }
4214
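     /*
      * Start the peer discovery daemon: install the discovery event
      * handler, mark the state machine RUNNING and spawn the
      * "lnet_discovery" kthread.  If kthread_run() fails, the handler
      * and state are rolled back so a later start can be attempted.
      */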
4215 /* ln_api_mutex is held on entry. */
4216 int lnet_peer_discovery_start(void)
4217 {
4218         struct task_struct *task;
4219         int rc = 0;
4220
4221         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4222                 return -EALREADY;
4223
4224         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4225         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4226         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4227         if (IS_ERR(task)) {
4228                 rc = PTR_ERR(task);
4229                 CERROR("Can't start peer discovery thread: %d\n", rc);
4230
4231                 the_lnet.ln_dc_handler = NULL;
4232
4233                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4234         }
4235
4236         CDEBUG(D_NET, "discovery start: %d\n", rc);
4237
4238         return rc;
4239 }
4240
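     /*
      * Ask the discovery daemon to shut down and wait for it to finish.
      * The daemon is woken up (or ln_started is completed if it never
      * reached its work loop), ln_api_mutex is dropped while waiting
      * for the SHUTDOWN state, and all three discovery queues are
      * expected to be empty on return.
      */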
4241 /* ln_api_mutex is held on entry. */
4242 void lnet_peer_discovery_stop(void)
4243 {
4244         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4245                 return;
4246
4247         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4248         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4249
4250         /* In the LNetNIInit() path we may be stopping discovery before it
4251          * has entered its work loop.
4252          */
4253         if (!completion_done(&the_lnet.ln_started))
4254                 complete(&the_lnet.ln_started);
4255         else
4256                 wake_up(&the_lnet.ln_dc_waitq);
4257
4258         mutex_unlock(&the_lnet.ln_api_mutex);
4259         wait_event(the_lnet.ln_dc_waitq,
4260                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4261         mutex_lock(&the_lnet.ln_api_mutex);
4262
4263         LASSERT(list_empty(&the_lnet.ln_dc_request));
4264         LASSERT(list_empty(&the_lnet.ln_dc_working));
4265         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4266
4267         CDEBUG(D_NET, "discovery stopped\n");
4268 }
4269
4270 /* Debugging */
4271
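     /*
      * Dump a single peer NI to the debug log: reference count,
      * aliveness ("up"/"down" for routers or peers with aliveness
      * enabled, "NA" otherwise) and its tx/rtr credit counters.
      */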
4272 void
4273 lnet_debug_peer(struct lnet_nid *nid)
4274 {
4275         char                    *aliveness = "NA";
4276         struct lnet_peer_ni     *lp;
4277         int                     cpt;
4278
4279         cpt = lnet_nid2cpt(nid, NULL);
4280         lnet_net_lock(cpt);
4281
4282         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4283         if (IS_ERR(lp)) {
4284                 lnet_net_unlock(cpt);
4285                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4286                 return;
4287         }
4288
4289         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4290                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4291
4292         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4293                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4294                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4295                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4296                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4297
4298         lnet_peer_ni_decref_locked(lp);
4299
4300         lnet_net_unlock(cpt);
4301 }
4302
4303 /* Gathering information for userspace. */
4304
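     /*
      * Report credit and aliveness information for the peer_index'th
      * NID-v4 peer NI in the peer table selected by *cpt_iter.  On
      * return *cpt_iter is set to the CPT count; -ENOENT means there
      * was nothing to report.
      */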
4305 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4306                           char aliveness[LNET_MAX_STR_LEN],
4307                           __u32 *cpt_iter, __u32 *refcount,
4308                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4309                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4310                           __u32 *peer_tx_qnob)
4311 {
4312         struct lnet_peer_table          *peer_table;
4313         struct lnet_peer_ni             *lp;
4314         int                             j;
4315         int                             lncpt;
4316         bool                            found = false;
4317
4318         /* get the number of CPTs */
4319         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4320
4321         /* if the cpt number to be examined is >= the number of cpts in
4322          * the system then indicate that there are no more cpts to examine
4323          */
4324         if (*cpt_iter >= lncpt)
4325                 return -ENOENT;
4326
4327         /* get the current table */
4328         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4329         /* if the ptable is NULL then there are no more cpts to examine */
4330         if (peer_table == NULL)
4331                 return -ENOENT;
4332
4333         lnet_net_lock(*cpt_iter);
4334
4335         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4336                 struct list_head *peers = &peer_table->pt_hash[j];
4337
4338                 list_for_each_entry(lp, peers, lpni_hashlist) {
4339                         if (!nid_is_nid4(&lp->lpni_nid))
4340                                 continue;
4341                         if (peer_index-- > 0)
4342                                 continue;
4343
4344                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4345                         if (lnet_isrouter(lp) ||
4346                             lnet_peer_aliveness_enabled(lp))
4347                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4348                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4349
4350                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4351                         *refcount = kref_read(&lp->lpni_kref);
4352                         *ni_peer_tx_credits =
4353                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4354                         *peer_tx_credits = lp->lpni_txcredits;
4355                         *peer_rtr_credits = lp->lpni_rtrcredits;
4356                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4357                         *peer_tx_qnob = lp->lpni_txqnob;
4358
4359                         found = true;
4360                 }
4361
4362         }
4363         lnet_net_unlock(*cpt_iter);
4364
4365         *cpt_iter = lncpt;
4366
4367         return found ? 0 : -ENOENT;
4368 }
4369
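     /*
      * Copy per-NI credit, statistics and health information for the
      * peer identified by cfg->prcfg_prim_nid into the caller's bulk
      * buffer.
      */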
4370 /* ln_api_mutex is held, which keeps the peer list stable */
4371 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4372 {
4373         struct lnet_ioctl_element_stats *lpni_stats;
4374         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4375         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4376         struct lnet_peer_ni_credit_info *lpni_info;
4377         struct lnet_peer_ni *lpni;
4378         struct lnet_peer *lp;
4379         lnet_nid_t nid4;
4380         struct lnet_nid nid;
4381         __u32 size;
4382         int rc;
4383
4384         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4385         lp = lnet_find_peer(&nid);
4386         if (!lp) {
4387                 rc = -ENOENT;
4388                 goto out;
4389         }
4390
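             /* Each peer NI contributes one fixed-size record to the bulk
              * buffer.  If the caller's buffer is too small, report the
              * required size in prcfg_size and return -E2BIG so the call
              * can be retried with a larger buffer.
              */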
4391         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4392                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4393         size *= lp->lp_nnis;
4394         if (size > cfg->prcfg_size) {
4395                 cfg->prcfg_size = size;
4396                 rc = -E2BIG;
4397                 goto out_lp_decref;
4398         }
4399
4400         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4401         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4402         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4403         cfg->prcfg_count = lp->lp_nnis;
4404         cfg->prcfg_size = size;
4405         cfg->prcfg_state = lp->lp_state;
4406
4407         /* Allocate helper buffers. */
4408         rc = -ENOMEM;
4409         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4410         if (!lpni_info)
4411                 goto out_lp_decref;
4412         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4413         if (!lpni_stats)
4414                 goto out_free_info;
4415         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4416         if (!lpni_msg_stats)
4417                 goto out_free_stats;
4418         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4419         if (!lpni_hstats)
4420                 goto out_free_msg_stats;
4421
4422
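             /* For each NID-v4 peer NI copy one record to the bulk buffer:
              * the nid4, followed by credit info, aggregate stats, per-type
              * message stats and health stats, matching the per-NI size
              * computed above.
              */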
4424         rc = -EFAULT;
4425         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4426                 if (!nid_is_nid4(&lpni->lpni_nid))
4427                         continue;
4428                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4429                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4430                         goto out_free_hstats;
4431                 bulk += sizeof(nid4);
4432
4433                 memset(lpni_info, 0, sizeof(*lpni_info));
4434                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4435                 if (lnet_isrouter(lpni) ||
4436                     lnet_peer_aliveness_enabled(lpni))
4437                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4438                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4439
4440                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4441                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4442                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4443                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4444                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4445                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4446                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4447                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4448                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4449                         goto out_free_hstats;
4450                 bulk += sizeof(*lpni_info);
4451
4452                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4453                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4454                                                             LNET_STATS_TYPE_SEND);
4455                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4456                                                             LNET_STATS_TYPE_RECV);
4457                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4458                                                             LNET_STATS_TYPE_DROP);
4459                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4460                         goto out_free_hstats;
4461                 bulk += sizeof(*lpni_stats);
4462                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4463                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4464                         goto out_free_hstats;
4465                 bulk += sizeof(*lpni_msg_stats);
4466                 lpni_hstats->hlpni_network_timeout =
4467                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4468                 lpni_hstats->hlpni_remote_dropped =
4469                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4470                 lpni_hstats->hlpni_remote_timeout =
4471                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4472                 lpni_hstats->hlpni_remote_error =
4473                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4474                 lpni_hstats->hlpni_health_value =
4475                   atomic_read(&lpni->lpni_healthv);
4476                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4477                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4478                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4479                         goto out_free_hstats;
4480                 bulk += sizeof(*lpni_hstats);
4481         }
4482         rc = 0;
4483
4484 out_free_hstats:
4485         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4486 out_free_msg_stats:
4487         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4488 out_free_stats:
4489         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4490 out_free_info:
4491         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4492 out_lp_decref:
4493         lnet_peer_decref_locked(lp);
4494 out:
4495         return rc;
4496 }
4497
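     /*
      * Queue a peer NI for health recovery pings.  The NI is skipped if
      * the monitor thread is not running, it is already on a recovery
      * queue, it is at full health, it has never been seen alive, or
      * more than lnet_recovery_limit seconds have passed since it was
      * last alive.
      */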
4498 /* must hold net_lock/0 */
4499 void
4500 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4501                                      struct list_head *recovery_queue,
4502                                      time64_t now)
4503 {
4504         /* the mt could've shutdown and cleaned up the queues */
4505         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4506                 return;
4507
4508         if (!list_empty(&lpni->lpni_recovery))
4509                 return;
4510
4511         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4512                 return;
4513
4514         if (!lpni->lpni_last_alive) {
4515                 CDEBUG(D_NET,
4516                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4517                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4518                        lpni->lpni_last_alive);
4519                 return;
4520         }
4521
4522         if (lnet_recovery_limit &&
4523             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4524                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4525                        libcfs_nidstr(&lpni->lpni_nid),
4526                        lpni->lpni_last_alive);
4527                 /* Reset the ping count so that if this peer NI is added back to
4528                  * the recovery queue we will send the first ping right away.
4529                  */
4530                 lpni->lpni_ping_count = 0;
4531                 return;
4532         }
4533
4534         /* This peer NI is going on the recovery queue, so take a ref on it */
4535         kref_get(&lpni->lpni_kref);
4536
4538         lnet_peer_ni_set_next_ping(lpni, now);
4539
4540         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4541                libcfs_nidstr(&lpni->lpni_nid),
4542                lpni->lpni_ping_count,
4543                lpni->lpni_next_ping,
4544                lpni->lpni_last_alive,
4545                atomic_read(&lpni->lpni_healthv));
4546
4547         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4548 }
4549
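     /*
      * Set the health value of one peer NI identified by NID, or of
      * every peer NI in the system when "all" is true, adding each
      * affected NI to the recovery queue as needed.
      */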
4550 /* Call with the ln_api_mutex held */
4551 void
4552 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4553 {
4554         struct lnet_peer_table *ptable;
4555         struct lnet_peer *lp;
4556         struct lnet_peer_net *lpn;
4557         struct lnet_peer_ni *lpni;
4558         int lncpt;
4559         int cpt;
4560         time64_t now;
4561
4562         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4563                 return;
4564
4565         now = ktime_get_seconds();
4566
4567         if (!all) {
4568                 lnet_net_lock(LNET_LOCK_EX);
4569                 lpni = lnet_peer_ni_find_locked(nid);
4570                 if (!lpni) {
4571                         lnet_net_unlock(LNET_LOCK_EX);
4572                         return;
4573                 }
4574                 lnet_set_lpni_healthv_locked(lpni, value);
4575                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4576                                              &the_lnet.ln_mt_peerNIRecovq, now);
4577                 lnet_peer_ni_decref_locked(lpni);
4578                 lnet_net_unlock(LNET_LOCK_EX);
4579                 return;
4580         }
4581
4582         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4583
4584         /*
4585          * Walk all the peers and reset the health value for each one to the
4586          * specified value.
4587          */
4588         lnet_net_lock(LNET_LOCK_EX);
4589         for (cpt = 0; cpt < lncpt; cpt++) {
4590                 ptable = the_lnet.ln_peer_tables[cpt];
4591                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4592                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4593                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4594                                                     lpni_peer_nis) {
4595                                         lnet_set_lpni_healthv_locked(lpni,
4596                                                                      value);
4597                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4598                                              &the_lnet.ln_mt_peerNIRecovq, now);
4599                                 }
4600                         }
4601                 }
4602         }
4603         lnet_net_unlock(LNET_LOCK_EX);
4604 }
4605