/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
                            unsigned int flags);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

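/*
 * A new local net was added. Walk the remote peer_ni list, and for
 * each peer_ni that now resides on a local net, assign the net and
 * its credit tunables, then take it off the remote list.
 */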
void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}

static struct lnet_peer_ni *
lnet_peer_ni_alloc(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = *nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NID_NET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                lnet_peer_ni_addref_locked(lpni);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

        return lpni;
}
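
/*
 * Note: a freshly allocated peer_ni starts at full health
 * (LNET_MAX_HEALTH_VALUE), and its initial lpni_ns_status follows the
 * lnet_peers_start_down() tunable. Example (illustrative):
 *
 *	lpni = lnet_peer_ni_alloc(&nid);
 *	if (lpni && !lnet_peers_start_down())
 *		...lpni->lpni_ns_status == LNET_NI_STATUS_UP...
 */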

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(struct lnet_nid *nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = *nid;
        lp->lp_disc_src_nid = LNET_ANY_NID;
        lp->lp_disc_dst_nid = LNET_ANY_NID;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * All peers created on a router should have health monitoring
         * enabled, even if it is not already enabled globally.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for the loopback peer. If you're creating a
         * peer for the loopback interface then that was initiated when we
         * attempted to send a message over the loopback. There is no need
         * to ever use a different interface when sending messages to
         * myself.
         */
        if (nid_is_lo0(nid))
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * If there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected that lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on the list.
         * We cannot resend them now because we're holding the cpt lock.
         * Releasing the lock can cause an inconsistent state.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nidstr(&lp->lp_primary_nid),
                libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Cannot delete it\n",
                       libcfs_nidstr(&lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* decrement the ref count on the peer table */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni on the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

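/*
 * Delete all NIs of a peer, marking the peer with
 * LNET_PEER_MARK_DELETED first. Returns the last error seen from
 * lnet_peer_ni_del_locked(), if any.
 *
 * Call with lnet_net_lock(LNET_LOCK_EX) held.
 */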
static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));

        spin_lock(&peer->lp_lock);
        peer->lp_state |= LNET_PEER_MARK_DELETED;
        spin_unlock(&peer->lp_lock);

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
        struct lnet_handle_md ping_mdh;
        struct lnet_handle_md push_mdh;

        LNetInvalidateMDHandle(&ping_mdh);
        LNetInvalidateMDHandle(&push_mdh);

        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_PING_SENT) {
                ping_mdh = lp->lp_ping_mdh;
                LNetInvalidateMDHandle(&lp->lp_ping_mdh);
        }
        if (lp->lp_state & LNET_PEER_PUSH_SENT) {
                push_mdh = lp->lp_push_mdh;
                LNetInvalidateMDHandle(&lp->lp_push_mdh);
        }
        spin_unlock(&lp->lp_lock);

        if (!LNetMDHandleIsInvalid(ping_mdh))
                LNetMDUnlink(ping_mdh);
        if (!LNetMDHandleIsInvalid(push_mdh))
                LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        int rc;

        lnet_peer_cancel_discovery(peer);
        lnet_net_lock(LNET_LOCK_EX);
        rc = lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return rc;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
                  unsigned int flags)
{
        struct lnet_peer_ni *lpni;
        struct lnet_nid primary_nid = lp->lp_primary_nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /* If we're asked to lock down the primary NID we shouldn't be
         * deleting it
         */
        if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
            nid_same(&primary_nid, nid)) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_peer_ni_find_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        lnet_peer_ni_decref_locked(lpni);
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lpni2->lpni_nid;
        }
        rc = lnet_peer_ni_del_locked(lpni, force);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
               flags, rc);

        return rc;
}
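
/*
 * Example (illustrative): deleting a NID via the DLC path maps onto
 * the error codes documented above, e.g.:
 *
 *	rc = lnet_peer_del_nid(lp, &nid, LNET_PEER_CONFIGURED);
 *	if (rc == -EBUSY)
 *		...nid is the primary NID and other NIDs remain...
 */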

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (!nid_same(&peer->lp_primary_nid,
                                       &lpni->lpni_nid)) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        struct lnet_nid         gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, &gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (nid_same(&lp->lpni_nid, nid)) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_peer_ni_find_locked(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (nid_same(&lpni->lpni_nid, nid))
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}
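
/*
 * Example (illustrative): lnet_find_peer() takes a reference on the
 * peer it returns, which the caller must drop when done:
 *
 *	struct lnet_peer *lp = lnet_find_peer(&nid);
 *
 *	if (lp) {
 *		...use lp...
 *		lnet_peer_decref_locked(lp);
 *	}
 */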

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list, loop to
                         * the beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}
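
/*
 * Note: with prev_lpn_id == 0 the function above returns the peer's
 * first peer_net; otherwise it returns the peer_net following
 * prev_lpn_id, wrapping to the head of the list, or NULL if
 * prev_lpn_id is not on the list.
 */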

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_first_entry(&peer->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);
                }
                lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
                                        lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if you reached the end of the peer ni list and the peer
                 * net is specified then there are no more peer nis in that
                 * net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net ni list. move to the
                 * next net
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
                                       struct lnet_peer_net,
                                       lpn_peer_nets);
                /* get the ni on it */
                lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
                                        lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_first_entry(&prev->lpni_peer_nis,
                                struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}
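
/*
 * Example (illustrative): the usual pattern for visiting every NI of
 * a peer with the iterator above:
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		...visit lpni...
 */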

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (!nid_is_nid4(&lp->lp_primary_nid))
                                continue;
                        if (i >= count)
                                goto done;
                        id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *      false: the list is empty, or the NID is not on it
 *      true: the NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             struct lnet_nid *gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nidstr(&lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nidstr(&ne->nl_nid),
                       libcfs_nidstr(gw_nid));
                if (nid_same(&ne->nl_nid, gw_nid))
                        return true;
        }

        return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       struct lnet_nid *gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with api_mutex held. When the api_mutex
         * is held the list cannot be modified, as it is only modified as
         * a result of applying a UDSP and that happens under api_mutex
         * lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (nid_same(&ne->nl_nid, gw_nid))
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = *gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return nid_same(&lpni->lpni_pref.nid, nid);
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (nid_same(&ne->nl_nid, nid))
                        return true;
        }
        return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
                                  struct lnet_nid *nid)
{
        int rc = 0;

        if (!nid)
                return -EINVAL;
        spin_lock(&lpni->lpni_lock);
        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        struct lnet_nid *tmp_nid = NULL;
        int rc = 0;

        if (LNET_NID_IS_ANY(nid)) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 &&
            nid_same(&lpni->lpni_pref.nid, nid)) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = &lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne1->nl_nid, nid)) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = *tmp_nid;
                }
                ne1->nl_nid = *nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = *nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}
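
/*
 * Note: lpni_pref is a union. With exactly one preferred NID the NID
 * is stored inline in lpni_pref.nid; with more than one, the NIDs
 * live on the lpni_pref.nids list, and lnet_peer_add_pref_nid()
 * above migrates the inline NID onto that list when a second NID is
 * added.
 */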

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (!nid_same(&lpni->lpni_pref.nid, nid)) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (nid_same(&ne->nl_nid, nid))
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (ne)
                LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_ANY_NID;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

void
lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
{
        struct lnet_peer_ni *lpni;

        *result = *nid;
        lpni = lnet_peer_ni_find_locked(nid);
        if (lpni) {
                *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

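/*
 * Add a peer made up of the given NIDs. The first non-loopback NID
 * becomes the locked primary NID. With discovery enabled, the
 * remaining NIDs are added as NIDs of that peer; with discovery
 * disabled, each NID is added as its own non-MR peer.
 *
 * Example (illustrative):
 *
 *	lnet_nid_t nids[2] = { nid0, nid1 };
 *	int rc = LNetAddPeer(nids, 2);
 */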
int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
        struct lnet_nid pnid = LNET_ANY_NID;
        bool mr;
        int i, rc;

        if (!nids || num_nids < 1)
                return -EINVAL;

        rc = LNetNIInit(LNET_PID_ANY);
        if (rc < 0)
                return rc;

        mutex_lock(&the_lnet.ln_api_mutex);

        mr = lnet_peer_discovery_disabled == 0;

        rc = 0;
        for (i = 0; i < num_nids; i++) {
                struct lnet_nid nid;

                if (nids[i] == LNET_NID_LO_0)
                        continue;

                lnet_nid4_to_nid(nids[i], &nid);
                if (LNET_NID_IS_ANY(&pnid)) {
                        lnet_nid4_to_nid(nids[i], &pnid);
                        rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr,
                                              LNET_PEER_LOCK_PRIMARY);
                        if (rc == -EALREADY) {
                                struct lnet_peer *lp;

                                CDEBUG(D_NET, "A peer exists for NID %s\n",
                                       libcfs_nidstr(&pnid));
                                rc = 0;
                                /* Adds a refcount */
                                lp = lnet_find_peer(&pnid);
                                LASSERT(lp);
                                pnid = lp->lp_primary_nid;
                                /* Drop refcount from lookup */
                                lnet_peer_decref_locked(lp);
                        }
                } else if (lnet_peer_discovery_disabled) {
                        lnet_nid4_to_nid(nids[i], &nid);
                        rc = lnet_add_peer_ni(&nid, &LNET_ANY_NID, mr,
                                              LNET_PEER_LOCK_PRIMARY);
                } else {
                        lnet_nid4_to_nid(nids[i], &nid);
                        rc = lnet_add_peer_ni(&pnid, &nid, mr,
                                              LNET_PEER_LOCK_PRIMARY);
                }

                if (rc && rc != -EEXIST)
                        goto unlock;
        }

unlock:
        mutex_unlock(&the_lnet.ln_api_mutex);

        LNetNIFini();

        return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);

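/*
 * Replace *nid with the primary NID of its peer. Loopback NIDs are
 * returned unchanged. Unless discovery is disabled, this may kick
 * off discovery of the peer in the background.
 *
 * Example (illustrative):
 *
 *	struct lnet_nid nid;
 *
 *	lnet_nid4_to_nid(nid4, &nid);
 *	LNetPrimaryNID(&nid);
 *	...nid now holds the peer's primary NID...
 */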
1418 void LNetPrimaryNID(struct lnet_nid *nid)
1419 {
1420         struct lnet_peer *lp;
1421         struct lnet_peer_ni *lpni;
1422         struct lnet_nid orig;
1423         int rc = 0;
1424         int cpt;
1425
1426         if (!nid || nid_is_lo0(nid))
1427                 return;
1428         orig = *nid;
1429
1430         cpt = lnet_net_lock_current();
1431         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1432         if (IS_ERR(lpni)) {
1433                 rc = PTR_ERR(lpni);
1434                 goto out_unlock;
1435         }
1436         lp = lpni->lpni_peer_net->lpn_peer;
1437
1438         /* If discovery is disabled locally then we needn't bother running
1439          * discovery here because discovery will not modify whatever
1440          * primary NID is currently set for this peer. If the specified peer is
1441          * down then this discovery can introduce long delays into the mount
1442          * process, so skip it if it isn't necessary.
1443          */
1444         spin_lock(&lp->lp_lock);
1445         if (!lnet_peer_discovery_disabled &&
1446             (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) ||
1447              !lnet_peer_is_uptodate_locked(lp))) {
1448                 /* force a full discovery cycle */
1449                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
1450                                 LNET_PEER_LOCK_PRIMARY;
1451                 spin_unlock(&lp->lp_lock);
1452
1453                 /* start discovery in the background. Messages to that
1454                  * peer will not go through until the discovery is
1455                  * complete
1456                  */
1457                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1458                 if (rc)
1459                         goto out_decref;
1460                 /* The lpni (or lp) for this NID may have changed and our ref is
1461                  * the only thing keeping the old one around. Release the ref
1462                  * and lookup the lpni again
1463                  */
1464                 lnet_peer_ni_decref_locked(lpni);
1465                 lpni = lnet_peer_ni_find_locked(nid);
1466                 if (!lpni) {
1467                         rc = -ENOENT;
1468                         goto out_unlock;
1469                 }
1470                 lp = lpni->lpni_peer_net->lpn_peer;
1471         } else {
1472                 spin_unlock(&lp->lp_lock);
1473         }
1474         *nid = lp->lp_primary_nid;
1475 out_decref:
1476         lnet_peer_ni_decref_locked(lpni);
1477 out_unlock:
1478         lnet_net_unlock(cpt);
1479
1480         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1481                libcfs_nidstr(nid), rc);
1482 }
1483 EXPORT_SYMBOL(LNetPrimaryNID);
1484
1485 struct lnet_peer_net *
1486 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1487 {
1488         struct lnet_peer_net *peer_net;
1489         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1490                 if (peer_net->lpn_net_id == net_id)
1491                         return peer_net;
1492         }
1493         return NULL;
1494 }
1495
1496 /*
1497  * Attach a peer_ni to a peer_net and peer. This function assumes
1498  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1499  * may be attached to a different peer, in which case it will be
1500  * properly detached first. The whole operation is done atomically.
1501  *
1502  * This function consumes the reference on lpni and Always returns 0.
1503  * This is the last function called from functions that do return an
1504  * int, so returning 0 here allows the compiler to do a tail call.
1505  */
1506 static int
1507 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1508                          struct lnet_peer_net *lpn,
1509                          struct lnet_peer_ni *lpni,
1510                          unsigned flags)
1511 {
1512         struct lnet_peer_table *ptable;
1513         bool new_lpn = false;
1514         int rc;
1515
1516         /* Install the new peer_ni */
1517         lnet_net_lock(LNET_LOCK_EX);
1518         /* Add peer_ni to global peer table hash, if necessary. */
1519         if (list_empty(&lpni->lpni_hashlist)) {
1520                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1521
1522                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1523                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1524                 ptable->pt_version++;
1525                 lnet_peer_ni_addref_locked(lpni);
1526         }
1527
1528         /* Detach the peer_ni from an existing peer, if necessary. */
1529         if (lpni->lpni_peer_net) {
1530                 LASSERT(lpni->lpni_peer_net != lpn);
1531                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1532                 lnet_peer_detach_peer_ni_locked(lpni);
1533                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1534                 lpni->lpni_peer_net = NULL;
1535         }
1536
1537         /* Add peer_ni to peer_net */
1538         lpni->lpni_peer_net = lpn;
1539         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1540                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1541         else
1542                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1543         lnet_update_peer_net_healthv(lpni);
1544         lnet_peer_net_addref_locked(lpn);
1545
1546         /* Add peer_net to peer */
1547         if (!lpn->lpn_peer) {
1548                 new_lpn = true;
1549                 lpn->lpn_peer = lp;
1550                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1551                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1552                 else
1553                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1554                 lnet_peer_addref_locked(lp);
1555         }
1556
1557         /* Add peer to global peer list, if necessary */
1558         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1559         if (list_empty(&lp->lp_peer_list)) {
1560                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1561                 ptable->pt_peers++;
1562         }
1563
1565         /* Update peer state */
1566         spin_lock(&lp->lp_lock);
1567         if (flags & LNET_PEER_CONFIGURED) {
1568                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1569                         lp->lp_state |= LNET_PEER_CONFIGURED;
1570         }
1571         if (flags & LNET_PEER_MULTI_RAIL) {
1572                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1573                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1574                         lnet_peer_clr_non_mr_pref_nids(lp);
1575                 }
1576         }
1577         if (flags & LNET_PEER_LOCK_PRIMARY)
1578                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1579         spin_unlock(&lp->lp_lock);
1580
1581         lp->lp_nnis++;
1582
1583         /* apply UDSPs */
1584         if (new_lpn) {
1585                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1586                 if (rc)
1587                         CERROR("Failed to apply UDSPs on lpn %s\n",
1588                                libcfs_net2str(lpn->lpn_net_id));
1589         }
1590         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1591         if (rc)
1592                 CERROR("Failed to apply UDSPs on lpni %s\n",
1593                        libcfs_nidstr(&lpni->lpni_nid));
1594
1595         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1596                libcfs_nidstr(&lp->lp_primary_nid),
1597                libcfs_nidstr(&lpni->lpni_nid), flags);
1598         lnet_peer_ni_decref_locked(lpni);
1599         lnet_net_unlock(LNET_LOCK_EX);
1600
1601         return 0;
1602 }
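
/* Illustrative sketch: lnet_peer_attach_peer_ni() adds the primary NID's
 * peer_ni and peer_net with list_add() and all others with
 * list_add_tail(), so the first entry of the first peer_net is the
 * primary NID's peer_ni. The hypothetical helper below makes that
 * invariant explicit; it assumes the net lock is held and that the peer
 * has at least one NI attached.
 */
static inline struct lnet_peer_ni *
example_primary_lpni_locked(struct lnet_peer *lp)
{
        struct lnet_peer_net *lpn;

        lpn = list_first_entry(&lp->lp_peer_nets, struct lnet_peer_net,
                               lpn_peer_nets);
        return list_first_entry(&lpn->lpn_peer_nis, struct lnet_peer_ni,
                                lpni_peer_nis);
}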
1603
1604 /*
1605  * Create a new peer, with nid as its primary nid.
1606  *
1607  * Call with the lnet_api_mutex held.
1608  */
1609 static int
1610 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1611 {
1612         struct lnet_peer *lp;
1613         struct lnet_peer_net *lpn;
1614         struct lnet_peer_ni *lpni;
1615         int rc = 0;
1616
1617         LASSERT(nid);
1618
1619         /*
1620          * No need for the lnet_net_lock here, because the
1621          * lnet_api_mutex is held.
1622          */
1623         lpni = lnet_peer_ni_find_locked(nid);
1624         if (lpni) {
1625                 /* A peer with this NID already exists. */
1626                 lp = lpni->lpni_peer_net->lpn_peer;
1627                 lnet_peer_ni_decref_locked(lpni);
1628                 /*
1629                  * This is an error if the peer was configured and the
1630                  * primary NID differs or an attempt is made to change
1631                  * the Multi-Rail flag. Otherwise the assumption is
1632                  * that an existing peer is being modified.
1633                  */
1634                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1635                         if (!nid_same(&lp->lp_primary_nid, nid))
1636                                 rc = -EEXIST;
1637                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1638                                 rc = -EPERM;
1639                         goto out;
1640                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1641                         if (nid_same(&lp->lp_primary_nid, nid)) {
1642                                 rc = -EEXIST;
1643                                 goto out;
1644                         }
1645                         /* We're trying to recreate an existing peer
1646                          * whose primary NID has already been created
1647                          * and locked. This is likely due to two servers
1648                          * existing on the same node, so we'll just
1649                          * refer to that node by the primary NID which
1650                          * Lustre added first.
1651                          */
1652                         rc = -EALREADY;
1653                         goto out;
1654                 }
1655                 /* Delete and recreate the peer.
1656                  * We can get here:
1657                  * 1. if the peer is being recreated as a configured NID;
1658                  * 2. if there already exists a peer which was discovered
1659                  *    manually, but is recreated via Lustre with
1660                  *    LNET_PEER_LOCK_PRIMARY set.
1661                  */
1662                 rc = lnet_peer_del(lp);
1663                 if (rc)
1664                         goto out;
1665         }
1666
1667         /* Create peer, peer_net, and peer_ni. */
1668         rc = -ENOMEM;
1669         lp = lnet_peer_alloc(nid);
1670         if (!lp)
1671                 goto out;
1672         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1673         if (!lpn)
1674                 goto out_free_lp;
1675         lpni = lnet_peer_ni_alloc(nid);
1676         if (!lpni)
1677                 goto out_free_lpn;
1678
1679         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1680
1681 out_free_lpn:
1682         LIBCFS_FREE(lpn, sizeof(*lpn));
1683 out_free_lp:
1684         LIBCFS_FREE(lp, sizeof(*lp));
1685 out:
1686         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1687                libcfs_nidstr(nid), flags, rc);
1688         return rc;
1689 }
1690
1691 /*
1692  * Add a NID to a peer. Call with ln_api_mutex held.
1693  *
1694  * Error codes:
1695  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1696  *  -EEXIST:   The NID was configured by DLC for a different peer.
1697  *  -ENOMEM:   Out of memory.
1698  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1699  *             non-multi-rail peer.
1700  */
1701 static int
1702 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1703                   unsigned int flags)
1704 {
1705         struct lnet_peer_net *lpn;
1706         struct lnet_peer_ni *lpni;
1707         int rc = 0;
1708
1709         LASSERT(lp);
1710         LASSERT(nid);
1711
1712         /* A configured peer can only be updated through configuration. */
1713         if (!(flags & LNET_PEER_CONFIGURED)) {
1714                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1715                         rc = -EPERM;
1716                         goto out;
1717                 }
1718         }
1719
1720         /*
1721          * The MULTI_RAIL flag can be set but not cleared, because
1722          * that would leave the peer struct in an invalid state.
1723          */
1724         if (flags & LNET_PEER_MULTI_RAIL) {
1725                 spin_lock(&lp->lp_lock);
1726                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1727                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1728                         lnet_peer_clr_non_mr_pref_nids(lp);
1729                 }
1730                 spin_unlock(&lp->lp_lock);
1731         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1732                 rc = -EPERM;
1733                 goto out;
1734         }
1735
1736         lpni = lnet_peer_ni_find_locked(nid);
1737         if (lpni) {
1738                 /*
1739                  * A peer_ni already exists. This is only a problem if
1740                  * it is not connected to this peer and was configured
1741                  * by DLC.
1742                  */
1743                 if (lpni->lpni_peer_net->lpn_peer == lp)
1744                         goto out_free_lpni;
1745                 if (lnet_peer_ni_is_configured(lpni)) {
1746                         rc = -EEXIST;
1747                         goto out_free_lpni;
1748                 }
1749                 /* If this is the primary NID, destroy the peer. */
1750                 if (lnet_peer_ni_is_primary(lpni)) {
1751                         struct lnet_peer *lp2 =
1752                                 lpni->lpni_peer_net->lpn_peer;
1753                         int rtr_refcount = lp2->lp_rtr_refcount;
1754
1755                         /* If this NID is the primary NID of another
1756                          * peer, and we're supposed to preserve that
1757                          * peer's primary NID, then we don't want to
1758                          * mess with it. But the configuration is wrong
1759                          * at this point, so we should flag both of
1760                          * these peers as being in a bad state.
1761                          */
1763                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
1764                                 spin_lock(&lp->lp_lock);
1765                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1766                                 spin_unlock(&lp->lp_lock);
1767                                 spin_lock(&lp2->lp_lock);
1768                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1769                                 spin_unlock(&lp2->lp_lock);
1770                                 goto out_free_lpni;
1771                         }
1772                         /*
1773                          * If we're trying to delete a router, it means
1774                          * we're moving this peer NI to a new peer, so we
1775                          * must transfer the router properties to the new peer.
1776                          */
1777                         if (rtr_refcount > 0) {
1778                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1779                                 lnet_rtr_transfer_to_peer(lp2, lp);
1780                         }
1781                         lnet_peer_del(lp2);
1782                         lnet_peer_ni_decref_locked(lpni);
1783                         lpni = lnet_peer_ni_alloc(nid);
1784                         if (!lpni) {
1785                                 rc = -ENOMEM;
1786                                 goto out_free_lpni;
1787                         }
1788                 }
1789         } else {
1790                 lpni = lnet_peer_ni_alloc(nid);
1791                 if (!lpni) {
1792                         rc = -ENOMEM;
1793                         goto out_free_lpni;
1794                 }
1795         }
1796
1797         /*
1798          * Get the peer_net. Check that we're not adding a second
1799          * peer_ni on a peer_net of a non-multi-rail peer.
1800          */
1801         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1802         if (!lpn) {
1803                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1804                 if (!lpn) {
1805                         rc = -ENOMEM;
1806                         goto out_free_lpni;
1807                 }
1808         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1809                 rc = -ENOTUNIQ;
1810                 goto out_free_lpni;
1811         }
1812
1813         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1814
1815 out_free_lpni:
1816         lnet_peer_ni_decref_locked(lpni);
1817 out:
1818         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1819                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1820                flags, rc);
1821         return rc;
1822 }
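
/* Hypothetical sketch mapping the error codes documented above
 * lnet_peer_add_nid() to log messages; the helper and the strings are
 * illustrative only.
 */
static inline void
example_report_add_nid_error(struct lnet_nid *nid, int rc)
{
        switch (rc) {
        case 0:
                break;
        case -EPERM:
                CERROR("%s: non-DLC addition to DLC-configured peer\n",
                       libcfs_nidstr(nid));
                break;
        case -EEXIST:
                CERROR("%s: NID configured by DLC for a different peer\n",
                       libcfs_nidstr(nid));
                break;
        case -ENOTUNIQ:
                CERROR("%s: second NID on one net of a non-MR peer\n",
                       libcfs_nidstr(nid));
                break;
        case -ENOMEM:
                CERROR("%s: out of memory\n", libcfs_nidstr(nid));
                break;
        default:
                CERROR("%s: unexpected error %d\n", libcfs_nidstr(nid), rc);
        }
}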
1823
1824 /*
1825  * Update the primary NID of a peer, if possible.
1826  *
1827  * Call with the lnet_api_mutex held.
1828  */
1829 static int
1830 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1831                           unsigned int flags)
1832 {
1833         struct lnet_nid old = lp->lp_primary_nid;
1834         int rc = 0;
1835
1836         if (nid_same(&lp->lp_primary_nid, nid))
1837                 goto out;
1838
1839         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1840                 lp->lp_primary_nid = *nid;
1841
1842         rc = lnet_peer_add_nid(lp, nid, flags);
1843         if (rc) {
1844                 lp->lp_primary_nid = old;
1845                 goto out;
1846         }
1847 out:
1848         /* If this is a configured peer, or the primary NID for that
1849          * peer has been locked, then we don't want to flag this
1850          * scenario as a failure.
1851          */
1852         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1853             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1854                 return 0;
1855
1856         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1857                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1858
1859         return rc;
1860 }
1861
1862 /*
1863  * lpni creation initiated due to traffic, either sending or receiving.
1864  * Callers must hold the ln_api_mutex.
1865  * A ref is taken on the lnet_peer_ni returned by this function.
1866  */
1867 static struct lnet_peer_ni *
1868 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1869 __must_hold(&the_lnet.ln_api_mutex)
1870 {
1871         struct lnet_peer *lp = NULL;
1872         struct lnet_peer_net *lpn = NULL;
1873         struct lnet_peer_ni *lpni;
1874         unsigned flags = 0;
1875         int rc = 0;
1876
1877         if (LNET_NID_IS_ANY(nid)) {
1878                 rc = -EINVAL;
1879                 goto out_err;
1880         }
1881
1882         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1883         lpni = lnet_peer_ni_find_locked(nid);
1884         if (lpni) {
1885                 /*
1886                  * We must have raced with another thread. Since we
1887                  * know next to nothing about a peer_ni created by
1888                  * traffic, we just assume everything is ok and
1889                  * return.
1890                  */
1891                 goto out;
1892         }
1893
1894         /* Create peer, peer_net, and peer_ni. */
1895         rc = -ENOMEM;
1896         lp = lnet_peer_alloc(nid);
1897         if (!lp)
1898                 goto out_err;
1899         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1900         if (!lpn)
1901                 goto out_err;
1902         lpni = lnet_peer_ni_alloc(nid);
1903         if (!lpni)
1904                 goto out_err;
1905         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1906
1907         /* lnet_peer_attach_peer_ni() always returns 0 */
1908         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1909
1910         lnet_peer_ni_addref_locked(lpni);
1911
1912 out_err:
1913         if (rc) {
1914                 if (lpn)
1915                         LIBCFS_FREE(lpn, sizeof(*lpn));
1916                 if (lp)
1917                         LIBCFS_FREE(lp, sizeof(*lp));
1918                 lpni = ERR_PTR(rc);
1919         }
1920 out:
1921         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1922         return lpni;
1923 }
1924
1925 /*
1926  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1927  *
1928  * This API handles the following combinations:
1929  *   Create a peer with its primary NID if only the prim_nid is provided.
1930  *   Add a NID to a peer identified by the prim_nid; the peer identified
1931  *   by the prim_nid must already exist.
1932  *   The peer being created may be non-MR.
1933  *
1934  * The caller must hold ln_api_mutex. This prevents the peer from
1935  * being created/modified/deleted by a different thread.
1936  */
1937 static int
1938 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
1939                  unsigned int flags)
1940 __must_hold(&the_lnet.ln_api_mutex)
1941 {
1942         struct lnet_peer *lp = NULL;
1943         struct lnet_peer_ni *lpni;
1944
1945         /* The prim_nid must always be specified */
1946         if (LNET_NID_IS_ANY(prim_nid))
1947                 return -EINVAL;
1948
1949         if (mr)
1950                 flags |= LNET_PEER_MULTI_RAIL;
1951
1952         /*
1953          * If nid isn't specified, we must create a new peer with
1954          * prim_nid as its primary nid.
1955          */
1956         if (LNET_NID_IS_ANY(nid))
1957                 return lnet_peer_add(prim_nid, flags);
1958
1959         /* Look up the prim_nid, which must exist. */
1960         lpni = lnet_peer_ni_find_locked(prim_nid);
1961         if (!lpni)
1962                 return -ENOENT;
1963         lnet_peer_ni_decref_locked(lpni);
1964         lp = lpni->lpni_peer_net->lpn_peer;
1965
1966         /* Primary NID must match */
1967         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
1968                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1969                        libcfs_nidstr(prim_nid),
1970                        libcfs_nidstr(&lp->lp_primary_nid));
1971                 return -ENODEV;
1972         }
1973
1974         /* Multi-Rail flag must match. */
1975         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1976                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1977                        libcfs_nidstr(prim_nid));
1978                 return -EPERM;
1979         }
1980
1981         if ((flags & LNET_PEER_LOCK_PRIMARY) &&
1982             (lnet_peer_is_uptodate(lp) && (lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1983                 CDEBUG(D_NET,
1984                        "Don't add temporary peer NI for uptodate peer %s\n",
1985                        libcfs_nidstr(&lp->lp_primary_nid));
1986                 return -EINVAL;
1987         }
1988
1989         return lnet_peer_add_nid(lp, nid, flags);
1990 }
1991
1992 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
1993                           bool mr, bool lock_prim)
1994 {
1995         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
1996
1997         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
1998 }
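
/* Usage sketch only. Assumptions: the NID strings are examples, the
 * caller holds the ln_api_mutex (as lnet_add_peer_ni() requires), and
 * LNET_ANY_NID and libcfs_strnid() are available to the caller. This
 * mirrors the two IOC_LIBCFS_ADD_PEER_NI paths described above: create
 * an MR peer with a locked primary NID, then add a second NID to it.
 */
static inline int
example_configure_mr_peer(void)
{
        struct lnet_nid prim_nid, nid;
        int rc;

        rc = libcfs_strnid(&prim_nid, "10.0.0.1@tcp");
        if (!rc)
                rc = libcfs_strnid(&nid, "10.0.0.2@tcp");
        if (rc)
                return rc;

        /* nid == ANY: create the peer with prim_nid as its primary NID */
        rc = lnet_user_add_peer_ni(&prim_nid, &LNET_ANY_NID, true, true);
        if (!rc)
                /* add a second NID to the existing peer */
                rc = lnet_user_add_peer_ni(&prim_nid, &nid, true, true);
        return rc;
}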
1999
2000 static int
2001 lnet_reset_peer(struct lnet_peer *lp)
2002 {
2003         struct lnet_peer_net *lpn, *lpntmp;
2004         struct lnet_peer_ni *lpni, *lpnitmp;
2005         unsigned int flags;
2006         int rc;
2007
2008         lnet_peer_cancel_discovery(lp);
2009
2010         flags = LNET_PEER_CONFIGURED;
2011         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2012                 flags |= LNET_PEER_MULTI_RAIL;
2013
2014         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2015                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2016                                          lpni_peer_nis) {
2017                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2018                                 continue;
2019
2020                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2021                         if (rc) {
2022                                 CERROR("Failed to delete %s from peer %s\n",
2023                                        libcfs_nidstr(&lpni->lpni_nid),
2024                                        libcfs_nidstr(&lp->lp_primary_nid));
2025                         }
2026                 }
2027         }
2028
2029         /* mark it for discovery the next time we use it */
2030         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2031         return 0;
2032 }
2033
2034 /*
2035  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2036  *
2037  * This API handles the following combinations:
2038  *   Delete a NI from a peer if both prim_nid and nid are provided.
2039  *   Delete a peer if only prim_nid is provided.
2040  *   Delete a peer if the given nid is its primary nid.
2041  *
2042  * The caller must hold ln_api_mutex. This prevents the peer from
2043  * being modified/deleted by a different thread.
2044  */
2045 int
2046 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2047                  int force)
2048 {
2049         struct lnet_peer *lp;
2050         struct lnet_peer_ni *lpni;
2051         unsigned int flags;
2052
2053         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2054                 return -EINVAL;
2055
2056         lpni = lnet_peer_ni_find_locked(prim_nid);
2057         if (!lpni)
2058                 return -ENOENT;
2059         lnet_peer_ni_decref_locked(lpni);
2060         lp = lpni->lpni_peer_net->lpn_peer;
2061
2062         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2063                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2064                        libcfs_nidstr(prim_nid),
2065                        libcfs_nidstr(&lp->lp_primary_nid));
2066                 return -ENODEV;
2067         }
2068
2069         lnet_net_lock(LNET_LOCK_EX);
2070         if (lp->lp_rtr_refcount > 0) {
2071                 lnet_net_unlock(LNET_LOCK_EX);
2072                 CERROR("%s is a router. Cannot be deleted\n",
2073                        libcfs_nidstr(prim_nid));
2074                 return -EBUSY;
2075         }
2076         lnet_net_unlock(LNET_LOCK_EX);
2077
2078         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2079                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2080                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2081                                libcfs_nidstr(&lp->lp_primary_nid));
2082                         return lnet_reset_peer(lp);
2083                 } else {
2084                         return lnet_peer_del(lp);
2085                 }
2086         }
2087
2088         flags = LNET_PEER_CONFIGURED;
2089         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2090                 flags |= LNET_PEER_MULTI_RAIL;
2091
2092         return lnet_peer_del_nid(lp, nid, flags);
2093 }
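
/* Sketch of the two deletion modes described above (the caller must hold
 * the ln_api_mutex; LNET_ANY_NID is assumed available). Passing the ANY
 * NID deletes the whole peer, while a specific non-primary NID removes
 * just that peer NI.
 */
static inline int
example_del_peer(struct lnet_nid *prim_nid, struct lnet_nid *nid,
                 bool whole_peer)
{
        if (whole_peer)
                return lnet_del_peer_ni(prim_nid, &LNET_ANY_NID, 0);
        return lnet_del_peer_ni(prim_nid, nid, 0);
}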
2094
2095 void
2096 lnet_destroy_peer_ni_locked(struct kref *ref)
2097 {
2098         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2099                                                  lpni_kref);
2100         struct lnet_peer_table *ptable;
2101         struct lnet_peer_net *lpn;
2102
2103         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2104
2105         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2106         LASSERT(list_empty(&lpni->lpni_txq));
2107         LASSERT(lpni->lpni_txqnob == 0);
2108         LASSERT(list_empty(&lpni->lpni_peer_nis));
2109         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2110
2111         lpn = lpni->lpni_peer_net;
2112         lpni->lpni_peer_net = NULL;
2113         lpni->lpni_net = NULL;
2114
2115         if (!list_empty(&lpni->lpni_hashlist)) {
2116                 /* remove the peer ni from the zombie list */
2117                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2118                 spin_lock(&ptable->pt_zombie_lock);
2119                 list_del_init(&lpni->lpni_hashlist);
2120                 ptable->pt_zombies--;
2121                 spin_unlock(&ptable->pt_zombie_lock);
2122         }
2123
2124         if (lpni->lpni_pref_nnids > 1) {
2125                 struct lnet_nid_list *ne, *tmp;
2126
2127                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2128                                          nl_list) {
2129                         list_del_init(&ne->nl_list);
2130                         LIBCFS_FREE(ne, sizeof(*ne));
2131                 }
2132         }
2133         LIBCFS_FREE(lpni, sizeof(*lpni));
2134
2135         if (lpn)
2136                 lnet_peer_net_decref_locked(lpn);
2137 }
2138
2139 struct lnet_peer_ni *
2140 lnet_nid2peerni_ex(struct lnet_nid *nid)
2141 __must_hold(&the_lnet.ln_api_mutex)
2142 {
2143         struct lnet_peer_ni *lpni = NULL;
2144
2145         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2146                 return ERR_PTR(-ESHUTDOWN);
2147
2148         /*
2149          * find if a peer_ni already exists.
2150          * If so then just return that.
2151          */
2152         lpni = lnet_peer_ni_find_locked(nid);
2153         if (lpni)
2154                 return lpni;
2155
2156         lnet_net_unlock(LNET_LOCK_EX);
2157
2158         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2159
2160         lnet_net_lock(LNET_LOCK_EX);
2161
2162         return lpni;
2163 }
2164
2165 /*
2166  * Get a peer_ni for the given nid, create it if necessary. Takes a
2167  * hold on the peer_ni.
2168  */
2169 struct lnet_peer_ni *
2170 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2171                         struct lnet_nid *pref, int cpt)
2172 {
2173         struct lnet_peer_ni *lpni = NULL;
2174
2175         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2176                 return ERR_PTR(-ESHUTDOWN);
2177
2178         /*
2179          * find if a peer_ni already exists.
2180          * If so then just return that.
2181          */
2182         lpni = lnet_peer_ni_find_locked(nid);
2183         if (lpni)
2184                 return lpni;
2185
2186         /*
2187          * Slow path:
2188          * use the lnet_api_mutex to serialize the creation of the peer_ni
2189          * and the creation/deletion of the local ni/net. When a local ni is
2190          * created, if there exists a set of peer_nis on that network,
2191          * they need to be traversed and updated. When a local NI is
2192          * deleted, which could result in a network being deleted, then
2193          * all peer nis on that network need to be removed as well.
2194          *
2195          * Creation through traffic should also be serialized with
2196          * creation through DLC.
2197          */
2198         lnet_net_unlock(cpt);
2199         mutex_lock(&the_lnet.ln_api_mutex);
2200         /*
2201          * the_lnet.ln_state is only modified under the ln_api_mutex, so a
2202          * single check here is sufficient.
2203          */
2204         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2205                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2206
2207         mutex_unlock(&the_lnet.ln_api_mutex);
2208         lnet_net_lock(cpt);
2209
2210         /* Lock has been dropped, check again for shutdown. */
2211         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2212                 if (!IS_ERR_OR_NULL(lpni))
2213                         lnet_peer_ni_decref_locked(lpni);
2214                 lpni = ERR_PTR(-ESHUTDOWN);
2215         }
2216
2217         return lpni;
2218 }
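
/* Illustrative sketch: looking up (or creating) a peer_ni from a context
 * that does not yet hold a per-CPT net lock. The helper is hypothetical;
 * the error handling is the minimum needed for the IS_ERR() convention
 * used by lnet_peerni_by_nid_locked().
 */
static inline int
example_use_peerni(struct lnet_nid *nid)
{
        struct lnet_peer_ni *lpni;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
        if (IS_ERR(lpni)) {
                lnet_net_unlock(cpt);
                return PTR_ERR(lpni);
        }
        /* ... use lpni under the net lock ... */
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(cpt);
        return 0;
}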
2219
2220 bool
2221 lnet_peer_gw_discovery(struct lnet_peer *lp)
2222 {
2223         bool rc = false;
2224
2225         spin_lock(&lp->lp_lock);
2226         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2227                 rc = true;
2228         spin_unlock(&lp->lp_lock);
2229
2230         return rc;
2231 }
2232
2233 bool
2234 lnet_peer_is_uptodate(struct lnet_peer *lp)
2235 {
2236         bool rc;
2237
2238         spin_lock(&lp->lp_lock);
2239         rc = lnet_peer_is_uptodate_locked(lp);
2240         spin_unlock(&lp->lp_lock);
2241         return rc;
2242 }
2243
2244 /*
2245  * Is a peer uptodate from the point of view of discovery?
2246  *
2247  * If it is currently being processed, obviously not.
2248  * A forced Ping or Push is also handled by the discovery thread.
2249  *
2250  * Otherwise look at whether the peer needs rediscovering.
2251  */
2252 bool
2253 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2254 __must_hold(&lp->lp_lock)
2255 {
2256         bool rc;
2257
2258         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2259                             LNET_PEER_FORCE_PING |
2260                             LNET_PEER_FORCE_PUSH)) {
2261                 rc = false;
2262         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2263                 rc = false;
2264         } else if (lnet_peer_needs_push(lp)) {
2265                 rc = false;
2266         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2267                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2268                         rc = true;
2269                 else
2270                         rc = false;
2271         } else {
2272                 rc = false;
2273         }
2274
2275         return rc;
2276 }
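
/* Equivalent single-expression form of the predicate above, kept here as
 * an illustrative sketch only; the branch form in
 * lnet_peer_is_uptodate_locked() is preferred for readability and
 * debuggability.
 */
static inline bool
example_is_uptodate_expr(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        return !(lp->lp_state & (LNET_PEER_DISCOVERING |
                                 LNET_PEER_FORCE_PING |
                                 LNET_PEER_FORCE_PUSH |
                                 LNET_PEER_REDISCOVER)) &&
               !lnet_peer_needs_push(lp) &&
               (lp->lp_state & LNET_PEER_DISCOVERED) &&
               (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
}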
2277
2278 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2279 void
2280 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2281 {
2282         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2283          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2284          * when adding to the list and queuing the peer to ensure that we do not
2285          * strand any messages on the lp_dc_pendq. This scheme ensures the
2286          * message will be resent even if the peer is already being discovered.
2287          * Therefore we needn't check the return value of
2288          * lnet_peer_queue_for_discovery(lp).
2289          */
2290         lnet_net_lock(LNET_LOCK_EX);
2291         spin_lock(&lp->lp_lock);
2292         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2293         spin_unlock(&lp->lp_lock);
2294         lnet_peer_queue_for_discovery(lp);
2295         lnet_net_unlock(LNET_LOCK_EX);
2296 }
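
/* Sketch of the locking discipline described above: any path draining
 * lp_dc_pendq must hold both net_lock/EX and lp_lock while splicing,
 * exactly as lnet_peer_queue_message() does when adding. The helper name
 * and the caller-supplied list head are illustrative.
 */
static inline void
example_drain_dc_pendq(struct lnet_peer *lp, struct list_head *out)
{
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lp->lp_lock);
        list_splice_init(&lp->lp_dc_pendq, out);
        spin_unlock(&lp->lp_lock);
        lnet_net_unlock(LNET_LOCK_EX);
}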
2297
2298 /*
2299  * Queue a peer for the attention of the discovery thread.  Call with
2300  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2301  * -EALREADY if the peer was already queued.
2302  */
2303 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2304 {
2305         int rc;
2306
2307         spin_lock(&lp->lp_lock);
2308         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2309                 lp->lp_state |= LNET_PEER_DISCOVERING;
2310         spin_unlock(&lp->lp_lock);
2311         if (list_empty(&lp->lp_dc_list)) {
2312                 lnet_peer_addref_locked(lp);
2313                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2314                 wake_up(&the_lnet.ln_dc_waitq);
2315                 rc = 0;
2316         } else {
2317                 rc = -EALREADY;
2318         }
2319
2320         CDEBUG(D_NET, "Queue peer %s: %d\n",
2321                libcfs_nidstr(&lp->lp_primary_nid), rc);
2322
2323         return rc;
2324 }
2325
2326 /*
2327  * Discovery of a peer is complete. Wake all waiters on the peer.
2328  * Call with lnet_net_lock/EX held.
2329  */
2330 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2331 {
2332         struct lnet_msg *msg, *tmp;
2333         int rc = 0;
2334         LIST_HEAD(pending_msgs);
2335
2336         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2337                libcfs_nidstr(&lp->lp_primary_nid));
2338
2339         spin_lock(&lp->lp_lock);
2340         /* Our caller dropped lp_lock which may have allowed another thread to
2341          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2342          * Ensure it is cleared.
2343          */
2344         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2345         if (dc_error) {
2346                 lp->lp_dc_error = dc_error;
2347                 lp->lp_state |= LNET_PEER_REDISCOVER;
2348         }
2349         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2350         spin_unlock(&lp->lp_lock);
2351         list_del_init(&lp->lp_dc_list);
2352         wake_up(&lp->lp_dc_waitq);
2353
2354         if (lp->lp_rtr_refcount > 0)
2355                 lnet_router_discovery_complete(lp);
2356
2357         lnet_net_unlock(LNET_LOCK_EX);
2358
2359         /* iterate through all pending messages and send them again */
2360         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2361                 list_del_init(&msg->msg_list);
2362                 if (dc_error) {
2363                         lnet_finalize(msg, dc_error);
2364                         continue;
2365                 }
2366
2367                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2368                        lnet_msgtyp2str(msg->msg_type),
2369                        libcfs_idstr(&msg->msg_target));
2370                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2371                                &msg->msg_rtr_nid_param);
2372                 if (rc < 0) {
2373                         CNETERR("Error sending %s to %s: %d\n",
2374                                lnet_msgtyp2str(msg->msg_type),
2375                                libcfs_idstr(&msg->msg_target), rc);
2376                         lnet_finalize(msg, rc);
2377                 }
2378         }
2379         lnet_net_lock(LNET_LOCK_EX);
2380         lnet_peer_decref_locked(lp);
2381 }
2382
2383 /*
2384  * Handle inbound push.
2385  * Like any event handler, called with lnet_res_lock/CPT held.
2386  */
2387 void lnet_peer_push_event(struct lnet_event *ev)
2388 {
2389         struct lnet_ping_buffer *pbuf;
2390         struct lnet_peer *lp;
2391         int infobytes;
2392
2393         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2394
2395         /* lnet_find_peer() adds a refcount */
2396         lp = lnet_find_peer(&ev->source.nid);
2397         if (!lp) {
2398                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2399                        libcfs_nidstr(&ev->initiator.nid),
2400                        libcfs_nidstr(&ev->source.nid));
2401                 pbuf->pb_needs_post = true;
2402                 return;
2403         }
2404
2405         /* Ensure peer state remains consistent while we modify it. */
2406         spin_lock(&lp->lp_lock);
2407
2408         /*
2409          * If some kind of error happened, the contents of the message
2410          * cannot be used. Clear the NIDS_UPTODATE flag and set the
2411          * FORCE_PING flag to trigger a ping.
2412          */
2413         if (ev->status) {
2414                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2415                 lp->lp_state |= LNET_PEER_FORCE_PING;
2416                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2417                        ev->status,
2418                        libcfs_nidstr(&lp->lp_primary_nid),
2419                        libcfs_nidstr(&ev->source.nid));
2420                 goto out;
2421         }
2422
2423         /*
2424          * A push with invalid or corrupted info. Clear the UPTODATE
2425          * flag to trigger a ping.
2426          */
2427         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2428                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2429                 lp->lp_state |= LNET_PEER_FORCE_PING;
2430                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2431                        libcfs_nidstr(&lp->lp_primary_nid));
2432                 goto out;
2433         }
2434
2435         /* Make sure we'll allocate the correct size ping buffer when
2436          * pinging the peer.
2437          */
2438         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2439         if (lp->lp_data_bytes < infobytes)
2440                 lp->lp_data_bytes = infobytes;
2441
2442         /*
2443          * A non-Multi-Rail peer is not supposed to be capable of
2444          * sending a push.
2445          */
2446         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2447                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2448                        libcfs_nidstr(&lp->lp_primary_nid));
2449                 goto out;
2450         }
2451
2452         /*
2453          * The peer may have discovery disabled at its end. Set
2454          * NO_DISCOVERY as appropriate.
2455          */
2456         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2457                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2458                        libcfs_nidstr(&lp->lp_primary_nid));
2459                 /*
2460                  * Mark the peer for deletion if we already know about it
2461                  * and it's going from discovery enabled to discovery disabled.
2462                  */
2463                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2464                                       LNET_PEER_DISCOVERING)) &&
2465                      lp->lp_state & LNET_PEER_DISCOVERED) {
2466                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2467                                libcfs_nidstr(&lp->lp_primary_nid),
2468                                lp->lp_state);
2469                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2470                 }
2471                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2472         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2473                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2474                        libcfs_nidstr(&lp->lp_primary_nid));
2475                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2476         }
2477
2478         /*
2479          * Update the MULTI_RAIL flag based on the push. If the peer
2480          * was configured with DLC then the setting should match what
2481          * DLC put in.
2482          * NB: We verified above that the MR feature bit is set in pi_features
2483          */
2484         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2485                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2486                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2487         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2488                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2489                       libcfs_nidstr(&lp->lp_primary_nid));
2490         } else if (lnet_peer_discovery_disabled) {
2491                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2492                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2493         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2494                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2495                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2496         } else {
2497                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2498                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2499                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2500                 lnet_peer_clr_non_mr_pref_nids(lp);
2501         }
2502
2503         /* Check for truncation of the Put message. Clear the
2504          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2505          * and tell discovery to allocate a bigger buffer.
2506          */
2507         if (ev->mlength < ev->rlength) {
2508                 if (the_lnet.ln_push_target_nbytes < infobytes)
2509                         the_lnet.ln_push_target_nbytes = infobytes;
2510                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2511                 lp->lp_state |= LNET_PEER_FORCE_PING;
2512                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2513                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2514                 goto out;
2515         }
2516
2517         /* always assume new data */
2518         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2519         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2520
2521         /* If there is data present that hasn't been processed yet,
2522          * we'll replace it if the Put contained newer data and it
2523          * fits. We're racing with a Ping or earlier Push in this
2524          * case.
2525          */
2526         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2527                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2528                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2529                     infobytes <= lp->lp_data->pb_nbytes) {
2530                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2531                                infobytes);
2532                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2533                               libcfs_nidstr(&lp->lp_primary_nid),
2534                               LNET_PING_BUFFER_SEQNO(pbuf),
2535                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2536                 }
2537                 goto out;
2538         }
2539
2540         /*
2541          * Allocate a buffer to copy the data. On a failure we drop
2542          * the Push and set FORCE_PING to force the discovery
2543          * thread to fix the problem by pinging the peer.
2544          */
2545         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2546         if (!lp->lp_data) {
2547                 lp->lp_state |= LNET_PEER_FORCE_PING;
2548                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2549                        libcfs_nidstr(&lp->lp_primary_nid),
2550                        LNET_PING_BUFFER_SEQNO(pbuf));
2551                 goto out;
2552         }
2553
2554         /* Success */
2555         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2556                       FLEXIBLE_OBJECT);
2557         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2558         CDEBUG(D_NET, "Received Push %s %u\n",
2559                libcfs_nidstr(&lp->lp_primary_nid),
2560                LNET_PING_BUFFER_SEQNO(pbuf));
2561
2562 out:
2563         /* We've processed this buffer. It can be reposted */
2564         pbuf->pb_needs_post = true;
2565
2566         /*
2567          * Queue the peer for discovery if it is not up to date. If it was
2568          * already queued, force it onto the request queue and wake the
2569          * discovery thread, because its status changed.
2570          */
2571         spin_unlock(&lp->lp_lock);
2572         lnet_net_lock(LNET_LOCK_EX);
2573         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2574                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2575                 wake_up(&the_lnet.ln_dc_waitq);
2576         }
2577         /* Drop refcount from lookup */
2578         lnet_peer_decref_locked(lp);
2579         lnet_net_unlock(LNET_LOCK_EX);
2580 }
2581
2582 /*
2583  * Clear the discovery error state, unless we're already discovering
2584  * this peer, in which case the error is current.
2585  */
2586 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2587 {
2588         spin_lock(&lp->lp_lock);
2589         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2590                 lp->lp_dc_error = 0;
2591         spin_unlock(&lp->lp_lock);
2592 }
2593
2594 /*
2595  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2596  * dropped/retaken within this function. An lnet_peer_ni is passed in
2597  * because discovery could tear down an lnet_peer.
2598  */
2599 int
2600 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2601 {
2602         DEFINE_WAIT(wait);
2603         struct lnet_peer *lp;
2604         int rc = 0;
2605         int count = 0;
2606
2607 again:
2608         lnet_net_unlock(cpt);
2609         lnet_net_lock(LNET_LOCK_EX);
2610         lp = lpni->lpni_peer_net->lpn_peer;
2611         lnet_peer_clear_discovery_error(lp);
2612
2613         /*
2614          * We're willing to be interrupted. The lpni can become a
2615          * zombie if we race with DLC, so we must check for that.
2616          */
2617         for (;;) {
2618                 /* Keep lp alive when the lnet_net_lock is unlocked */
2619                 lnet_peer_addref_locked(lp);
2620                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2621                 if (signal_pending(current))
2622                         break;
2623                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2624                         break;
2625                 /*
2626                  * Don't repeat discovery if discovery is disabled. This is
2627                  * done to ensure we can still use discovery as a standard
2628                  * ping, for backwards compatibility with routers which do
2629                  * not have discovery or have it disabled.
2630                  */
2631                 if (lnet_is_discovery_disabled(lp) && count > 0)
2632                         break;
2633                 if (lp->lp_dc_error)
2634                         break;
2635                 if (lnet_peer_is_uptodate(lp))
2636                         break;
2637                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2638                         break;
2639                 lnet_peer_queue_for_discovery(lp);
2640                 count++;
2641                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2642
2643                 /*
2644                  * If caller requested a non-blocking operation then
2645                  * return immediately. Once discovery is complete any
2646                  * pending messages that were stopped due to discovery
2647                  * will be transmitted.
2648                  */
2649                 if (!block)
2650                         break;
2651
2652                 lnet_net_unlock(LNET_LOCK_EX);
2653                 schedule();
2654                 finish_wait(&lp->lp_dc_waitq, &wait);
2655                 lnet_net_lock(LNET_LOCK_EX);
2656                 lnet_peer_decref_locked(lp);
2657                 /* Peer may have changed */
2658                 lp = lpni->lpni_peer_net->lpn_peer;
2659         }
2660         finish_wait(&lp->lp_dc_waitq, &wait);
2661
2662         lnet_net_unlock(LNET_LOCK_EX);
2663         lnet_net_lock(cpt);
2664         lnet_peer_decref_locked(lp);
2665         /*
2666          * The peer may have changed, so re-check and rediscover if that turns
2667          * out to have been the case. The reference count on lp ensured that
2668          * even if it was unlinked from lpni the memory could not be recycled.
2669          * Thus the check below is sufficient to determine whether the peer
2670          * changed. If the peer changed, then lp must not be dereferenced.
2671          */
2672         if (lp != lpni->lpni_peer_net->lpn_peer)
2673                 goto again;
2674
2675         if (signal_pending(current))
2676                 rc = -EINTR;
2677         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2678                 rc = -ESHUTDOWN;
2679         else if (lp->lp_dc_error)
2680                 rc = lp->lp_dc_error;
2681         else if (!block)
2682                 CDEBUG(D_NET, "non-blocking discovery\n");
2683         else if (!lnet_peer_is_uptodate(lp) &&
2684                  !(lnet_is_discovery_disabled(lp) ||
2685                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2686                 goto again;
2687
2688         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2689                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2690                libcfs_nidstr(&lpni->lpni_nid), rc,
2691                (!block) ? "pending discovery" : "discovery complete");
2692
2693         return rc;
2694 }
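
/* Hypothetical caller sketch for blocking discovery, following the same
 * pattern as the non-blocking call in LNetPrimaryNID() above. The caller
 * must hold the ln_api_mutex. Note that after
 * lnet_discover_peer_locked() returns, the lpni may have been replaced,
 * so it must be looked up again before further use.
 */
static inline int
example_discover_blocking(struct lnet_nid *nid)
__must_hold(&the_lnet.ln_api_mutex)
{
        struct lnet_peer_ni *lpni;
        int cpt;
        int rc;

        cpt = lnet_net_lock_current();
        lpni = lnet_peer_ni_find_locked(nid);
        if (!lpni) {
                lnet_net_unlock(cpt);
                return -ENOENT;
        }
        rc = lnet_discover_peer_locked(lpni, cpt, true /* block */);
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(cpt);
        return rc;
}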
2695
2696 /* Handle an incoming ack for a push. */
2697 static void
2698 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2699 {
2700         struct lnet_ping_buffer *pbuf;
2701
2702         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2703         spin_lock(&lp->lp_lock);
2704         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2705         lp->lp_push_error = ev->status;
2706         if (ev->status)
2707                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2708         else
2709                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2710         spin_unlock(&lp->lp_lock);
2711
2712         CDEBUG(D_NET, "peer %s ev->status %d\n",
2713                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2714 }
2715
2716 static bool find_primary(struct lnet_nid *nid,
2717                          struct lnet_ping_buffer *pbuf)
2718 {
2719         struct lnet_ping_info *pi = &pbuf->pb_info;
2720         struct lnet_ping_iter piter;
2721         __u32 *stp;
2722
2723         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2724                 /* First large nid is primary */
2725                 for (stp = ping_iter_first(&piter, pbuf, nid);
2726                      stp;
2727                      stp = ping_iter_next(&piter, nid)) {
2728                         if (nid_is_nid4(nid))
2729                                 continue;
2730                         /* nid has already been copied in */
2731                         return true;
2732                 }
2733                 /* no large nids ... weird ... ignore the flag
2734                  * and use first nid.
2735                  */
2736         }
2737         /* pi_ni[1] is primary */
2738         if (pi->pi_nnis < 2)
2739                 return false;
2740         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2741         return true;
2742 }
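
/* Illustrative sketch: the same ping iterator used by find_primary() can
 * walk every NID a peer advertised, large and small alike. The helper is
 * hypothetical.
 */
static inline int
example_count_ping_nids(struct lnet_ping_buffer *pbuf)
{
        struct lnet_ping_iter piter;
        struct lnet_nid nid;
        __u32 *stp;
        int count = 0;

        for (stp = ping_iter_first(&piter, pbuf, &nid);
             stp;
             stp = ping_iter_next(&piter, &nid))
                count++;

        return count;
}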
2743
2744 /* Handle a Reply message. This is the reply to a Ping message. */
2745 static void
2746 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2747 {
2748         struct lnet_ping_buffer *pbuf;
2749         struct lnet_nid primary;
2750         int infobytes;
2751         int rc;
2752         bool ping_feat_disc;
2753
2754         spin_lock(&lp->lp_lock);
2755
2756         lp->lp_disc_src_nid = ev->target.nid;
2757         lp->lp_disc_dst_nid = ev->source.nid;
2758
2759         /*
2760          * If some kind of error happened, the contents of the message
2761          * cannot be used. Set PING_FAILED to trigger a retry.
2762          */
2763         if (ev->status) {
2764                 lp->lp_state |= LNET_PEER_PING_FAILED;
2765                 lp->lp_ping_error = ev->status;
2766                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2767                        ev->status,
2768                        libcfs_nidstr(&lp->lp_primary_nid),
2769                        libcfs_nidstr(&ev->source.nid));
2770                 goto out;
2771         }
2772
2773         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2774         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2775                 lnet_swap_pinginfo(pbuf);
2776
2777         /*
2778          * A reply with invalid or corrupted info. Set PING_FAILED to
2779          * trigger a retry.
2780          */
2781         rc = lnet_ping_info_validate(&pbuf->pb_info);
2782         if (rc) {
2783                 lp->lp_state |= LNET_PEER_PING_FAILED;
2784                 lp->lp_ping_error = 0;
2785                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2786                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2787                 goto out;
2788         }
2789
2790         /*
2791          * The peer may have discovery disabled at its end. Set
2792          * NO_DISCOVERY as appropriate.
2793          */
2794         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2795         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2796                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2797                        libcfs_nidstr(&lp->lp_primary_nid),
2798                        ping_feat_disc ? "enabled" : "disabled",
2799                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2800
2801                 /* Detect whether this peer has toggled discovery from on to
2802                  * off and whether we can delete and re-create the peer. Peers
2803                  * that were manually configured cannot be deleted by discovery.
2804                  * We need to delete this peer and re-create it if the peer was
2805                  * not configured manually, is currently considered DD capable,
2806                  * and either:
2807                  * 1. We've already discovered the peer (the peer has toggled
2808                  *    the discovery feature from on to off), or
2809                  * 2. The peer is considered MR, but it was not user configured
2810                  *    (this was a "temporary" peer created via the kernel APIs
2811                  *     that we're discovering for the first time)
2812                  */
2813                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2814                                       LNET_PEER_NO_DISCOVERY)) &&
2815                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2816                                      LNET_PEER_MULTI_RAIL))) {
2817                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2818                                libcfs_nidstr(&lp->lp_primary_nid),
2819                                lp->lp_state);
2820                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2821                 }
2822                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2823         } else {
2824                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2825                        libcfs_nidstr(&lp->lp_primary_nid));
2826                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2827         }
2828
2829         /*
2830          * Update the MULTI_RAIL flag based on the reply. If the peer
2831          * was configured with DLC then the setting should match what
2832          * DLC put in.
2833          */
2834         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2835                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2836                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2837                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2838                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2839                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2840                               libcfs_nidstr(&lp->lp_primary_nid));
2841                 } else if (lnet_peer_discovery_disabled) {
2842                         CDEBUG(D_NET,
2843                                "peer %s(%p) not MR: DD disabled locally\n",
2844                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2845                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2846                         CDEBUG(D_NET,
2847                                "peer %s(%p) not MR: DD disabled remotely\n",
2848                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2849                 } else {
2850                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2851                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2852                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2853                         lnet_peer_clr_non_mr_pref_nids(lp);
2854                 }
2855         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2856                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2857                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2858                               libcfs_nidstr(&lp->lp_primary_nid));
2859                 } else {
2860                         CERROR("Multi-Rail state vanished from %s\n",
2861                                libcfs_nidstr(&lp->lp_primary_nid));
2862                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2863                 }
2864         }
2865
2866         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2867         /*
2868          * Make sure we'll allocate the correct size ping buffer when
2869          * pinging the peer.
2870          */
2871         if (lp->lp_data_bytes < infobytes)
2872                 lp->lp_data_bytes = infobytes;
2873
2874         /* Check for truncation of the Reply. Clear PING_SENT and set
2875          * PING_FAILED to trigger a retry.
2876          */
2877         if (pbuf->pb_nbytes < infobytes) {
2878                 if (the_lnet.ln_push_target_nbytes < infobytes)
2879                         the_lnet.ln_push_target_nbytes = infobytes;
2880                 lp->lp_state |= LNET_PEER_PING_FAILED;
2881                 lp->lp_ping_error = 0;
2882                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2883                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2884                 goto out;
2885         }
2886
2887         /*
2888          * Check the sequence numbers in the reply. These are only
2889          * available if the reply came from a Multi-Rail peer.
2890          */
2891         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2892             find_primary(&primary, pbuf) &&
2893             nid_same(&lp->lp_primary_nid, &primary)) {
2894                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2895                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2896                                 libcfs_nidstr(&lp->lp_primary_nid),
2897                                 LNET_PING_BUFFER_SEQNO(pbuf),
2898                                 lp->lp_peer_seqno);
2899
2900                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2901         }
2902
2903         /* We're happy with the state of the data in the buffer. */
2904         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2905                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2906                lp->lp_state);
2907         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2908                 lnet_ping_buffer_decref(lp->lp_data);
2909         else
2910                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2911         lnet_ping_buffer_addref(pbuf);
2912         lp->lp_data = pbuf;
2913 out:
2914         lp->lp_state &= ~LNET_PEER_PING_SENT;
2915         spin_unlock(&lp->lp_lock);
2916 }
2917
2918 /*
2919  * Send event handling. Only matters for error cases, where we clean
2920  * up state on the peer and peer_ni that would otherwise be updated in
2921  * the REPLY event handler for a successful Ping, and the ACK event
2922  * handler for a successful Push.
2923  */
2924 static int
2925 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2926 {
2927         int rc = 0;
2928
2929         if (!ev->status)
2930                 goto out;
2931
2932         spin_lock(&lp->lp_lock);
2933         if (ev->msg_type == LNET_MSG_GET) {
2934                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2935                 lp->lp_state |= LNET_PEER_PING_FAILED;
2936                 lp->lp_ping_error = ev->status;
2937         } else { /* ev->msg_type == LNET_MSG_PUT */
2938                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2939                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2940                 lp->lp_push_error = ev->status;
2941         }
2942         spin_unlock(&lp->lp_lock);
2943         rc = LNET_REDISCOVER_PEER;
2944 out:
2945         CDEBUG(D_NET, "%s Send to %s: %d\n",
2946                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2947                 libcfs_nidstr(&ev->target.nid), rc);
2948         return rc;
2949 }
2950
2951 /*
2952  * Unlink event handling. This event is only seen if a call to
2953  * LNetMDUnlink() caused the MD to be unlinked. If this call was
2954  * made after the MD was set up in LNetGet() or LNetPut() then we
2955  * assume the Ping or Push timed out.
2956  */
2957 static void
2958 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2959 {
2960         spin_lock(&lp->lp_lock);
2961         /* We've passed through LNetGet() */
2962         if (lp->lp_state & LNET_PEER_PING_SENT) {
2963                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2964                 lp->lp_state |= LNET_PEER_PING_FAILED;
2965                 lp->lp_ping_error = -ETIMEDOUT;
2966                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2967                         libcfs_nidstr(&lp->lp_primary_nid));
2968         }
2969         /* We've passed through LNetPut() */
2970         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2971                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2972                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2973                 lp->lp_push_error = -ETIMEDOUT;
2974                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2975                         libcfs_nidstr(&lp->lp_primary_nid));
2976         }
2977         spin_unlock(&lp->lp_lock);
2978 }
2979
2980 /*
2981  * Event handler for the discovery EQ.
2982  *
2983  * Called with lnet_res_lock(cpt) held. The cpt is the
2984  * lnet_cpt_of_cookie() of the md handle cookie.
2985  */
2986 static void lnet_discovery_event_handler(struct lnet_event *event)
2987 {
2988         struct lnet_peer *lp = event->md_user_ptr;
2989         struct lnet_ping_buffer *pbuf;
2990         int rc;
2991
2992         /* discovery needs to take another look */
2993         rc = LNET_REDISCOVER_PEER;
2994
2995         CDEBUG(D_NET, "Received event: %d\n", event->type);
2996
2997         switch (event->type) {
2998         case LNET_EVENT_ACK:
2999                 lnet_discovery_event_ack(lp, event);
3000                 break;
3001         case LNET_EVENT_REPLY:
3002                 lnet_discovery_event_reply(lp, event);
3003                 break;
3004         case LNET_EVENT_SEND:
3005                 /* Only send failure triggers a retry. */
3006                 rc = lnet_discovery_event_send(lp, event);
3007                 break;
3008         case LNET_EVENT_UNLINK:
3009                 /* LNetMDUnlink() was called */
3010                 lnet_discovery_event_unlink(lp, event);
3011                 break;
3012         default:
3013                 /* Invalid events. */
3014                 LBUG();
3015         }
3016         lnet_net_lock(LNET_LOCK_EX);
3017         if (event->unlinked) {
3018                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3019                 lnet_ping_buffer_decref(pbuf);
3020                 lnet_peer_decref_locked(lp);
3021         }
3022
3023         /* put peer back at end of request queue, if discovery not already
3024          * done */
3025         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3026             lnet_peer_queue_for_discovery(lp)) {
3027                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3028                 wake_up(&the_lnet.ln_dc_waitq);
3029         }
3030         lnet_net_unlock(LNET_LOCK_EX);
3031 }
3032
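/* Iterators over the NI status entries of a ping buffer. A sketch of
 * the contract, inferred from the code below: ping_iter_first() returns
 * a pointer to the status of the first (loopback) entry and optionally
 * its NID; ping_iter_next() walks the remaining entries, first the
 * fixed-size lnet_ni_status records and then, when
 * LNET_PING_FEAT_LARGE_ADDR is set, the variable-size
 * lnet_ni_large_status records. Iteration ends when NULL is returned.
 */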
3033 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3034                      struct lnet_ping_buffer *pbuf,
3035                      struct lnet_nid *nid)
3036 {
3037         pi->pinfo = &pbuf->pb_info;
3038         pi->pos = &pbuf->pb_info.pi_ni;
3039         pi->end = (void *)pi->pinfo +
3040                   min_t(int, pbuf->pb_nbytes,
3041                         lnet_ping_info_size(pi->pinfo));
3042         /* lnet_ping_info_validate ensures there will be one
3043          * lnet_ni_status at the start
3044          */
3045         if (nid)
3046                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3047
3048         pi->pos += sizeof(struct lnet_ni_status);
3049         return &pbuf->pb_info.pi_ni[0].ns_status;
3050 }
3051
3052 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3053 {
3054         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3055
3056         if (pi->pos < ((void *)pi->pinfo + off)) {
3057                 struct lnet_ni_status *ns = pi->pos;
3058
3059                 pi->pos = ns + 1;
3060                 if (pi->pos > pi->end)
3061                         return NULL;
3062                 if (nid)
3063                         lnet_nid4_to_nid(ns->ns_nid, nid);
3064                 return &ns->ns_status;
3065         }
3066
3067         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3068                 struct lnet_ni_large_status *lns = pi->pos;
3069
3070                 if (pi->pos + 8 > pi->end)
3071                         /* Not safe to examine next */
3072                         return NULL;
3073                 pi->pos = lnet_ping_sts_next(lns);
3074                 if (pi->pos > pi->end)
3075                         return NULL;
3076                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3077                         continue;
3078                 if (nid)
3079                         *nid = lns->ns_nid;
3080                 return &lns->ns_status;
3081         }
3082         return NULL;
3083 }
3084
3085 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3086 {
3087         struct lnet_ping_iter pi;
3088         u32 *st;
3089         int nnis = 0;
3090
3091         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3092              st = ping_iter_next(&pi, NULL))
3093                 nnis += 1;
3094
3095         return nnis;
3096 }
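/* Usage sketch for the iterator pair above (hypothetical helper, not
 * part of the original code): log every NID in a ping buffer together
 * with its reported status.
 */
static inline void ping_info_log_entries(struct lnet_ping_buffer *pbuf)
{
        struct lnet_ping_iter pi;
        struct lnet_nid nid;
        u32 *st;

        for (st = ping_iter_first(&pi, pbuf, &nid); st;
             st = ping_iter_next(&pi, &nid))
                CDEBUG(D_NET, "NID %s status %u\n",
                       libcfs_nidstr(&nid), *st);
}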
3097
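/* Seed local health from a discovered NI status (see LU-16563): a DOWN
 * status is handled as a remote failure, while an UP status on an lpni
 * we have never heard from directly sets its health to the maximum.
 */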
3098 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3099 {
3100         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN)
3101                 lnet_handle_remote_failure_locked(lpni);
3102         else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3103                  !lpni->lpni_last_alive)
3104                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3105 }
3106
3107 /*
3108  * Build a peer from incoming data.
3109  *
3110  * The NIDs in the incoming data are supposed to be structured as follows:
3111  *  - loopback
3112  *  - primary NID
3113  *  - other NIDs in same net
3114  *  - NIDs in second net
3115  *  - NIDs in third net
3116  *  - ...
3117  * This is due to the way the list of NIDs in the data is created.
3118  *
3119  * Note that this function will mark the peer uptodate unless an
3120  * ENOMEM is encountered. All other errors are due to a conflict
3121  * between the DLC configuration and what discovery sees. We treat DLC
3122  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3123  * peer from becoming stuck in discovery.
3124  */
3125 static int lnet_peer_merge_data(struct lnet_peer *lp,
3126                                 struct lnet_ping_buffer *pbuf)
3127 {
3128         struct lnet_peer_net *lpn;
3129         struct lnet_peer_ni *lpni;
3130         struct lnet_nid *curnis = NULL;
3131         struct lnet_ni_large_status *addnis = NULL;
3132         struct lnet_nid *delnis = NULL;
3133         struct lnet_ping_iter pi;
3134         struct lnet_nid nid;
3135         u32 *stp;
3136         struct lnet_nid primary = {};
3137         bool want_large_primary;
3138         unsigned int flags;
3139         int ncurnis;
3140         int naddnis;
3141         int ndelnis;
3142         int nnis = 0;
3143         int i;
3144         int j;
3145         int rc;
3146         __u32 old_st;
3147
3148         flags = LNET_PEER_DISCOVERED;
3149         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3150                 flags |= LNET_PEER_MULTI_RAIL;
3151
3152         /*
3153          * Cache the routing feature for the peer; whether it is enabled
3154          * or disabled as reported by the remote peer.
3155          */
3156         spin_lock(&lp->lp_lock);
3157         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3158                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3159         else
3160                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3161         spin_unlock(&lp->lp_lock);
3162
3163         nnis = ping_info_count_entries(pbuf);
3164         nnis = max_t(int, lp->lp_nnis, nnis);
3165         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3166         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3167         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3168         if (!curnis || !addnis || !delnis) {
3169                 rc = -ENOMEM;
3170                 goto out;
3171         }
3172         ncurnis = 0;
3173         naddnis = 0;
3174         ndelnis = 0;
3175
3176         /* Construct the list of NIDs present in peer. */
3177         lpni = NULL;
3178         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3179                 curnis[ncurnis++] = lpni->lpni_nid;
3180
3181         /* Check for NIDs in pbuf not present in curnis[].
3182          * Skip the first, which is loop-back.  Take second as
3183          * primary, unless a large primary is found.
3184          */
3185         ping_iter_first(&pi, pbuf, NULL);
3186         stp = ping_iter_next(&pi, &nid);
3187         if (stp)
3188                 primary = nid;
3189         want_large_primary = (pbuf->pb_info.pi_features &
3190                               LNET_PING_FEAT_PRIMARY_LARGE);
3191         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3192                 for (j = 0; j < ncurnis; j++)
3193                         if (nid_same(&nid, &curnis[j]))
3194                                 break;
3195                 if (j == ncurnis) {
3196                         addnis[naddnis].ns_nid = nid;
3197                         addnis[naddnis].ns_status = *stp;
3198                         naddnis += 1;
3199                 }
3200                 if (want_large_primary && nid.nid_size) {
3201                         primary = nid;
3202                         want_large_primary = false;
3203                 }
3204         }
3205         /*
3206          * Check for NIDs in curnis[] not present in pbuf.
3207          * The inner iteration starts at entry 1, skipping the loopback NID.
3208          *
3209          * But never add the loopback NID to delnis[]: if it is
3210          * present in curnis[] then this peer is for this node.
3211          */
3212         for (i = 0; i < ncurnis; i++) {
3213                 if (nid_is_lo0(&curnis[i]))
3214                         continue;
3215                 ping_iter_first(&pi, pbuf, NULL);
3216                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3217                         if (nid_same(&curnis[i], &nid)) {
3218                                 /*
3219                                  * update the information we cache for the
3220                                  * peer with the latest information we
3221                                  * received
3222                                  */
3223                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3224                                 if (lpni) {
3225                                         old_st = lpni->lpni_ns_status;
3226                                         lpni->lpni_ns_status = *stp;
3227                                         if (old_st != lpni->lpni_ns_status)
3228                                                 handle_disc_lpni_health(lpni);
3229                                         lnet_peer_ni_decref_locked(lpni);
3230                                 }
3231                                 break;
3232                         }
3233                 }
3234                 if (!stp)
3235                         delnis[ndelnis++] = curnis[i];
3236         }
3237
3238         /*
3239          * If we get here and discovery is disabled then we don't want
3240          * to add or delete any NIs. We have just updated the ones we
3241          * have information on, and we're done.
3242          */
3243         rc = 0;
3244         if (lnet_is_discovery_disabled(lp))
3245                 goto out;
3246
3247         for (i = 0; i < naddnis; i++) {
3248                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3249                 if (rc) {
3250                         CERROR("Error adding NID %s to peer %s: %d\n",
3251                                libcfs_nidstr(&addnis[i].ns_nid),
3252                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3253                         if (rc == -ENOMEM)
3254                                 goto out;
3255                 }
3256                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3257                 if (lpni) {
3258                         lpni->lpni_ns_status = addnis[i].ns_status;
3259                         handle_disc_lpni_health(lpni);
3260                         lnet_peer_ni_decref_locked(lpni);
3261                 }
3262         }
3263
3264         for (i = 0; i < ndelnis; i++) {
3265                 /*
3266                  * for routers it's okay to delete the primary_nid because
3267                  * the upper layers don't really rely on it. So if we're
3268                  * being told that the router changed its primary_nid
3269                  * then it's okay to delete it.
3270                  */
3271                 if (lp->lp_rtr_refcount > 0)
3272                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3273                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3274                 if (rc) {
3275                         CERROR("Error deleting NID %s from peer %s: %d\n",
3276                                libcfs_nidstr(&delnis[i]),
3277                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3278                         if (rc == -ENOMEM)
3279                                 goto out;
3280                 }
3281         }
3282
3283         /* The peer net for the primary NID should be the first entry in the
3284          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3285          * be the first entry in its peer net's lpn_peer_nis list.
3286          */
3287         find_primary(&nid, pbuf);
3288         lpni = lnet_peer_ni_find_locked(&nid);
3289         if (!lpni) {
3290                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3291                        libcfs_nidstr(&nid));
3292                 goto out;
3293         }
3294
3295         lnet_peer_ni_decref_locked(lpni);
3296
3297         lpn = lpni->lpni_peer_net;
3298         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3299                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3300
3301         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3302                 list_move(&lpni->lpni_peer_nis,
3303                           &lpni->lpni_peer_net->lpn_peer_nis);
3304
3305         /*
3306          * Errors other than -ENOMEM are due to peers having been
3307          * configured with DLC. Ignore these because DLC overrides
3308          * Discovery.
3309          */
3310         rc = 0;
3311 out:
3312         /* If this peer is a gateway, invoke the routing callback to update
3313          * the associated route status
3314          */
3315         if (lp->lp_rtr_refcount > 0)
3316                 lnet_router_discovery_ping_reply(lp, pbuf);
3317
3318         CFS_FREE_PTR_ARRAY(curnis, nnis);
3319         CFS_FREE_PTR_ARRAY(addnis, nnis);
3320         CFS_FREE_PTR_ARRAY(delnis, nnis);
3321         lnet_ping_buffer_decref(pbuf);
3322         CDEBUG(D_NET, "peer %s (%p): %d\n",
3323                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3324
3325         if (rc) {
3326                 spin_lock(&lp->lp_lock);
3327                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3328                 lp->lp_state |= LNET_PEER_FORCE_PING;
3329                 spin_unlock(&lp->lp_lock);
3330         }
3331         return rc;
3332 }
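/* Worked example for lnet_peer_merge_data(), with hypothetical NIDs: if
 * the peer currently holds {10.0.0.1@tcp, 10.0.0.2@tcp} and the ping
 * buffer lists {0@lo, 10.0.0.1@tcp, 192.168.0.1@o2ib}, then
 * addnis = {192.168.0.1@o2ib} and delnis = {10.0.0.2@tcp}: the o2ib NID
 * is added, the stale tcp NID is deleted, and 10.0.0.1@tcp only has its
 * cached status refreshed.
 */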
3333
3334 /*
3335  * The data in pbuf says lp is its primary peer, but the data was
3336  * received by a different peer. Try to update lp with the data.
3337  */
3338 static int
3339 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3340 {
3341         struct lnet_handle_md mdh;
3342
3343         /* Queue lp for discovery, and force it on the request queue. */
3344         lnet_net_lock(LNET_LOCK_EX);
3345         if (lnet_peer_queue_for_discovery(lp))
3346                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3347         lnet_net_unlock(LNET_LOCK_EX);
3348
3349         LNetInvalidateMDHandle(&mdh);
3350
3351         /*
3352          * Decide whether we can move the peer to the DATA_PRESENT state.
3353          *
3354          * We replace stale data for a multi-rail peer, repair PING_FAILED
3355          * status, and preempt FORCE_PING.
3356          *
3357          * If after that we have DATA_PRESENT, we merge it into this peer.
3358          */
3359         spin_lock(&lp->lp_lock);
3360         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3361                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3362                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3363                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3364                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3365                         lnet_ping_buffer_decref(pbuf);
3366                         pbuf = lp->lp_data;
3367                         lp->lp_data = NULL;
3368                 }
3369         }
3370         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3371                 lnet_ping_buffer_decref(lp->lp_data);
3372                 lp->lp_data = NULL;
3373                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3374         }
3375         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3376                 mdh = lp->lp_ping_mdh;
3377                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3378                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3379                 lp->lp_ping_error = 0;
3380         }
3381         if (lp->lp_state & LNET_PEER_FORCE_PING)
3382                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3383         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3384         spin_unlock(&lp->lp_lock);
3385
3386         if (!LNetMDHandleIsInvalid(mdh))
3387                 LNetMDUnlink(mdh);
3388
3389         if (pbuf)
3390                 return lnet_peer_merge_data(lp, pbuf);
3391
3392         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3393         return 0;
3394 }
3395
3396 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3397                                      struct lnet_ping_buffer *pbuf)
3398 {
3399         struct lnet_ping_iter pi;
3400         struct lnet_nid pnid;
3401         u32 *st;
3402
3403         for (st = ping_iter_first(&pi, pbuf, &pnid);
3404              st;
3405              st = ping_iter_next(&pi, &pnid))
3406                 if (nid_same(nid, &pnid))
3407                         return true;
3408         return false;
3409 }
3410
3411 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3412  * to the discovery queue a reference was taken that will prevent the peer from
3413  * actually being freed by this function. After this function exits the
3414  * discovery thread should call lnet_peer_discovery_complete() which will
3415  * drop that reference as well as wake any waiters that may also be holding a
3416  * ref on the peer.
3417  */
3418 static int lnet_peer_deletion(struct lnet_peer *lp)
3419 __must_hold(&lp->lp_lock)
3420 {
3421         struct list_head rlist;
3422         struct lnet_route *route, *tmp;
3423         int sensitivity = lp->lp_health_sensitivity;
3424         int rc = 0;
3425
3426         INIT_LIST_HEAD(&rlist);
3427
3428         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3429                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3430
3431         /* no-op if lnet_peer_del() has already been called on this peer */
3432         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3433                 goto clear_discovering;
3434
3435         spin_unlock(&lp->lp_lock);
3436
3437         mutex_lock(&the_lnet.ln_api_mutex);
3438         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3439             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3440                 mutex_unlock(&the_lnet.ln_api_mutex);
3441                 spin_lock(&lp->lp_lock);
3442                 rc = -ESHUTDOWN;
3443                 goto clear_discovering;
3444         }
3445
3446         lnet_peer_cancel_discovery(lp);
3447         lnet_net_lock(LNET_LOCK_EX);
3448         list_for_each_entry_safe(route, tmp,
3449                                  &lp->lp_routes,
3450                                  lr_gwlist)
3451                 lnet_move_route(route, NULL, &rlist);
3452
3453         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3454         rc = lnet_peer_del_locked(lp);
3455         if (rc)
3456                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3457                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3458
3459         lnet_net_unlock(LNET_LOCK_EX);
3460
3461         list_for_each_entry_safe(route, tmp,
3462                                  &rlist, lr_list) {
3463                 /* re-add these routes */
3464                 lnet_add_route(route->lr_net,
3465                                route->lr_hops,
3466                                &route->lr_nid,
3467                                route->lr_priority,
3468                                sensitivity);
3469                 LIBCFS_FREE(route, sizeof(*route));
3470         }
3471
3472         mutex_unlock(&the_lnet.ln_api_mutex);
3473
3474         spin_lock(&lp->lp_lock);
3475
3476         rc = 0;
3477
3478 clear_discovering:
3479         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3480                           LNET_PEER_FORCE_PUSH);
3481
3482         return rc;
3483 }
3484
3485 /*
3486  * Update a peer using the data received.
3487  */
3488 static int lnet_peer_data_present(struct lnet_peer *lp)
3489 __must_hold(&lp->lp_lock)
3490 {
3491         struct lnet_ping_buffer *pbuf;
3492         struct lnet_peer_ni *lpni;
3493         struct lnet_nid nid;
3494         unsigned int flags;
3495         int rc = 0;
3496
3497         pbuf = lp->lp_data;
3498         lp->lp_data = NULL;
3499         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3500         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3501         spin_unlock(&lp->lp_lock);
3502
3503         /*
3504          * Modifications of peer structures are done while holding the
3505          * ln_api_mutex. A global lock is required because we may be
3506          * modifying multiple peer structures, and a mutex greatly
3507          * simplifies memory management.
3508          *
3509          * The actual changes to the data structures must also protect
3510          * against concurrent lookups, for which the lnet_net_lock in
3511          * LNET_LOCK_EX mode is used.
3512          */
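        /*
         * The resulting pattern, as a sketch (the EX lock is taken by
         * the helpers that modify the peer tables):
         *
         *      mutex_lock(&the_lnet.ln_api_mutex);
         *      lnet_net_lock(LNET_LOCK_EX);
         *      ... modify peer structures ...
         *      lnet_net_unlock(LNET_LOCK_EX);
         *      mutex_unlock(&the_lnet.ln_api_mutex);
         */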
3513         mutex_lock(&the_lnet.ln_api_mutex);
3514         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3515                 rc = -ESHUTDOWN;
3516                 goto out;
3517         }
3518
3519         /*
3520          * If this peer is not on the peer list then it is being torn
3521          * down, and our reference count may be all that is keeping it
3522          * alive. Don't do any work on it.
3523          */
3524         if (list_empty(&lp->lp_peer_list)) {
3525                 lnet_ping_buffer_decref(pbuf);
3526                 goto out;
3527         }
3528
3529         flags = LNET_PEER_DISCOVERED;
3530         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3531                 flags |= LNET_PEER_MULTI_RAIL;
3532
3533         /*
3534          * Check whether the primary NID in the message matches the
3535          * primary NID of the peer. If it does, update the peer; if
3536          * it does not, check whether there is already a peer with
3537          * that primary NID. If no such peer exists, try to update
3538          * the primary NID of the current peer (allowed if it was
3539          * created due to message traffic) and complete the update.
3540          * If the peer did exist, hand off the data to it.
3541          *
3542          * The peer for the loopback interface is a special case: this
3543          * is the peer for the local node, and we want to set its
3544          * primary NID to the correct value here. Moreover, this peer
3545          * can show up with only the loopback NID in the ping buffer.
3546          */
3547         if (!find_primary(&nid, pbuf)) {
3548                 lnet_ping_buffer_decref(pbuf);
3549                 goto out;
3550         }
3551         if (nid_is_lo0(&lp->lp_primary_nid)) {
3552                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3553                 if (rc)
3554                         lnet_ping_buffer_decref(pbuf);
3555                 else
3556                         rc = lnet_peer_merge_data(lp, pbuf);
3557         /*
3558          * If the primary NID of the peer is present in the ping info
3559          * returned from the peer, but it is not the primary NID we have
3560          * cached locally, and discovery is disabled, then we don't want
3561          * to update our local peer info by adding or removing NIDs. We
3562          * just want to update the status of the NIDs that we currently
3563          * have recorded in that peer.
3564          */
3565         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3566                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3567                     lnet_is_discovery_disabled(lp))) {
3568                 rc = lnet_peer_merge_data(lp, pbuf);
3569         } else {
3570                 lpni = lnet_peer_ni_find_locked(&nid);
3571                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3572                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3573                         if (rc) {
3574                                 CERROR("Primary NID error %s versus %s: %d\n",
3575                                        libcfs_nidstr(&lp->lp_primary_nid),
3576                                        libcfs_nidstr(&nid), rc);
3577                                 lnet_ping_buffer_decref(pbuf);
3578                         } else {
3579                                 rc = lnet_peer_merge_data(lp, pbuf);
3580                         }
3581                         if (lpni)
3582                                 lnet_peer_ni_decref_locked(lpni);
3583                 } else {
3584                         struct lnet_peer *new_lp;
3585                         new_lp = lpni->lpni_peer_net->lpn_peer;
3586                         /*
3587                          * if lp has discovery/MR enabled that means new_lp
3588                          * should have discovery/MR enabled as well, since
3589                          * it's the same peer, which we're about to merge
3590                          */
3591                         spin_lock(&lp->lp_lock);
3592                         spin_lock(&new_lp->lp_lock);
3593                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3594                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3595                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3596                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3597                         /* If we're processing a ping reply then we may be
3598                          * about to send a push to the peer that we ping'd.
3599                          * Since the ping reply that we're processing was
3600                          * received by lp, we need to set the discovery source
3601                          * NID for new_lp to the NID stored in lp.
3602                          */
3603                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3604                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3605                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3606                         }
3607                         spin_unlock(&new_lp->lp_lock);
3608                         spin_unlock(&lp->lp_lock);
3609
3610                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3611                         lnet_consolidate_routes_locked(lp, new_lp);
3612                         lnet_peer_ni_decref_locked(lpni);
3613                 }
3614         }
3615 out:
3616         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3617                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3618                lp->lp_state);
3619         mutex_unlock(&the_lnet.ln_api_mutex);
3620
3621         spin_lock(&lp->lp_lock);
3622         /* Tell discovery to re-check the peer immediately. */
3623         if (!rc)
3624                 rc = LNET_REDISCOVER_PEER;
3625         return rc;
3626 }
3627
3628 /*
3629  * A ping failed. Clear the PING_FAILED state and set the
3630  * FORCE_PING state, to ensure a retry even if discovery is
3631  * disabled. This avoids being left with incorrect state.
3632  */
3633 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3634 __must_hold(&lp->lp_lock)
3635 {
3636         struct lnet_handle_md mdh;
3637         int rc;
3638
3639         mdh = lp->lp_ping_mdh;
3640         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3641         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3642         lp->lp_state |= LNET_PEER_FORCE_PING;
3643         rc = lp->lp_ping_error;
3644         lp->lp_ping_error = 0;
3645         spin_unlock(&lp->lp_lock);
3646
3647         if (!LNetMDHandleIsInvalid(mdh))
3648                 LNetMDUnlink(mdh);
3649
3650         CDEBUG(D_NET, "peer %s:%d\n",
3651                libcfs_nidstr(&lp->lp_primary_nid), rc);
3652
3653         spin_lock(&lp->lp_lock);
3654         return rc ? rc : LNET_REDISCOVER_PEER;
3655 }
3656
3657 /* Active side of ping. */
3658 static int lnet_peer_send_ping(struct lnet_peer *lp)
3659 __must_hold(&lp->lp_lock)
3660 {
3661         int bytes;
3662         int rc;
3663         int cpt;
3664
3665         lp->lp_state |= LNET_PEER_PING_SENT;
3666         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3667         spin_unlock(&lp->lp_lock);
3668
3669         cpt = lnet_net_lock_current();
3670         /* Refcount for MD. */
3671         lnet_peer_addref_locked(lp);
3672         lnet_net_unlock(cpt);
3673
3674         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3675
3676         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3677                             the_lnet.ln_dc_handler, false);
3678         /* if LNetMDBind in lnet_send_ping fails we need to decrement the
3679          * refcount on the peer, otherwise LNetMDUnlink will be called
3680          * which will eventually do that.
3681          */
3682         if (rc > 0) {
3683                 lnet_net_lock(cpt);
3684                 lnet_peer_decref_locked(lp);
3685                 lnet_net_unlock(cpt);
3686                 rc = -rc; /* change rc to a negative value */
3687                 goto fail_error;
3688         } else if (rc < 0) {
3689                 goto fail_error;
3690         }
3691
3692         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3693
3694         spin_lock(&lp->lp_lock);
3695         return 0;
3696
3697 fail_error:
3698         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3699         /*
3700          * The errors that get us here are considered hard errors and
3701          * cause Discovery to terminate. So we clear PING_SENT, but do
3702          * not set either PING_FAILED or FORCE_PING. In fact we need
3703          * to clear PING_FAILED, because the unlink event handler will
3704          * have set it if we called LNetMDUnlink() above.
3705          */
3706         spin_lock(&lp->lp_lock);
3707         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3708         return rc;
3709 }
3710
3711 /*
3712  * This function exists because you cannot call LNetMDUnlink() from an
3713  * event handler.
3714  */
3715 static int lnet_peer_push_failed(struct lnet_peer *lp)
3716 __must_hold(&lp->lp_lock)
3717 {
3718         struct lnet_handle_md mdh;
3719         int rc;
3720
3721         mdh = lp->lp_push_mdh;
3722         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3723         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3724         rc = lp->lp_push_error;
3725         lp->lp_push_error = 0;
3726         spin_unlock(&lp->lp_lock);
3727
3728         if (!LNetMDHandleIsInvalid(mdh))
3729                 LNetMDUnlink(mdh);
3730
3731         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3732         spin_lock(&lp->lp_lock);
3733         return rc ? rc : LNET_REDISCOVER_PEER;
3734 }
3735
3736 /*
3737  * Mark the peer as discovered.
3738  */
3739 static int lnet_peer_discovered(struct lnet_peer *lp)
3740 __must_hold(&lp->lp_lock)
3741 {
3742         lp->lp_state |= LNET_PEER_DISCOVERED;
3743         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3744                           LNET_PEER_REDISCOVER);
3745
3746         lp->lp_dc_error = 0;
3747
3748         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3749
3750         return 0;
3751 }
3752
3753 /* Active side of push. */
3754 static int lnet_peer_send_push(struct lnet_peer *lp)
3755 __must_hold(&lp->lp_lock)
3756 {
3757         struct lnet_ping_buffer *pbuf;
3758         struct lnet_processid id;
3759         struct lnet_md md;
3760         int cpt;
3761         int rc;
3762
3763         /* Don't push to a non-multi-rail peer. */
3764         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3765                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3766                 /* if peer's NIDs are uptodate then peer is discovered */
3767                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3768                         rc = lnet_peer_discovered(lp);
3769                         return rc;
3770                 }
3771
3772                 return 0;
3773         }
3774
3775         lp->lp_state |= LNET_PEER_PUSH_SENT;
3776         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3777         spin_unlock(&lp->lp_lock);
3778
3779         cpt = lnet_net_lock_current();
3780         pbuf = the_lnet.ln_ping_target;
3781         lnet_ping_buffer_addref(pbuf);
3782         lnet_net_unlock(cpt);
3783
3784         /* Push source MD */
3785         md.start     = &pbuf->pb_info;
3786         md.length    = pbuf->pb_nbytes;
3787         md.threshold = 2; /* Put/Ack */
3788         md.max_size  = 0;
3789         md.options   = LNET_MD_TRACK_RESPONSE;
3790         md.handler   = the_lnet.ln_dc_handler;
3791         md.user_ptr  = lp;
3792
3793         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3794         if (rc) {
3795                 lnet_ping_buffer_decref(pbuf);
3796                 CERROR("Can't bind push source MD: %d\n", rc);
3797                 goto fail_error;
3798         }
3799
3800         cpt = lnet_net_lock_current();
3801         /* Refcount for MD. */
3802         lnet_peer_addref_locked(lp);
3803         id.pid = LNET_PID_LUSTRE;
3804         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3805                 id.nid = lp->lp_disc_dst_nid;
3806         else
3807                 id.nid = lp->lp_primary_nid;
3808         lnet_net_unlock(cpt);
3809
3810         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3811                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3812                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3813
3814         /*
3815          * Reset the discovery NIDs. There is no need to restrict sending
3816          * from that source if we call lnet_push_update_to_peers(). They
3817          * will be set to specific NIDs if we initiate discovery from
3818          * scratch.
3819          */
3820         lp->lp_disc_src_nid = LNET_ANY_NID;
3821         lp->lp_disc_dst_nid = LNET_ANY_NID;
3822
3823         if (rc)
3824                 goto fail_unlink;
3825
3826         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3827
3828         spin_lock(&lp->lp_lock);
3829         return 0;
3830
3831 fail_unlink:
3832         LNetMDUnlink(lp->lp_push_mdh);
3833         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3834 fail_error:
3835         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3836                lp, rc);
3837         /*
3838          * The errors that get us here are considered hard errors and
3839          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3840          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3841          * because the unlink event handler will have set it if we
3842          * called LNetMDUnlink() above.
3843          */
3844         spin_lock(&lp->lp_lock);
3845         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3846         return rc;
3847 }
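/* Wire summary for the push above, as a sketch based on the code: the
 * local ping source buffer is bound to an MD with threshold 2 (one PUT,
 * one ACK) and sent with LNetPut(..., LNET_ACK_REQ, ...) to the peer's
 * LNET_RESERVED_PORTAL using LNET_PROTO_PING_MATCHBITS. The ACK, or a
 * send failure, is delivered to the_lnet.ln_dc_handler and dispatched
 * by lnet_discovery_event_handler() above.
 */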
3848
3849 /*
3850  * Wait for work to be queued or some other change that must be
3851  * attended to. Returns non-zero if the discovery thread should shut
3852  * down.
3853  */
3854 static int lnet_peer_discovery_wait_for_work(void)
3855 {
3856         int cpt;
3857         int rc = 0;
3858
3859         DEFINE_WAIT(wait);
3860
3861         cpt = lnet_net_lock_current();
3862         for (;;) {
3863                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3864                                 TASK_INTERRUPTIBLE);
3865                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3866                         break;
3867                 if (lnet_push_target_resize_needed() ||
3868                     the_lnet.ln_push_target->pb_needs_post)
3869                         break;
3870                 if (!list_empty(&the_lnet.ln_dc_request))
3871                         break;
3872                 if (!list_empty(&the_lnet.ln_msg_resend))
3873                         break;
3874                 lnet_net_unlock(cpt);
3875
3876                 /*
3877                  * Wake up at most once per second to check whether any
3878                  * peers have been stuck on the working queue for longer
3879                  * than the peer timeout.
3880                  */
3881                 schedule_timeout(cfs_time_seconds(1));
3882                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3883                 cpt = lnet_net_lock_current();
3884         }
3885         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3886
3887         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3888                 rc = -ESHUTDOWN;
3889
3890         lnet_net_unlock(cpt);
3891
3892         CDEBUG(D_NET, "woken: %d\n", rc);
3893
3894         return rc;
3895 }
3896
3897 /*
3898  * Messages that were pending on a destroyed peer will be put on a global
3899  * resend list, which the discovery thread will check when it wakes
3900  * up, resending any messages it finds there. These messages may
3901  * still be sendable if the lpni that initially caused the message
3902  * to be re-queued has been transferred to another peer.
3903  *
3904  * It is possible that LNet could be shut down while we're iterating
3905  * through the list. lnet_shutdown_lndnets() will attempt to access the
3906  * resend list, but will have to wait until the spinlock is released, by
3907  * which time there shouldn't be any more messages on the resend list.
3908  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3909  * for the messages so they can be released. The other case is that
3910  * lnet_shutdown_lndnets() can finalize all the messages before this
3911  * function can visit the resend list, in which case this function will be
3912  * a no-op.
3913  */
3914 static void lnet_resend_msgs(void)
3915 {
3916         struct lnet_msg *msg, *tmp;
3917         LIST_HEAD(resend);
3918         int rc;
3919
3920         spin_lock(&the_lnet.ln_msg_resend_lock);
3921         list_splice(&the_lnet.ln_msg_resend, &resend);
3922         spin_unlock(&the_lnet.ln_msg_resend_lock);
3923
3924         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3925                 list_del_init(&msg->msg_list);
3926                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3927                                &msg->msg_rtr_nid_param);
3928                 if (rc < 0) {
3929                         CNETERR("Error sending %s to %s: %d\n",
3930                                lnet_msgtyp2str(msg->msg_type),
3931                                libcfs_idstr(&msg->msg_target), rc);
3932                         lnet_finalize(msg, rc);
3933                 }
3934         }
3935 }
3936
3937 /* The discovery thread. */
3938 static int lnet_peer_discovery(void *arg)
3939 {
3940         struct lnet_peer *lp;
3941         int rc;
3942
3943         wait_for_completion(&the_lnet.ln_started);
3944
3945         CDEBUG(D_NET, "started\n");
3946
3947         for (;;) {
3948                 if (lnet_peer_discovery_wait_for_work())
3949                         break;
3950
3951                 if (lnet_push_target_resize_needed())
3952                         lnet_push_target_resize();
3953                 else if (the_lnet.ln_push_target->pb_needs_post)
3954                         lnet_push_target_post(the_lnet.ln_push_target,
3955                                               &the_lnet.ln_push_target_md);
3956
3957                 lnet_resend_msgs();
3958
3959                 lnet_net_lock(LNET_LOCK_EX);
3960                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3961                         lnet_net_unlock(LNET_LOCK_EX);
3962                         break;
3963                 }
3964
3965                 /*
3966                  * Process all incoming discovery work requests.  When
3967                  * discovery must wait on a peer to change state, it
3968                  * is added to the tail of the ln_dc_working queue. A
3969                  * timestamp keeps track of when the peer was added,
3970                  * so we can time out discovery requests that take too
3971                  * long.
3972                  */
3973                 while (!list_empty(&the_lnet.ln_dc_request)) {
3974                         lp = list_first_entry(&the_lnet.ln_dc_request,
3975                                               struct lnet_peer, lp_dc_list);
3976                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3977                         /*
3978                          * set the time the peer was put on the dc_working
3979                          * queue. It shouldn't remain on the queue
3980                          * forever, in case the GET message (for ping)
3981                          * doesn't get a REPLY or the PUT message (for
3982                          * push) doesn't get an ACK.
3983                          */
3984                         lp->lp_last_queued = ktime_get_real_seconds();
3985                         lnet_net_unlock(LNET_LOCK_EX);
3986
3987                         if (lnet_push_target_resize_needed())
3988                                 lnet_push_target_resize();
3989                         else if (the_lnet.ln_push_target->pb_needs_post)
3990                                 lnet_push_target_post(the_lnet.ln_push_target,
3991                                                       &the_lnet.ln_push_target_md);
3992
3993                         /*
3994                          * Select an action depending on the state of
3995                          * the peer and whether discovery is disabled.
3996                          * The check whether discovery is disabled is
3997                          * done after the code that handles processing
3998                          * for arrived data, cleanup for failures, and
3999                          * forcing a Ping or Push.
4000                          */
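                        /*
                         * Dispatch priority, summarized: deletion, then
                         * arrived data, then ping/push failure cleanup,
                         * then a forced ping or push, then a ping to
                         * refresh stale NIDs, then a push if one is
                         * needed, and finally mark the peer discovered.
                         */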
4001                         spin_lock(&lp->lp_lock);
4002                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4003                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4004                                 lp->lp_state);
4005                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4006                                             LNET_PEER_MARK_DELETED))
4007                                 rc = lnet_peer_deletion(lp);
4008                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4009                                 rc = lnet_peer_data_present(lp);
4010                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4011                                 rc = lnet_peer_ping_failed(lp);
4012                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4013                                 rc = lnet_peer_push_failed(lp);
4014                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4015                                 rc = lnet_peer_send_ping(lp);
4016                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4017                                 rc = lnet_peer_send_push(lp);
4018                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4019                                 rc = lnet_peer_send_ping(lp);
4020                         else if (lnet_peer_needs_push(lp))
4021                                 rc = lnet_peer_send_push(lp);
4022                         else
4023                                 rc = lnet_peer_discovered(lp);
4024                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4025                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4026                                 lp->lp_state, rc);
4027
4028                         if (rc == LNET_REDISCOVER_PEER) {
4029                                 spin_unlock(&lp->lp_lock);
4030                                 lnet_net_lock(LNET_LOCK_EX);
4031                                 list_move(&lp->lp_dc_list,
4032                                           &the_lnet.ln_dc_request);
4033                         } else if (rc ||
4034                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4035                                 spin_unlock(&lp->lp_lock);
4036                                 lnet_net_lock(LNET_LOCK_EX);
4037                                 lnet_peer_discovery_complete(lp, rc);
4038                         } else {
4039                                 spin_unlock(&lp->lp_lock);
4040                                 lnet_net_lock(LNET_LOCK_EX);
4041                         }
4042
4043                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4044                                 break;
4045
4046                 }
4047
4048                 lnet_net_unlock(LNET_LOCK_EX);
4049         }
4050
4051         CDEBUG(D_NET, "stopping\n");
4052         /*
4053          * Clean up before telling lnet_peer_discovery_stop() that
4054          * we're done. Use wake_up() below to somewhat reduce the
4055          * size of the thundering herd if there are multiple threads
4056          * waiting on discovery of a single peer.
4057          */
4058
4059         /* Queue cleanup 1: stop all pending pings and pushes. */
4060         lnet_net_lock(LNET_LOCK_EX);
4061         while (!list_empty(&the_lnet.ln_dc_working)) {
4062                 lp = list_first_entry(&the_lnet.ln_dc_working,
4063                                       struct lnet_peer, lp_dc_list);
4064                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4065                 lnet_net_unlock(LNET_LOCK_EX);
4066                 lnet_peer_cancel_discovery(lp);
4067                 lnet_net_lock(LNET_LOCK_EX);
4068         }
4069         lnet_net_unlock(LNET_LOCK_EX);
4070
4071         /* Queue cleanup 2: wait for the expired queue to clear. */
4072         while (!list_empty(&the_lnet.ln_dc_expired))
4073                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4074
4075         /* Queue cleanup 3: clear the request queue. */
4076         lnet_net_lock(LNET_LOCK_EX);
4077         while (!list_empty(&the_lnet.ln_dc_request)) {
4078                 lp = list_first_entry(&the_lnet.ln_dc_request,
4079                                       struct lnet_peer, lp_dc_list);
4080                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4081         }
4082         lnet_net_unlock(LNET_LOCK_EX);
4083
4084         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
4085         the_lnet.ln_dc_handler = NULL;
4086
4087         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4088         wake_up(&the_lnet.ln_dc_waitq);
4089
4090         CDEBUG(D_NET, "stopped\n");
4091
4092         return 0;
4093 }
4094
4095 /* ln_api_mutex is held on entry. */
4096 int lnet_peer_discovery_start(void)
4097 {
4098         struct task_struct *task;
4099         int rc = 0;
4100
4101         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4102                 return -EALREADY;
4103
4104         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4105         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4106         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4107         if (IS_ERR(task)) {
4108                 rc = PTR_ERR(task);
4109                 CERROR("Can't start peer discovery thread: %d\n", rc);
4110
4111                 the_lnet.ln_dc_handler = NULL;
4112
4113                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4114         }
4115
4116         CDEBUG(D_NET, "discovery start: %d\n", rc);
4117
4118         return rc;
4119 }
4120
4121 /* ln_api_mutex is held on entry. */
4122 void lnet_peer_discovery_stop(void)
4123 {
4124         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4125                 return;
4126
4127         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4128         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4129
4130         /* In the LNetNIInit() path we may be stopping discovery before it
4131          * entered its work loop
4132          */
4133         if (!completion_done(&the_lnet.ln_started))
4134                 complete(&the_lnet.ln_started);
4135         else
4136                 wake_up(&the_lnet.ln_dc_waitq);
4137
4138         mutex_unlock(&the_lnet.ln_api_mutex);
4139         wait_event(the_lnet.ln_dc_waitq,
4140                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4141         mutex_lock(&the_lnet.ln_api_mutex);
4142
4143         LASSERT(list_empty(&the_lnet.ln_dc_request));
4144         LASSERT(list_empty(&the_lnet.ln_dc_working));
4145         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4146
4147         CDEBUG(D_NET, "discovery stopped\n");
4148 }
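/* Typical pairing, as a sketch (both functions expect the caller to
 * hold ln_api_mutex, as noted above):
 *
 *      mutex_lock(&the_lnet.ln_api_mutex);
 *      rc = lnet_peer_discovery_start();
 *      ...
 *      lnet_peer_discovery_stop();
 *      mutex_unlock(&the_lnet.ln_api_mutex);
 */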
4149
4150 /* Debugging */
4151
4152 void
4153 lnet_debug_peer(struct lnet_nid *nid)
4154 {
4155         char                    *aliveness = "NA";
4156         struct lnet_peer_ni     *lp;
4157         int                     cpt;
4158
4159         cpt = lnet_nid2cpt(nid, NULL);
4160         lnet_net_lock(cpt);
4161
4162         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4163         if (IS_ERR(lp)) {
4164                 lnet_net_unlock(cpt);
4165                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4166                 return;
4167         }
4168
4169         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4170                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4171
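        /* Columns, matching the format string below: NID, refcount,
         * aliveness, peer tx credits (net tunable), rtr credits,
         * min rtr credits, tx credits, min tx credits, tx qnob.
         */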
4172         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4173                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4174                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4175                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4176                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4177
4178         lnet_peer_ni_decref_locked(lp);
4179
4180         lnet_net_unlock(cpt);
4181 }
4182
4183 /* Gathering information for userspace. */
4184
4185 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4186                           char aliveness[LNET_MAX_STR_LEN],
4187                           __u32 *cpt_iter, __u32 *refcount,
4188                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4189                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4190                           __u32 *peer_tx_qnob)
4191 {
4192         struct lnet_peer_table          *peer_table;
4193         struct lnet_peer_ni             *lp;
4194         int                             j;
4195         int                             lncpt;
4196         bool                            found = false;
4197
4198         /* get the number of CPTs */
4199         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4200
4201         /* if the cpt number to be examined is >= the number of cpts in
4202          * the system then indicate that there are no more cpts to examine
4203          */
4204         if (*cpt_iter >= lncpt)
4205                 return -ENOENT;
4206
4207         /* get the current table */
4208         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4209         /* if the ptable is NULL then there are no more cpts to examine */
4210         if (peer_table == NULL)
4211                 return -ENOENT;
4212
4213         lnet_net_lock(*cpt_iter);
4214
4215         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4216                 struct list_head *peers = &peer_table->pt_hash[j];
4217
4218                 list_for_each_entry(lp, peers, lpni_hashlist) {
4219                         if (!nid_is_nid4(&lp->lpni_nid))
4220                                 continue;
4221                         if (peer_index-- > 0)
4222                                 continue;
4223
4224                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4225                         if (lnet_isrouter(lp) ||
4226                                 lnet_peer_aliveness_enabled(lp))
4227                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4228                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4229
4230                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4231                         *refcount = kref_read(&lp->lpni_kref);
4232                         *ni_peer_tx_credits =
4233                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4234                         *peer_tx_credits = lp->lpni_txcredits;
4235                         *peer_rtr_credits = lp->lpni_rtrcredits;
4236                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
4237                         *peer_tx_qnob = lp->lpni_txqnob;
4238
4239                         found = true;
4240                 }
4241
4242         }
4243         lnet_net_unlock(*cpt_iter);
4244
4245         *cpt_iter = lncpt;
4246
4247         return found ? 0 : -ENOENT;
4248 }
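
/*
 * Editor's note: the sketch below is illustrative only and not part of
 * the original source.  A hypothetical in-kernel caller could use
 * lnet_get_peer_ni_info() to enumerate the NID4 peer NIs of one peer
 * table by bumping peer_index until the lookup runs out of entries.
 * Note that the function overwrites *cpt_iter on success, so it must
 * be reset on every pass:
 */
static void
lnet_example_dump_peer_nis(__u32 cpt)
{
        char aliveness[LNET_MAX_STR_LEN];
        __u32 refcount, ni_tx, tx, rtr, min_rtr, qnob;
        __u32 cpt_iter;
        __u32 index = 0;
        __u64 nid4;

        for (;;) {
                cpt_iter = cpt;
                if (lnet_get_peer_ni_info(index++, &nid4, aliveness,
                                          &cpt_iter, &refcount, &ni_tx,
                                          &tx, &rtr, &min_rtr, &qnob))
                        break;  /* -ENOENT: no more peer NIs */

                CDEBUG(D_NET, "peer %s %s tx credits %u\n",
                       libcfs_nid2str(nid4), aliveness, tx);
        }
}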
4249
4250 /* ln_api_mutex is held, which keeps the peer list stable */
4251 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4252 {
4253         struct lnet_ioctl_element_stats *lpni_stats;
4254         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4255         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4256         struct lnet_peer_ni_credit_info *lpni_info;
4257         struct lnet_peer_ni *lpni;
4258         struct lnet_peer *lp;
4259         lnet_nid_t nid4;
4260         struct lnet_nid nid;
4261         __u32 size;
4262         int rc;
4263
4264         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4265         lp = lnet_find_peer(&nid);
4266
4267         if (!lp) {
4268                 rc = -ENOENT;
4269                 goto out;
4270         }
4271
4272         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4273                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4274         size *= lp->lp_nnis;
4275         if (size > cfg->prcfg_size) {
4276                 cfg->prcfg_size = size;
4277                 rc = -E2BIG;
4278                 goto out_lp_decref;
4279         }
4280
4281         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4282         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4283         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4284         cfg->prcfg_count = lp->lp_nnis;
4285         cfg->prcfg_size = size;
4286         cfg->prcfg_state = lp->lp_state;
4287
4288         /* Allocate helper buffers. */
4289         rc = -ENOMEM;
4290         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4291         if (!lpni_info)
4292                 goto out_lp_decref;
4293         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4294         if (!lpni_stats)
4295                 goto out_free_info;
4296         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4297         if (!lpni_msg_stats)
4298                 goto out_free_stats;
4299         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4300         if (!lpni_hstats)
4301                 goto out_free_msg_stats;
4302
4303
4304         lpni = NULL;
4305         rc = -EFAULT;
4306         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4307                 if (!nid_is_nid4(&lpni->lpni_nid))
4308                         continue;
4309                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4310                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4311                         goto out_free_hstats;
4312                 bulk += sizeof(nid4);
4313
4314                 memset(lpni_info, 0, sizeof(*lpni_info));
4315                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4316                 if (lnet_isrouter(lpni) ||
4317                     lnet_peer_aliveness_enabled(lpni))
4318                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4319                                  "%s", lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4320
4321                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4322                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4323                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4324                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4325                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4326                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4327                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4328                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4329                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4330                         goto out_free_hstats;
4331                 bulk += sizeof(*lpni_info);
4332
4333                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4334                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4335                                                             LNET_STATS_TYPE_SEND);
4336                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4337                                                             LNET_STATS_TYPE_RECV);
4338                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4339                                                             LNET_STATS_TYPE_DROP);
4340                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4341                         goto out_free_hstats;
4342                 bulk += sizeof(*lpni_stats);
4343                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4344                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4345                         goto out_free_hstats;
4346                 bulk += sizeof(*lpni_msg_stats);
4347                 lpni_hstats->hlpni_network_timeout =
4348                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4349                 lpni_hstats->hlpni_remote_dropped =
4350                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4351                 lpni_hstats->hlpni_remote_timeout =
4352                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4353                 lpni_hstats->hlpni_remote_error =
4354                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4355                 lpni_hstats->hlpni_health_value =
4356                   atomic_read(&lpni->lpni_healthv);
4357                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4358                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4359                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4360                         goto out_free_hstats;
4361                 bulk += sizeof(*lpni_hstats);
4362         }
4363         rc = 0;
4364
4365 out_free_hstats:
4366         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4367 out_free_msg_stats:
4368         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4369 out_free_stats:
4370         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4371 out_free_info:
4372         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4373 out_lp_decref:
4374         lnet_peer_decref_locked(lp);
4375 out:
4376         return rc;
4377 }
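
/*
 * Editor's note: the sketch below is illustrative only and not part of
 * the original source.  Callers of lnet_get_peer_info() size the bulk
 * buffer via the -E2BIG handshake above: on failure the kernel writes
 * the required byte count back into prcfg_size and the caller retries
 * with a larger buffer.  The buffer then holds prcfg_count fixed-size
 * records, each laid out as NID4, credit info, element stats, message
 * stats, and health stats, in that order.  A hypothetical decoder for
 * a kernel-side copy of that buffer:
 */
static void
lnet_example_parse_peer_bulk(struct lnet_ioctl_peer_cfg *cfg, void *buf)
{
        __u32 i;

        for (i = 0; i < cfg->prcfg_count; i++) {
                struct lnet_peer_ni_credit_info info;
                lnet_nid_t nid4;

                memcpy(&nid4, buf, sizeof(nid4));
                buf += sizeof(nid4);
                memcpy(&info, buf, sizeof(info));
                buf += sizeof(info);
                /* skip the three stats blocks of this record */
                buf += sizeof(struct lnet_ioctl_element_stats) +
                       sizeof(struct lnet_ioctl_element_msg_stats) +
                       sizeof(struct lnet_ioctl_peer_ni_hstats);

                CDEBUG(D_NET, "peer NI %s: %s, refcount %u\n",
                       libcfs_nid2str(nid4), info.cr_aliveness,
                       info.cr_refcount);
        }
}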
4378
4379 /* must hold net_lock/0 */
4380 void
4381 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4382                                      struct list_head *recovery_queue,
4383                                      time64_t now)
4384 {
4385         /* the monitor thread could've shut down and cleaned up the queues */
4386         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4387                 return;
4388
4389         if (!list_empty(&lpni->lpni_recovery))
4390                 return;
4391
4392         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4393                 return;
4394
4395         if (!lpni->lpni_last_alive) {
4396                 CDEBUG(D_NET,
4397                        "lpni %s(%p) not eligible for recovery, last alive %lld\n",
4398                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4399                        lpni->lpni_last_alive);
4400                 return;
4401         }
4402
4403         if (lnet_recovery_limit &&
4404             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4405                 CDEBUG(D_NET, "lpni %s aged out, last alive %lld\n",
4406                        libcfs_nidstr(&lpni->lpni_nid),
4407                        lpni->lpni_last_alive);
4408                 /* Reset the ping count so that if this peer NI is added back to
4409                  * the recovery queue we will send the first ping right away.
4410                  */
4411                 lpni->lpni_ping_count = 0;
4412                 return;
4413         }
4414
4415         /* This peer NI is going on the recovery queue, so take a ref on it */
4416         lnet_peer_ni_addref_locked(lpni);
4417
4418         lnet_peer_ni_set_next_ping(lpni, now);
4419
4420         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4421                libcfs_nidstr(&lpni->lpni_nid),
4422                lpni->lpni_ping_count,
4423                lpni->lpni_next_ping,
4424                lpni->lpni_last_alive,
4425                atomic_read(&lpni->lpni_healthv));
4426
4427         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4428 }
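
/*
 * Editor's note: the sketch below is illustrative only and not part of
 * the original source.  The early returns above reduce to a single
 * eligibility predicate: the monitor thread must be running, the peer
 * NI must not already be queued, its health must be below the maximum,
 * it must have been alive at least once, and it must not have aged
 * past lnet_recovery_limit seconds since it was last alive.  A
 * hypothetical helper expressing the same rules:
 */
static bool
lnet_example_recovery_eligible(struct lnet_peer_ni *lpni, time64_t now)
{
        if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
                return false;
        if (!list_empty(&lpni->lpni_recovery))
                return false;
        if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
                return false;
        if (!lpni->lpni_last_alive)
                return false;
        if (lnet_recovery_limit &&
            now > lpni->lpni_last_alive + lnet_recovery_limit)
                return false;
        return true;
}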
4429
4430 /* Call with the ln_api_mutex held */
4431 void
4432 lnet_peer_ni_set_healthv(lnet_nid_t nid4, int value, bool all)
4433 {
4434         struct lnet_peer_table *ptable;
4435         struct lnet_peer *lp;
4436         struct lnet_peer_net *lpn;
4437         struct lnet_peer_ni *lpni;
4438         struct lnet_nid nid;
4439         int lncpt;
4440         int cpt;
4441         time64_t now;
4442
4443         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4444                 return;
4445
4446         lnet_nid4_to_nid(nid4, &nid);
4447         now = ktime_get_seconds();
4448
4449         if (!all) {
4450                 lnet_net_lock(LNET_LOCK_EX);
4451                 lpni = lnet_peer_ni_find_locked(&nid);
4452                 if (!lpni) {
4453                         lnet_net_unlock(LNET_LOCK_EX);
4454                         return;
4455                 }
4456                 lnet_set_lpni_healthv_locked(lpni, value);
4457                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4458                                              &the_lnet.ln_mt_peerNIRecovq, now);
4459                 lnet_peer_ni_decref_locked(lpni);
4460                 lnet_net_unlock(LNET_LOCK_EX);
4461                 return;
4462         }
4463
4464         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4465
4466         /*
4467          * Walk all the peers and set the health value of each of their
4468          * peer NIs to the specified value.
4469          */
4470         lnet_net_lock(LNET_LOCK_EX);
4471         for (cpt = 0; cpt < lncpt; cpt++) {
4472                 ptable = the_lnet.ln_peer_tables[cpt];
4473                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4474                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4475                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4476                                                     lpni_peer_nis) {
4477                                         lnet_set_lpni_healthv_locked(lpni,
4478                                                                      value);
4479                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4480                                              &the_lnet.ln_mt_peerNIRecovq, now);
4481                                 }
4482                         }
4483                 }
4484         }
4485         lnet_net_unlock(LNET_LOCK_EX);
4486 }
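
/*
 * Editor's note: the sketch below is illustrative only and not part of
 * the original source.  A hypothetical caller holding ln_api_mutex
 * could use the `all` flag to restore every peer NI to full health in
 * one call; the nid argument is ignored in that case.  Because the new
 * value equals LNET_MAX_HEALTH_VALUE, the recovery-queue helper above
 * will skip requeueing -- fully healthy NIs never need recovery pings.
 */
static void
lnet_example_reset_peer_health(void)
{
        lnet_peer_ni_set_healthv(LNET_NID_ANY, LNET_MAX_HEALTH_VALUE, true);
}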
4487