LU-12678 lnet: convert lpni_refcount to a kref
lnet/lnet/peer.c (fs/lustre-release.git)

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}
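
/*
 * Usage sketch (editor's illustration, not in the original file): the
 * startup path is expected to pair these calls, e.g.
 *
 *        rc = lnet_peer_tables_create();
 *        if (rc != 0)
 *                return rc;        // -ENOMEM if any allocation failed
 *        ...
 *        lnet_peer_uninit();       // empties and destroys the tables again
 */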

static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NIDNET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                lnet_peer_ni_addref_locked(lpni);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

        return lpni;
}
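
/*
 * Reference-count note (editor's summary of the code above): kref_init()
 * hands the caller the initial reference, and a peer_ni on a non-local
 * network holds one extra reference for its stay on
 * ln_remote_peer_ni_list, which lnet_peer_remove_from_remote_list()
 * drops again.
 */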

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;
        lp->lp_disc_src_nid = LNET_NID_ANY;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * All peers created on a router should have health on,
         * if it's not already on.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for the loopback peer. If you're creating
         * a peer for the loopback interface then that was initiated
         * when we attempted to send a message over the loopback. There
         * is no need to ever use a different interface when sending
         * messages to myself.
         */
        if (nid == LNET_NID_LO_0)
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * If there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be
         * a no-op, and it is expected that lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on it.
         * We cannot resend them now because we're holding the cpt lock;
         * releasing the lock could leave state inconsistent.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}
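
/*
 * Teardown chain, roughly (editor's summary): the final decref on a
 * peer_ni releases its peer_net; once lpn_refcount hits zero,
 * lnet_destroy_peer_net_locked() runs and drops the peer reference it
 * holds, and when lp_refcount in turn reaches zero the peer lands in
 * lnet_destroy_peer_locked() above.
 */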

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpnis left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nid2str(lp->lp_primary_nid),
                libcfs_nid2str(lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Cannot delete it\n",
                       libcfs_nid2str(lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* decrement the ref count on the peer table */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni on the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}
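
/*
 * Caller pattern for the above (editor's sketch): deletion always runs
 * under the exclusive net lock, as lnet_peer_uninit() below does:
 *
 *        lnet_net_lock(LNET_LOCK_EX);
 *        rc = lnet_peer_ni_del_locked(lpni, false);
 *        lnet_net_unlock(LNET_LOCK_EX);
 *
 * The lpni then lingers on pt_zombie_list until its last reference is
 * dropped; lnet_peer_ni_finalize_wait() further down blocks until
 * pt_zombies reaches zero.
 */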

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

        spin_lock(&peer->lp_lock);
        peer->lp_state |= LNET_PEER_MARK_DELETED;
        spin_unlock(&peer->lp_lock);

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
        struct lnet_handle_md ping_mdh;
        struct lnet_handle_md push_mdh;

        LNetInvalidateMDHandle(&ping_mdh);
        LNetInvalidateMDHandle(&push_mdh);

        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_PING_SENT) {
                ping_mdh = lp->lp_ping_mdh;
                LNetInvalidateMDHandle(&lp->lp_ping_mdh);
        }
        if (lp->lp_state & LNET_PEER_PUSH_SENT) {
                push_mdh = lp->lp_push_mdh;
                LNetInvalidateMDHandle(&lp->lp_push_mdh);
        }
        spin_unlock(&lp->lp_lock);

        if (!LNetMDHandleIsInvalid(ping_mdh))
                LNetMDUnlink(ping_mdh);
        if (!LNetMDHandleIsInvalid(push_mdh))
                LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        lnet_peer_cancel_discovery(peer);
        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return 0;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = lp->lp_primary_nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }
        lpni = lnet_find_peer_ni_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        lnet_peer_ni_decref_locked(lpni);
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lpni2->lpni_nid;
        }
        rc = lnet_peer_ni_del_locked(lpni, force);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

        return rc;
}
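
/*
 * Editor's note (assumption, not verified against the callers): this
 * path is normally driven from user-space configuration, e.g. an
 * "lnetctl peer del" request arriving through the DLC ioctl layer with
 * ln_api_mutex already held, as the comment above requires; the
 * LNET_PEER_RTR_NI_FORCE_DEL flag covers forced router-NI cleanup.
 */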

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (peer->lp_primary_nid != lpni->lpni_nid) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        lnet_nid_t              gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (lp->lpni_nid == nid) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}
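
/*
 * Usage sketch (editor's illustration): a successful lookup returns a
 * referenced lpni, so callers follow the pattern
 *
 *        lpni = lnet_find_peer_ni_locked(nid);
 *        if (lpni) {
 *                ... use lpni ...
 *                lnet_peer_ni_decref_locked(lpni);
 *        }
 *
 * exactly as lnet_find_peer() below does when resolving the owning peer.
 */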

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lpni->lpni_nid == nid)
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net ID provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list, loop back
                         * to the beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_entry(peer->lp_peer_nets.next,
                                         struct lnet_peer_net,
                                         lpn_peer_nets);
                }
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if we reached the end of the peer ni list and the peer
                 * net is specified, then there are no more peer nis in
                 * that net.
                 */
                if (net)
                        return NULL;

                /*
                 * We reached the end of this net's ni list. Move to the
                 * next net.
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
                                 struct lnet_peer_net,
                                 lpn_peer_nets);
                /* get the ni on it */
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_entry(prev->lpni_peer_nis.next,
                          struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}
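
/*
 * Iteration idiom (editor's sketch, mirroring
 * lnet_peer_clr_non_mr_pref_nids() below): passing NULL for both
 * peer_net and prev walks every NI of the peer across all of its nets:
 *
 *        struct lnet_peer_ni *lpni = NULL;
 *
 *        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)))
 *                ... visit lpni ...
 */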

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return -E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (i >= count)
                                goto done;
                        id.nid = lp->lp_primary_nid;
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}
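
/*
 * Contract sketch (editor's illustration): a user-space caller can size
 * its buffer in two passes; on -E2BIG the call still stores the peer
 * count in *countp and the required byte count in *sizep, so it can
 * simply be retried with a buffer of *sizep bytes.
 */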

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *      false: the list is empty, or the NID is not on it
 *      true:  the NID is found on the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             lnet_nid_t gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nid2str(lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nid2str(ne->nl_nid),
                       libcfs_nid2str(gw_nid));
                if (ne->nl_nid == gw_nid)
                        return true;
        }

        return false;
}
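
/*
 * Usage sketch (editor's illustration, hypothetical caller): the router
 * selection code can use this to stick with a previously used gateway:
 *
 *        if (lnet_peer_is_pref_rtr_locked(lpni, gw_nid))
 *                ... favour this gateway when sending to lpni ...
 */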

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       lnet_nid_t gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with api_mutex held. When the api_mutex
         * is held the list cannot be modified, as it is only modified as
         * a result of applying a UDSP, and that happens under the
         * api_mutex lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (ne->nl_nid == gw_nid)
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}

/*
 * Test whether an ni is a preferred ni for this peer_ni, i.e., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return lpni->lpni_pref.nid == nid;
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (ne->nl_nid == nid)
                        return true;
        }
        return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        lnet_nid_t tmp_nid = LNET_NID_ANY;
        int rc = 0;

        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (ne1->nl_nid == nid) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = tmp_nid;
                }
                ne1->nl_nid = nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}
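
/*
 * Representation note (editor's summary of the code above): the
 * preferred-NID storage is overloaded; while lpni_pref_nnids == 1 the
 * single NID lives inline in lpni_pref.nid, and only when a second NID
 * is added is the inline value migrated onto the lpni_pref.nids list.
 * lnet_peer_del_pref_nid() below performs the inverse collapse when the
 * count drops back to one.
 */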

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (lpni->lpni_pref.nid != nid) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (ne->nl_nid == nid)
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (ne)
                LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }

        return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;
        int rc = 0;
        int cpt;

        if (nid == LNET_NID_LO_0)
                return LNET_NID_LO_0;

        cpt = lnet_net_lock_current();
        lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        while (!lnet_peer_is_uptodate(lp)) {
                spin_lock(&lp->lp_lock);
                /* force a full discovery cycle */
                lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
                spin_unlock(&lp->lp_lock);

                rc = lnet_discover_peer_locked(lpni, cpt, true);
                if (rc)
                        goto out_decref;
                /* The lpni (or lp) for this NID may have changed and our
                 * ref is the only thing keeping the old one around.
                 * Release the ref and look up the lpni again.
                 */
                lnet_peer_ni_decref_locked(lpni);
                lpni = lnet_find_peer_ni_locked(nid);
                if (!lpni) {
                        rc = -ENOENT;
                        goto out_unlock;
                }
                lp = lpni->lpni_peer_net->lpn_peer;

                /* Only try once if discovery is disabled */
                if (lnet_is_discovery_disabled(lp))
                        break;
        }
        primary_nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
               libcfs_nid2str(primary_nid), rc);
        return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
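
/*
 * Usage sketch (editor's illustration): upper layers call the exported
 * function above to canonicalise any of a peer's NIDs, e.g.
 *
 *        lnet_nid_t prim = LNetPrimaryNID(nid);
 *
 * This may block while discovery runs; if anything fails it falls back
 * to returning the NID it was given.
 */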

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (peer_net->lpn_net_id == net_id)
                        return peer_net;
        }
        return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
                                struct lnet_peer_net *lpn,
                                struct lnet_peer_ni *lpni,
                                unsigned flags)
{
        struct lnet_peer_table *ptable;
        bool new_lpn = false;
        int rc;

        /* Install the new peer_ni */
        lnet_net_lock(LNET_LOCK_EX);
        /* Add peer_ni to global peer table hash, if necessary. */
        if (list_empty(&lpni->lpni_hashlist)) {
                int hash = lnet_nid2peerhash(lpni->lpni_nid);

                ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
                list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
                ptable->pt_version++;
                lnet_peer_ni_addref_locked(lpni);
        }

        /* Detach the peer_ni from an existing peer, if necessary. */
        if (lpni->lpni_peer_net) {
                LASSERT(lpni->lpni_peer_net != lpn);
                LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
                lnet_peer_detach_peer_ni_locked(lpni);
                lnet_peer_net_decref_locked(lpni->lpni_peer_net);
                lpni->lpni_peer_net = NULL;
        }

        /* Add peer_ni to peer_net */
        lpni->lpni_peer_net = lpn;
        list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
        lnet_update_peer_net_healthv(lpni);
        lnet_peer_net_addref_locked(lpn);

        /* Add peer_net to peer */
        if (!lpn->lpn_peer) {
                new_lpn = true;
                lpn->lpn_peer = lp;
                list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
                lnet_peer_addref_locked(lp);
        }

        /* Add peer to global peer list, if necessary */
        ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
        if (list_empty(&lp->lp_peer_list)) {
                list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
                ptable->pt_peers++;
        }

        /* Update peer state */
        spin_lock(&lp->lp_lock);
        if (flags & LNET_PEER_CONFIGURED) {
                if (!(lp->lp_state & LNET_PEER_CONFIGURED))
                        lp->lp_state |= LNET_PEER_CONFIGURED;
        }
        if (flags & LNET_PEER_MULTI_RAIL) {
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
        }
        spin_unlock(&lp->lp_lock);

        lp->lp_nnis++;

        /* apply UDSPs */
        if (new_lpn) {
                rc = lnet_udsp_apply_policies_on_lpn(lpn);
                if (rc)
                        CERROR("Failed to apply UDSPs on lpn %s\n",
                               libcfs_net2str(lpn->lpn_net_id));
        }
        rc = lnet_udsp_apply_policies_on_lpni(lpni);
        if (rc)
                CERROR("Failed to apply UDSPs on lpni %s\n",
                       libcfs_nid2str(lpni->lpni_nid));

        CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
               libcfs_nid2str(lp->lp_primary_nid),
               libcfs_nid2str(lpni->lpni_nid), flags);
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(LNET_LOCK_EX);

        return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer *lp;
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(nid != LNET_NID_ANY);

        /*
         * No need for the lnet_net_lock here, because the
         * lnet_api_mutex is held.
         */
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /* A peer with this NID already exists. */
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_ni_decref_locked(lpni);
                /*
                 * This is an error if the peer was configured and the
                 * primary NID differs or an attempt is made to change
                 * the Multi-Rail flag. Otherwise the assumption is
                 * that an existing peer is being modified.
                 */
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        if (lp->lp_primary_nid != nid)
                                rc = -EEXIST;
                        else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
                                rc = -EPERM;
                        goto out;
                }
                /* Delete and recreate as a configured peer. */
                lnet_peer_del(lp);
        }

        /* Create peer, peer_net, and peer_ni. */
        rc = -ENOMEM;
        lp = lnet_peer_alloc(nid);
        if (!lp)
                goto out;
        lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
        if (!lpn)
                goto out_free_lp;
        lpni = lnet_peer_ni_alloc(nid);
        if (!lpni)
                goto out_free_lpn;

        return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
        LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
        LIBCFS_FREE(lp, sizeof(*lp));
out:
        CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
               libcfs_nid2str(nid), flags, rc);
        return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(lp);
        LASSERT(nid != LNET_NID_ANY);

        /* A configured peer can only be updated through configuration. */
        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /*
         * The MULTI_RAIL flag can be set but not cleared, because
         * that would leave the peer struct in an invalid state.
         */
        if (flags & LNET_PEER_MULTI_RAIL) {
                spin_lock(&lp->lp_lock);
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
                spin_unlock(&lp->lp_lock);
        } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /*
                 * A peer_ni already exists. This is only a problem if
                 * it is not connected to this peer and was configured
                 * by DLC.
                 */
                if (lpni->lpni_peer_net->lpn_peer == lp)
                        goto out_free_lpni;
                if (lnet_peer_ni_is_configured(lpni)) {
                        rc = -EEXIST;
                        goto out_free_lpni;
                }
                /* If this is the primary NID, destroy the peer. */
                if (lnet_peer_ni_is_primary(lpni)) {
                        struct lnet_peer *rtr_lp =
                                lpni->lpni_peer_net->lpn_peer;
                        int rtr_refcount = rtr_lp->lp_rtr_refcount;
                        /*
                         * if we're trying to delete a router it means
                         * we're moving this peer NI to a new peer so must
                         * transfer router properties to the new peer
                         */
                        if (rtr_refcount > 0) {
1632                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1633                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1634                         }
1635                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1636                         lnet_peer_ni_decref_locked(lpni);
1637                         lpni = lnet_peer_ni_alloc(nid);
1638                         if (!lpni) {
1639                                 rc = -ENOMEM;
1640                                 goto out; /* no lpni ref to drop */
1641                         }
1642                 }
1643         } else {
1644                 lpni = lnet_peer_ni_alloc(nid);
1645                 if (!lpni) {
1646                         rc = -ENOMEM;
1647                         goto out; /* no lpni ref to drop */
1648                 }
1649         }
1650
1651         /*
1652          * Get the peer_net. Check that we're not adding a second
1653          * peer_ni on a peer_net of a non-multi-rail peer.
1654          */
1655         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1656         if (!lpn) {
1657                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1658                 if (!lpn) {
1659                         rc = -ENOMEM;
1660                         goto out_free_lpni;
1661                 }
1662         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1663                 rc = -ENOTUNIQ;
1664                 goto out_free_lpni;
1665         }
1666
1667         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1668
1669 out_free_lpni:
1670         lnet_peer_ni_decref_locked(lpni);
1671 out:
1672         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1673                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1674                flags, rc);
1675         return rc;
1676 }
1677
1678 /*
1679  * Update the primary NID of a peer, if possible.
1680  *
1681  * Call with the lnet_api_mutex held.
1682  */
1683 static int
1684 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1685 {
1686         lnet_nid_t old = lp->lp_primary_nid;
1687         int rc = 0;
1688
1689         if (lp->lp_primary_nid == nid)
1690                 goto out;
1691         rc = lnet_peer_add_nid(lp, nid, flags);
1692         if (rc)
1693                 goto out;
1694         lp->lp_primary_nid = nid;
1695 out:
1696         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1697                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1698         return rc;
1699 }
1700
1701 /*
1702  * lpni creation initiated due to traffic either sending or receiving.
1703  */
1704 static int
1705 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1706 {
1707         struct lnet_peer *lp;
1708         struct lnet_peer_net *lpn;
1709         struct lnet_peer_ni *lpni;
1710         unsigned flags = 0;
1711         int rc = 0;
1712
1713         if (nid == LNET_NID_ANY) {
1714                 rc = -EINVAL;
1715                 goto out;
1716         }
1717
1718         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1719         lpni = lnet_find_peer_ni_locked(nid);
1720         if (lpni) {
1721                 /*
1722                  * We must have raced with another thread. Since we
1723                  * know next to nothing about a peer_ni created by
1724                  * traffic, we just assume everything is ok and
1725                  * return.
1726                  */
1727                 lnet_peer_ni_decref_locked(lpni);
1728                 goto out;
1729         }
1730
1731         /* Create peer, peer_net, and peer_ni. */
1732         rc = -ENOMEM;
1733         lp = lnet_peer_alloc(nid);
1734         if (!lp)
1735                 goto out;
1736         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1737         if (!lpn)
1738                 goto out_free_lp;
1739         lpni = lnet_peer_ni_alloc(nid);
1740         if (!lpni)
1741                 goto out_free_lpn;
1742         if (pref != LNET_NID_ANY)
1743                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1744
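        /* flags stays 0: the peer is created without LNET_PEER_CONFIGURED,
         * so a later DLC configuration can delete and recreate it (see
         * lnet_peer_add() above). */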
1745         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1746
1747 out_free_lpn:
1748         LIBCFS_FREE(lpn, sizeof(*lpn));
1749 out_free_lp:
1750         LIBCFS_FREE(lp, sizeof(*lp));
1751 out:
1752         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1753         return rc;
1754 }
1755
1756 /*
1757  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1758  *
1759  * This API handles the following combinations:
1760  *   - Create a peer with its primary NI if only the prim_nid is provided.
1761  *   - Add a NID to a peer identified by the prim_nid. The peer
1762  *     identified by the prim_nid must already exist.
1763  * In either case the peer being created or modified may be non-MR.
1764  *
1765  * The caller must hold ln_api_mutex. This prevents the peer from
1766  * being created/modified/deleted by a different thread.
1767  */
1768 int
1769 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1770 {
1771         struct lnet_peer *lp = NULL;
1772         struct lnet_peer_ni *lpni;
1773         unsigned flags;
1774
1775         /* The prim_nid must always be specified */
1776         if (prim_nid == LNET_NID_ANY)
1777                 return -EINVAL;
1778
1779         flags = LNET_PEER_CONFIGURED;
1780         if (mr)
1781                 flags |= LNET_PEER_MULTI_RAIL;
1782
1783         /*
1784          * If nid isn't specified, we must create a new peer with
1785          * prim_nid as its primary nid.
1786          */
1787         if (nid == LNET_NID_ANY)
1788                 return lnet_peer_add(prim_nid, flags);
1789
1790         /* Look up the prim_nid, which must exist. */
1791         lpni = lnet_find_peer_ni_locked(prim_nid);
1792         if (!lpni)
1793                 return -ENOENT;
1794         lnet_peer_ni_decref_locked(lpni);
1795         lp = lpni->lpni_peer_net->lpn_peer;
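        /* lpni may still be used after the decref: ln_api_mutex, which
         * the caller holds, prevents the peer from being deleted. */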
1796
1797         /* Peer must have been configured. */
1798         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1799                 CDEBUG(D_NET, "peer %s was not configured\n",
1800                        libcfs_nid2str(prim_nid));
1801                 return -ENOENT;
1802         }
1803
1804         /* Primary NID must match */
1805         if (lp->lp_primary_nid != prim_nid) {
1806                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1807                        libcfs_nid2str(prim_nid),
1808                        libcfs_nid2str(lp->lp_primary_nid));
1809                 return -ENODEV;
1810         }
1811
1812         /* Multi-Rail flag must match. */
1813         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1814                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1815                        libcfs_nid2str(prim_nid));
1816                 return -EPERM;
1817         }
1818
1819         return lnet_peer_add_nid(lp, nid, flags);
1820 }
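
/*
 * Illustration (hypothetical NIDs): configuring a Multi-Rail peer with
 * two interfaces amounts to a call sequence such as
 *
 *   lnet_add_peer_ni(a@tcp, LNET_NID_ANY, true);  create the peer
 *   lnet_add_peer_ni(a@tcp, b@tcp, true);         add a second NID
 *
 * where the second call takes the lnet_peer_add_nid() path above.
 */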
1821
1822 /*
1823  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1824  *
1825  * This API handles the following combinations:
1826  *   - Delete a NI from a peer if both prim_nid and nid are provided.
1827  *   - Delete a peer if only prim_nid is provided.
1828  *   - Delete a peer if the nid given is its primary nid.
1829  *
1830  * The caller must hold ln_api_mutex. This prevents the peer from
1831  * being modified/deleted by a different thread.
1832  */
1833 int
1834 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1835 {
1836         struct lnet_peer *lp;
1837         struct lnet_peer_ni *lpni;
1838         unsigned flags;
1839
1840         if (prim_nid == LNET_NID_ANY)
1841                 return -EINVAL;
1842
1843         lpni = lnet_find_peer_ni_locked(prim_nid);
1844         if (!lpni)
1845                 return -ENOENT;
1846         lnet_peer_ni_decref_locked(lpni);
1847         lp = lpni->lpni_peer_net->lpn_peer;
1848
1849         if (prim_nid != lp->lp_primary_nid) {
1850                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1851                        libcfs_nid2str(prim_nid),
1852                        libcfs_nid2str(lp->lp_primary_nid));
1853                 return -ENODEV;
1854         }
1855
1856         lnet_net_lock(LNET_LOCK_EX);
1857         if (lp->lp_rtr_refcount > 0) {
1858                 lnet_net_unlock(LNET_LOCK_EX);
1859                 CERROR("%s is a router. Can not be deleted\n",
1860                        libcfs_nid2str(prim_nid));
1861                 return -EBUSY;
1862         }
1863         lnet_net_unlock(LNET_LOCK_EX);
1864
1865         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1866                 return lnet_peer_del(lp);
1867
1868         flags = LNET_PEER_CONFIGURED;
1869         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1870                 flags |= LNET_PEER_MULTI_RAIL;
1871
1872         return lnet_peer_del_nid(lp, nid, flags);
1873 }
1874
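/*
 * kref release callback for lpni_kref; runs when the last reference
 * to a peer_ni is dropped. As the _locked suffix suggests, it is
 * invoked with lnet_net_lock held. Frees the peer_ni and drops its
 * hold on the owning peer_net.
 */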
1875 void
1876 lnet_destroy_peer_ni_locked(struct kref *ref)
1877 {
1878         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1879                                                  lpni_kref);
1880         struct lnet_peer_table *ptable;
1881         struct lnet_peer_net *lpn;
1882
1883         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1884
1885         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1886         LASSERT(list_empty(&lpni->lpni_txq));
1887         LASSERT(lpni->lpni_txqnob == 0);
1888         LASSERT(list_empty(&lpni->lpni_peer_nis));
1889         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1890
1891         lpn = lpni->lpni_peer_net;
1892         lpni->lpni_peer_net = NULL;
1893         lpni->lpni_net = NULL;
1894
1895         if (!list_empty(&lpni->lpni_hashlist)) {
1896                 /* remove the peer ni from the zombie list */
1897                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1898                 spin_lock(&ptable->pt_zombie_lock);
1899                 list_del_init(&lpni->lpni_hashlist);
1900                 ptable->pt_zombies--;
1901                 spin_unlock(&ptable->pt_zombie_lock);
1902         }
1903
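        /* Only a list of multiple preferred NIDs needs freeing; a single
         * preferred NID is stored inline in the lpni_pref union. */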
1904         if (lpni->lpni_pref_nnids > 1) {
1905                 struct lnet_nid_list *ne, *tmp;
1906
1907                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1908                                          nl_list) {
1909                         list_del_init(&ne->nl_list);
1910                         LIBCFS_FREE(ne, sizeof(*ne));
1911                 }
1912         }
1913         LIBCFS_FREE(lpni, sizeof(*lpni));
1914
1915         if (lpn)
1916                 lnet_peer_net_decref_locked(lpn);
1917 }
1918
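/*
 * Get a peer_ni for the given nid, creating it if necessary. Takes a
 * hold on the peer_ni. Unlike lnet_nid2peerni_locked() below, this
 * variant calls lnet_peer_ni_traffic_add() without first taking
 * ln_api_mutex, so the caller is presumably expected to hold it.
 */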
1919 struct lnet_peer_ni *
1920 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1921 {
1922         struct lnet_peer_ni *lpni = NULL;
1923         int rc;
1924
1925         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1926                 return ERR_PTR(-ESHUTDOWN);
1927
1928         /*
1929          * find if a peer_ni already exists.
1930          * If so then just return that.
1931          */
1932         lpni = lnet_find_peer_ni_locked(nid);
1933         if (lpni)
1934                 return lpni;
1935
1936         lnet_net_unlock(cpt);
1937
1938         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1939         if (rc) {
1940                 lpni = ERR_PTR(rc);
1941                 goto out_net_relock;
1942         }
1943
1944         lpni = lnet_find_peer_ni_locked(nid);
1945         LASSERT(lpni);
1946
1947 out_net_relock:
1948         lnet_net_lock(cpt);
1949
1950         return lpni;
1951 }
1952
1953 /*
1954  * Get a peer_ni for the given nid, create it if necessary. Takes a
1955  * hold on the peer_ni.
1956  */
1957 struct lnet_peer_ni *
1958 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1959 {
1960         struct lnet_peer_ni *lpni = NULL;
1961         int rc;
1962
1963         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1964                 return ERR_PTR(-ESHUTDOWN);
1965
1966         /*
1967          * find if a peer_ni already exists.
1968          * If so then just return that.
1969          */
1970         lpni = lnet_find_peer_ni_locked(nid);
1971         if (lpni)
1972                 return lpni;
1973
1974         /*
1975          * Slow path:
1976          * use the lnet_api_mutex to serialize the creation of the peer_ni
1977          * and the creation/deletion of the local ni/net. When a local ni is
1978          * created, if there exists a set of peer_nis on that network,
1979          * they need to be traversed and updated. When a local NI is
1980          * deleted, which could result in a network being deleted, then
1981          * all peer nis on that network need to be removed as well.
1982          *
1983          * Creation through traffic should also be serialized with
1984          * creation through DLC.
1985          */
1986         lnet_net_unlock(cpt);
1987         mutex_lock(&the_lnet.ln_api_mutex);
1988         /*
1989          * Shutdown is only set under the ln_api_mutex, so a single
1990          * check here is sufficient.
1991          */
1992         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1993                 lpni = ERR_PTR(-ESHUTDOWN);
1994                 goto out_mutex_unlock;
1995         }
1996
1997         rc = lnet_peer_ni_traffic_add(nid, pref);
1998         if (rc) {
1999                 lpni = ERR_PTR(rc);
2000                 goto out_mutex_unlock;
2001         }
2002
2003         lpni = lnet_find_peer_ni_locked(nid);
2004         LASSERT(lpni);
2005
2006 out_mutex_unlock:
2007         mutex_unlock(&the_lnet.ln_api_mutex);
2008         lnet_net_lock(cpt);
2009
2010         /* Lock has been dropped, check again for shutdown. */
2011         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2012                 if (!IS_ERR(lpni))
2013                         lnet_peer_ni_decref_locked(lpni);
2014                 lpni = ERR_PTR(-ESHUTDOWN);
2015         }
2016
2017         return lpni;
2018 }
2019
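/* Report whether router (gateway) discovery is in progress for lp. */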
2020 bool
2021 lnet_peer_gw_discovery(struct lnet_peer *lp)
2022 {
2023         bool rc = false;
2024
2025         spin_lock(&lp->lp_lock);
2026         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2027                 rc = true;
2028         spin_unlock(&lp->lp_lock);
2029
2030         return rc;
2031 }
2032
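/* Lock-taking wrapper around lnet_peer_is_uptodate_locked() below. */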
2033 bool
2034 lnet_peer_is_uptodate(struct lnet_peer *lp)
2035 {
2036         bool rc;
2037
2038         spin_lock(&lp->lp_lock);
2039         rc = lnet_peer_is_uptodate_locked(lp);
2040         spin_unlock(&lp->lp_lock);
2041         return rc;
2042 }
2043
2044 /*
2045  * Is a peer uptodate from the point of view of discovery?
2046  *
2047  * If it is currently being processed, obviously not.
2048  * A forced Ping or Push is also handled by the discovery thread.
2049  *
2050  * Otherwise look at whether the peer needs rediscovering.
2051  */
2052 bool
2053 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2054 __must_hold(&lp->lp_lock)
2055 {
2056         bool rc;
2057
2058         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2059                             LNET_PEER_FORCE_PING |
2060                             LNET_PEER_FORCE_PUSH)) {
2061                 rc = false;
2062         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2063                 rc = false;
2064         } else if (lnet_peer_needs_push(lp)) {
2065                 rc = false;
2066         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2067                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2068                         rc = true;
2069                 else
2070                         rc = false;
2071         } else {
2072                 rc = false;
2073         }
2074
2075         return rc;
2076 }
2077
2078 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2079 void
2080 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2081 {
2082         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2083          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2084          * when adding to the list and queuing the peer to ensure that we do not
2085          * strand any messages on the lp_dc_pendq. This scheme ensures the
2086          * message will be resent even if the peer is already being discovered.
2087          * Therefore we needn't check the return value of
2088          * lnet_peer_queue_for_discovery(lp).
2089          */
2090         lnet_net_lock(LNET_LOCK_EX);
2091         spin_lock(&lp->lp_lock);
2092         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2093         spin_unlock(&lp->lp_lock);
2094         lnet_peer_queue_for_discovery(lp);
2095         lnet_net_unlock(LNET_LOCK_EX);
2096 }
2097
2098 /*
2099  * Queue a peer for the attention of the discovery thread.  Call with
2100  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2101  * -EALREADY if the peer was already queued.
2102  */
2103 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2104 {
2105         int rc;
2106
2107         spin_lock(&lp->lp_lock);
2108         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2109                 lp->lp_state |= LNET_PEER_DISCOVERING;
2110         spin_unlock(&lp->lp_lock);
2111         if (list_empty(&lp->lp_dc_list)) {
2112                 lnet_peer_addref_locked(lp);
2113                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2114                 wake_up(&the_lnet.ln_dc_waitq);
2115                 rc = 0;
2116         } else {
2117                 rc = -EALREADY;
2118         }
2119
2120         CDEBUG(D_NET, "Queue peer %s: %d\n",
2121                libcfs_nid2str(lp->lp_primary_nid), rc);
2122
2123         return rc;
2124 }
2125
2126 /*
2127  * Discovery of a peer is complete. Wake all waiters on the peer.
2128  * Call with lnet_net_lock/EX held.
2129  */
2130 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2131 {
2132         struct lnet_msg *msg, *tmp;
2133         int rc = 0;
2134         LIST_HEAD(pending_msgs);
2135
2136         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2137                libcfs_nid2str(lp->lp_primary_nid));
2138
2139         list_del_init(&lp->lp_dc_list);
2140         spin_lock(&lp->lp_lock);
2141         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2142         spin_unlock(&lp->lp_lock);
2143         wake_up_all(&lp->lp_dc_waitq);
2144
2145         if (lp->lp_rtr_refcount > 0)
2146                 lnet_router_discovery_complete(lp);
2147
2148         lnet_net_unlock(LNET_LOCK_EX);
2149
2150         /* iterate through all pending messages and send them again */
2151         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2152                 list_del_init(&msg->msg_list);
2153                 if (lp->lp_dc_error) {
2154                         lnet_finalize(msg, lp->lp_dc_error);
2155                         continue;
2156                 }
2157
2158                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2159                        lnet_msgtyp2str(msg->msg_type),
2160                        libcfs_id2str(msg->msg_target));
2161                 rc = lnet_send(msg->msg_src_nid_param, msg,
2162                                msg->msg_rtr_nid_param);
2163                 if (rc < 0) {
2164                         CNETERR("Error sending %s to %s: %d\n",
2165                                lnet_msgtyp2str(msg->msg_type),
2166                                libcfs_id2str(msg->msg_target), rc);
2167                         lnet_finalize(msg, rc);
2168                 }
2169         }
2170         lnet_net_lock(LNET_LOCK_EX);
2171         lnet_peer_decref_locked(lp);
2172 }
2173
2174 /*
2175  * Handle inbound push.
2176  * Like any event handler, called with lnet_res_lock/CPT held.
2177  */
2178 void lnet_peer_push_event(struct lnet_event *ev)
2179 {
2180         struct lnet_ping_buffer *pbuf;
2181         struct lnet_peer *lp;
2182
2183         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2184
2185         /* lnet_find_peer() adds a refcount */
2186         lp = lnet_find_peer(ev->source.nid);
2187         if (!lp) {
2188                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2189                        libcfs_nid2str(ev->initiator.nid),
2190                        libcfs_nid2str(ev->source.nid));
2191                 pbuf->pb_needs_post = true;
2192                 return;
2193         }
2194
2195         /* Ensure peer state remains consistent while we modify it. */
2196         spin_lock(&lp->lp_lock);
2197
2198         /*
2199          * If some kind of error happened the contents of the message
2200          * cannot be used. Clear the NIDS_UPTODATE and set the
2201          * FORCE_PING flag to trigger a ping.
2202          */
2203         if (ev->status) {
2204                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2205                 lp->lp_state |= LNET_PEER_FORCE_PING;
2206                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2207                        ev->status,
2208                        libcfs_nid2str(lp->lp_primary_nid),
2209                        libcfs_nid2str(ev->source.nid));
2210                 goto out;
2211         }
2212
2213         /*
2214          * A push with invalid or corrupted info. Clear the UPTODATE
2215          * flag to trigger a ping.
2216          */
2217         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2218                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2219                 lp->lp_state |= LNET_PEER_FORCE_PING;
2220                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2221                        libcfs_nid2str(lp->lp_primary_nid));
2222                 goto out;
2223         }
2224
2225         /*
2226          * Make sure we'll allocate the correct size ping buffer when
2227          * pinging the peer.
2228          */
2229         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2230                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2231
2232         /*
2233          * A non-Multi-Rail peer is not supposed to be capable of
2234          * sending a push.
2235          */
2236         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2237                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2238                        libcfs_nid2str(lp->lp_primary_nid));
2239                 goto out;
2240         }
2241
2242         /*
2243          * The peer may have discovery disabled at its end. Set
2244          * NO_DISCOVERY as appropriate.
2245          */
2246         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2247                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2248                        libcfs_nid2str(lp->lp_primary_nid));
2249                 /*
2250                  * Mark the peer for deletion if we already know about it
2251                  * and it's going from discovery set to no discovery set
2252                  */
2253                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2254                                       LNET_PEER_DISCOVERING)) &&
2255                      lp->lp_state & LNET_PEER_DISCOVERED) {
2256                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2257                                libcfs_nid2str(lp->lp_primary_nid),
2258                                lp->lp_state);
2259                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2260                 }
2261                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2262         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2263                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2264                        libcfs_nid2str(lp->lp_primary_nid));
2265                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2266         }
2267
2268         /*
2269          * Update the MULTI_RAIL flag based on the push. If the peer
2270          * was configured with DLC then the setting should match what
2271          * DLC put in.
2272          * NB: We verified above that the MR feature bit is set in pi_features
2273          */
2274         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2275                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2276                        libcfs_nid2str(lp->lp_primary_nid), lp);
2277         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2278                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2279                       libcfs_nid2str(lp->lp_primary_nid));
2280         } else if (lnet_peer_discovery_disabled) {
2281                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2282                        libcfs_nid2str(lp->lp_primary_nid), lp);
2283         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2284                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2285                        libcfs_nid2str(lp->lp_primary_nid), lp);
2286         } else {
2287                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2288                        libcfs_nid2str(lp->lp_primary_nid), lp);
2289                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2290                 lnet_peer_clr_non_mr_pref_nids(lp);
2291         }
2292
2293         /*
2294          * Check for truncation of the Put message. Clear the
2295          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2296          * and tell discovery to allocate a bigger buffer.
2297          */
2298         if (ev->mlength < ev->rlength) {
2299                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2300                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2301                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2302                 lp->lp_state |= LNET_PEER_FORCE_PING;
2303                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2304                        libcfs_nid2str(lp->lp_primary_nid),
2305                        pbuf->pb_info.pi_nnis);
2306                 goto out;
2307         }
2308
2309         /* always assume new data */
2310         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2311         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2312
2313         /*
2314          * If there is data present that hasn't been processed yet,
2315          * we'll replace it if the Put contained newer data and it
2316          * fits. We're racing with a Ping or earlier Push in this
2317          * case.
2318          */
2319         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2320                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2321                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2322                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2323                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2324                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2325                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2326                               libcfs_nid2str(lp->lp_primary_nid),
2327                               LNET_PING_BUFFER_SEQNO(pbuf),
2328                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2329                 }
2330                 goto out;
2331         }
2332
2333         /*
2334          * Allocate a buffer to copy the data. On a failure we drop
2335          * the Push and set FORCE_PING to force the discovery
2336          * thread to fix the problem by pinging the peer.
2337          */
2338         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2339         if (!lp->lp_data) {
2340                 lp->lp_state |= LNET_PEER_FORCE_PING;
2341                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2342                        libcfs_nid2str(lp->lp_primary_nid),
2343                        LNET_PING_BUFFER_SEQNO(pbuf));
2344                 goto out;
2345         }
2346
2347         /* Success */
2348         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2349                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2350         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2351         CDEBUG(D_NET, "Received Push %s %u\n",
2352                libcfs_nid2str(lp->lp_primary_nid),
2353                LNET_PING_BUFFER_SEQNO(pbuf));
2354
2355 out:
2356         /* We've processed this buffer. It can be reposted */
2357         pbuf->pb_needs_post = true;
2358
2359         /*
2360          * Queue the peer for discovery if it isn't up to date. If the
2361          * peer was already queued, force it onto the request queue and
2362          * wake the discovery thread, because its state has changed.
2363          */
2364         spin_unlock(&lp->lp_lock);
2365         lnet_net_lock(LNET_LOCK_EX);
2366         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2367                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2368                 wake_up(&the_lnet.ln_dc_waitq);
2369         }
2370         /* Drop refcount from lookup */
2371         lnet_peer_decref_locked(lp);
2372         lnet_net_unlock(LNET_LOCK_EX);
2373 }
2374
2375 /*
2376  * Clear the discovery error state, unless we're already discovering
2377  * this peer, in which case the error is current.
2378  */
2379 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2380 {
2381         spin_lock(&lp->lp_lock);
2382         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2383                 lp->lp_dc_error = 0;
2384         spin_unlock(&lp->lp_lock);
2385 }
2386
2387 /*
2388  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2389  * dropped/retaken within this function. An lnet_peer_ni is passed in
2390  * because discovery could tear down an lnet_peer.
2391  */
2392 int
2393 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2394 {
2395         DEFINE_WAIT(wait);
2396         struct lnet_peer *lp;
2397         int rc = 0;
2398         int count = 0;
2399
2400 again:
2401         lnet_net_unlock(cpt);
2402         lnet_net_lock(LNET_LOCK_EX);
2403         lp = lpni->lpni_peer_net->lpn_peer;
2404         lnet_peer_clear_discovery_error(lp);
2405
2406         /*
2407          * We're willing to be interrupted. The lpni can become a
2408          * zombie if we race with DLC, so we must check for that.
2409          */
2410         for (;;) {
2411                 /* Keep lp alive when the lnet_net_lock is unlocked */
2412                 lnet_peer_addref_locked(lp);
2413                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2414                 if (signal_pending(current))
2415                         break;
2416                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2417                         break;
2418                 /*
2419                  * Don't repeat discovery if discovery is disabled. This
2420                  * lets discovery double as a standard ping, for backwards
2421                  * compatibility with routers that lack discovery or have
2422                  * it disabled.
2423                  */
2424                 if (lnet_is_discovery_disabled(lp) && count > 0)
2425                         break;
2426                 if (lp->lp_dc_error)
2427                         break;
2428                 if (lnet_peer_is_uptodate(lp))
2429                         break;
2430                 lnet_peer_queue_for_discovery(lp);
2431                 count++;
2432                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2433
2434                 /*
2435                  * If caller requested a non-blocking operation then
2436                  * return immediately. Once discovery is complete any
2437                  * pending messages that were stopped due to discovery
2438                  * will be transmitted.
2439                  */
2440                 if (!block)
2441                         break;
2442
2443                 lnet_net_unlock(LNET_LOCK_EX);
2444                 schedule();
2445                 finish_wait(&lp->lp_dc_waitq, &wait);
2446                 lnet_net_lock(LNET_LOCK_EX);
2447                 lnet_peer_decref_locked(lp);
2448                 /* Peer may have changed */
2449                 lp = lpni->lpni_peer_net->lpn_peer;
2450         }
2451         finish_wait(&lp->lp_dc_waitq, &wait);
2452
2453         lnet_net_unlock(LNET_LOCK_EX);
2454         lnet_net_lock(cpt);
2455         lnet_peer_decref_locked(lp);
2456         /*
2457          * The peer may have changed, so re-check and rediscover if that turns
2458          * out to have been the case. The reference count on lp ensured that
2459          * even if it was unlinked from lpni the memory could not be recycled.
2460          * Thus the check below is sufficient to determine whether the peer
2461          * changed. If the peer changed, then lp must not be dereferenced.
2462          */
2463         if (lp != lpni->lpni_peer_net->lpn_peer)
2464                 goto again;
2465
2466         if (signal_pending(current))
2467                 rc = -EINTR;
2468         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2469                 rc = -ESHUTDOWN;
2470         else if (lp->lp_dc_error)
2471                 rc = lp->lp_dc_error;
2472         else if (!block)
2473                 CDEBUG(D_NET, "non-blocking discovery\n");
2474         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2475                 goto again;
2476
2477         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2478                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2479                libcfs_nid2str(lpni->lpni_nid), rc,
2480                (!block) ? "pending discovery" : "discovery complete");
2481
2482         return rc;
2483 }
2484
2485 /* Handle an incoming ack for a push. */
2486 static void
2487 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2488 {
2489         struct lnet_ping_buffer *pbuf;
2490
2491         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2492         spin_lock(&lp->lp_lock);
2493         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2494         lp->lp_push_error = ev->status;
2495         if (ev->status)
2496                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2497         else
2498                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2499         spin_unlock(&lp->lp_lock);
2500
2501         CDEBUG(D_NET, "peer %s ev->status %d\n",
2502                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2503 }
2504
2505 /* Handle a Reply message. This is the reply to a Ping message. */
2506 static void
2507 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2508 {
2509         struct lnet_ping_buffer *pbuf;
2510         int rc;
2511
2512         spin_lock(&lp->lp_lock);
2513
2514         lp->lp_disc_src_nid = ev->target.nid;
2515
2516         /*
2517          * If some kind of error happened the contents of message
2518          * cannot be used. Set PING_FAILED to trigger a retry.
2519          */
2520         if (ev->status) {
2521                 lp->lp_state |= LNET_PEER_PING_FAILED;
2522                 lp->lp_ping_error = ev->status;
2523                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2524                        ev->status,
2525                        libcfs_nid2str(lp->lp_primary_nid),
2526                        libcfs_nid2str(ev->source.nid));
2527                 goto out;
2528         }
2529
2530         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2531         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2532                 lnet_swap_pinginfo(pbuf);
2533
2534         /*
2535          * A reply with invalid or corrupted info. Set PING_FAILED to
2536          * trigger a retry.
2537          */
2538         rc = lnet_ping_info_validate(&pbuf->pb_info);
2539         if (rc) {
2540                 lp->lp_state |= LNET_PEER_PING_FAILED;
2541                 lp->lp_ping_error = 0;
2542                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2543                        libcfs_nid2str(lp->lp_primary_nid), rc);
2544                 goto out;
2545         }
2546
2547
2549          * The peer may have discovery disabled at its end. Set
2550          * NO_DISCOVERY as appropriate.
2551          */
2552         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2553             !lnet_peer_discovery_disabled) {
2554                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2555                        libcfs_nid2str(lp->lp_primary_nid));
2556                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2557         } else {
2558                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2559                        libcfs_nid2str(lp->lp_primary_nid));
2560                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2561         }
2562
2563         /*
2564          * Update the MULTI_RAIL flag based on the reply. If the peer
2565          * was configured with DLC then the setting should match what
2566          * DLC put in.
2567          */
2568         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2569                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2570                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2571                                libcfs_nid2str(lp->lp_primary_nid), lp);
2572                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2573                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2574                               libcfs_nid2str(lp->lp_primary_nid));
2575                 } else if (lnet_peer_discovery_disabled) {
2576                         CDEBUG(D_NET,
2577                                "peer %s(%p) not MR: DD disabled locally\n",
2578                                libcfs_nid2str(lp->lp_primary_nid), lp);
2579                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2580                         CDEBUG(D_NET,
2581                                "peer %s(%p) not MR: DD disabled remotely\n",
2582                                libcfs_nid2str(lp->lp_primary_nid), lp);
2583                 } else {
2584                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2585                                libcfs_nid2str(lp->lp_primary_nid), lp);
2586                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2587                         lnet_peer_clr_non_mr_pref_nids(lp);
2588                 }
2589         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2590                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2591                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2592                               libcfs_nid2str(lp->lp_primary_nid));
2593                 } else {
2594                         CERROR("Multi-Rail state vanished from %s\n",
2595                                libcfs_nid2str(lp->lp_primary_nid));
2596                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2597                 }
2598         }
2599
2600         /*
2601          * Make sure we'll allocate the correct size ping buffer when
2602          * pinging the peer.
2603          */
2604         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2605                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2606
2607         /*
2608          * Check for truncation of the Reply. Clear PING_SENT and set
2609          * PING_FAILED to trigger a retry.
2610          */
2611         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2612                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2613                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2614                 lp->lp_state |= LNET_PEER_PING_FAILED;
2615                 lp->lp_ping_error = 0;
2616                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2617                        libcfs_nid2str(lp->lp_primary_nid),
2618                        pbuf->pb_info.pi_nnis);
2619                 goto out;
2620         }
2621
2622         /*
2623          * Check the sequence numbers in the reply. These are only
2624          * available if the reply came from a Multi-Rail peer.
2625          */
2626         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2627             pbuf->pb_info.pi_nnis > 1 &&
2628             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2629                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2630                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2631                                 libcfs_nid2str(lp->lp_primary_nid),
2632                                 LNET_PING_BUFFER_SEQNO(pbuf),
2633                                 lp->lp_peer_seqno);
2634
2635                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2636         }
2637
2638         /* We're happy with the state of the data in the buffer. */
2639         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2640                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2641         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2642                 lnet_ping_buffer_decref(lp->lp_data);
2643         else
2644                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2645         lnet_ping_buffer_addref(pbuf);
2646         lp->lp_data = pbuf;
2647 out:
2648         lp->lp_state &= ~LNET_PEER_PING_SENT;
2649         spin_unlock(&lp->lp_lock);
2650
2651         lnet_net_lock(LNET_LOCK_EX);
2652         /*
2653          * If this peer is a gateway, call the routing callback to
2654          * handle the ping reply
2655          */
2656         if (lp->lp_rtr_refcount > 0)
2657                 lnet_router_discovery_ping_reply(lp);
2658         lnet_net_unlock(LNET_LOCK_EX);
2659 }
2660
2661 /*
2662  * Send event handling. Only matters for error cases, where we clean
2663  * up state on the peer and peer_ni that would otherwise be updated in
2664  * the REPLY event handler for a successful Ping, and the ACK event
2665  * handler for a successful Push.
2666  */
2667 static int
2668 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2669 {
2670         int rc = 0;
2671
2672         if (!ev->status)
2673                 goto out;
2674
2675         spin_lock(&lp->lp_lock);
2676         if (ev->msg_type == LNET_MSG_GET) {
2677                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2678                 lp->lp_state |= LNET_PEER_PING_FAILED;
2679                 lp->lp_ping_error = ev->status;
2680         } else { /* ev->msg_type == LNET_MSG_PUT */
2681                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2682                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2683                 lp->lp_push_error = ev->status;
2684         }
2685         spin_unlock(&lp->lp_lock);
2686         rc = LNET_REDISCOVER_PEER;
2687 out:
2688         CDEBUG(D_NET, "%s Send to %s: %d\n",
2689                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2690                 libcfs_nid2str(ev->target.nid), rc);
2691         return rc;
2692 }
2693
2694 /*
2695  * Unlink event handling. This event is only seen if a call to
2696  * LNetMDUnlink() caused the event to be unlinked. If this call was
2697  * made after the event was set up in LNetGet() or LNetPut() then we
2698  * assume the Ping or Push timed out.
2699  */
2700 static void
2701 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2702 {
2703         spin_lock(&lp->lp_lock);
2704         /* We've passed through LNetGet() */
2705         if (lp->lp_state & LNET_PEER_PING_SENT) {
2706                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2707                 lp->lp_state |= LNET_PEER_PING_FAILED;
2708                 lp->lp_ping_error = -ETIMEDOUT;
2709                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2710                         libcfs_nid2str(lp->lp_primary_nid));
2711         }
2712         /* We've passed through LNetPut() */
2713         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2714                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2715                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2716                 lp->lp_push_error = -ETIMEDOUT;
2717                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2718                         libcfs_nid2str(lp->lp_primary_nid));
2719         }
2720         spin_unlock(&lp->lp_lock);
2721 }
2722
2723 /*
2724  * Event handler for the discovery EQ.
2725  *
2726  * Called with lnet_res_lock(cpt) held. The cpt is the
2727  * lnet_cpt_of_cookie() of the md handle cookie.
2728  */
2729 static void lnet_discovery_event_handler(struct lnet_event *event)
2730 {
2731         struct lnet_peer *lp = event->md_user_ptr;
2732         struct lnet_ping_buffer *pbuf;
2733         int rc;
2734
2735         /* discovery needs to take another look */
2736         rc = LNET_REDISCOVER_PEER;
2737
2738         CDEBUG(D_NET, "Received event: %d\n", event->type);
2739
2740         switch (event->type) {
2741         case LNET_EVENT_ACK:
2742                 lnet_discovery_event_ack(lp, event);
2743                 break;
2744         case LNET_EVENT_REPLY:
2745                 lnet_discovery_event_reply(lp, event);
2746                 break;
2747         case LNET_EVENT_SEND:
2748                 /* Only send failure triggers a retry. */
2749                 rc = lnet_discovery_event_send(lp, event);
2750                 break;
2751         case LNET_EVENT_UNLINK:
2752                 /* LNetMDUnlink() was called */
2753                 lnet_discovery_event_unlink(lp, event);
2754                 break;
2755         default:
2756                 /* Invalid events. */
2757                 LBUG();
2758         }
2759         lnet_net_lock(LNET_LOCK_EX);
2760         if (event->unlinked) {
2761                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2762                 lnet_ping_buffer_decref(pbuf);
2763                 lnet_peer_decref_locked(lp);
2764         }
2765
2766         /* put peer back at end of request queue, if discovery not already
2767          * done */
2768         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2769                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2770                 wake_up(&the_lnet.ln_dc_waitq);
2771         }
2772         lnet_net_unlock(LNET_LOCK_EX);
2773 }
2774
2775 /*
2776  * Build a peer from incoming data.
2777  *
2778  * The NIDs in the incoming data are supposed to be structured as follows:
2779  *  - loopback
2780  *  - primary NID
2781  *  - other NIDs in same net
2782  *  - NIDs in second net
2783  *  - NIDs in third net
2784  *  - ...
2785  * This is due to the way the list of NIDs in the data is created.
2786  *
2787  * Note that this function will mark the peer uptodate unless an
2788  * ENOMEM is encountered. All other errors are due to a conflict
2789  * between the DLC configuration and what discovery sees. We treat DLC
2790  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2791  * peer from becoming stuck in discovery.
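 *
 * NB: this function consumes the caller's reference on pbuf.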
2792  */
2793 static int lnet_peer_merge_data(struct lnet_peer *lp,
2794                                 struct lnet_ping_buffer *pbuf)
2795 {
2796         struct lnet_peer_ni *lpni;
2797         lnet_nid_t *curnis = NULL;
2798         struct lnet_ni_status *addnis = NULL;
2799         lnet_nid_t *delnis = NULL;
2800         unsigned flags;
2801         int ncurnis;
2802         int naddnis;
2803         int ndelnis;
2804         int nnis = 0;
2805         int i;
2806         int j;
2807         int rc;
2808
2809         flags = LNET_PEER_DISCOVERED;
2810         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2811                 flags |= LNET_PEER_MULTI_RAIL;
2812
2813         /*
2814          * Cache the routing feature for the peer; whether it is enabled
2815          * or disabled as reported by the remote peer.
2816          */
2817         spin_lock(&lp->lp_lock);
2818         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2819                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2820         else
2821                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2822         spin_unlock(&lp->lp_lock);
2823
2824         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2825         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2826         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2827         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2828         if (!curnis || !addnis || !delnis) {
2829                 rc = -ENOMEM;
2830                 goto out;
2831         }
2832         ncurnis = 0;
2833         naddnis = 0;
2834         ndelnis = 0;
2835
2836         /* Construct the list of NIDs present in peer. */
2837         lpni = NULL;
2838         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2839                 curnis[ncurnis++] = lpni->lpni_nid;
2840
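        /*
         * Worked example with hypothetical NIDs: if curnis[] holds
         * {a@tcp, b@tcp} and pbuf lists {lo, a@tcp, c@tcp}, the loops
         * below put c@tcp into addnis[] and b@tcp into delnis[], and
         * just refresh the cached ns_status of a@tcp.
         */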
2841         /*
2842          * Check for NIDs in pbuf not present in curnis[].
2843          * The loop starts at 1 to skip the loopback NID.
2844          */
2845         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2846                 for (j = 0; j < ncurnis; j++)
2847                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2848                                 break;
2849                 if (j == ncurnis)
2850                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2851         }
2852         /*
2853          * Check for NIDs in curnis[] not present in pbuf.
2854          * The nested loop starts at 1 to skip the loopback NID.
2855          *
2856          * But never add the loopback NID to delnis[]: if it is
2857          * present in curnis[] then this peer is for this node.
2858          */
2859         for (i = 0; i < ncurnis; i++) {
2860                 if (curnis[i] == LNET_NID_LO_0)
2861                         continue;
2862                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2863                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2864                                 /*
2865                                  * update the information we cache for the
2866                                  * peer with the latest information we
2867                                  * received
2868                                  */
2869                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2870                                 if (lpni) {
2871                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2872                                         lnet_peer_ni_decref_locked(lpni);
2873                                 }
2874                                 break;
2875                         }
2876                 }
2877                 if (j == pbuf->pb_info.pi_nnis)
2878                         delnis[ndelnis++] = curnis[i];
2879         }
2880
2881         /*
2882          * If we get here and discovery is disabled then we don't want
2883          * to add or delete any NIs. The loop above already updated the
2884          * status of the NIs we do have information on, so we are done.
2885          */
2886         rc = 0;
2887         if (lnet_is_discovery_disabled(lp))
2888                 goto out;
2889
2890         for (i = 0; i < naddnis; i++) {
2891                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2892                 if (rc) {
2893                         CERROR("Error adding NID %s to peer %s: %d\n",
2894                                libcfs_nid2str(addnis[i].ns_nid),
2895                                libcfs_nid2str(lp->lp_primary_nid), rc);
2896                         if (rc == -ENOMEM)
2897                                 goto out;
2898                 }
2899                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2900                 if (lpni) {
2901                         lpni->lpni_ns_status = addnis[i].ns_status;
2902                         lnet_peer_ni_decref_locked(lpni);
2903                 }
2904         }
2905
2906         for (i = 0; i < ndelnis; i++) {
2907                 /*
2908                  * For routers it's okay to delete the primary_nid, because
2909                  * the upper layers don't really rely on it. So if we're
2910                  * being told that the router changed its primary_nid
2911                  * then it's okay to delete it.
2912                  */
2913                 if (lp->lp_rtr_refcount > 0)
2914                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2915                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2916                 if (rc) {
2917                         CERROR("Error deleting NID %s from peer %s: %d\n",
2918                                libcfs_nid2str(delnis[i]),
2919                                libcfs_nid2str(lp->lp_primary_nid), rc);
2920                         if (rc == -ENOMEM)
2921                                 goto out;
2922                 }
2923         }
2924         /*
2925          * Errors other than -ENOMEM are due to peers having been
2926          * configured with DLC. Ignore these because DLC overrides
2927          * Discovery.
2928          */
2929         rc = 0;
2930 out:
2931         CFS_FREE_PTR_ARRAY(curnis, nnis);
2932         CFS_FREE_PTR_ARRAY(addnis, nnis);
2933         CFS_FREE_PTR_ARRAY(delnis, nnis);
2934         lnet_ping_buffer_decref(pbuf);
2935         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2936
2937         if (rc) {
2938                 spin_lock(&lp->lp_lock);
2939                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2940                 lp->lp_state |= LNET_PEER_FORCE_PING;
2941                 spin_unlock(&lp->lp_lock);
2942         }
2943         return rc;
2944 }
2945
2946 /*
2947  * The data in pbuf says lp is its primary peer, but the data was
2948  * received by a different peer. Try to update lp with the data.
2949  */
2950 static int
2951 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2952 {
2953         struct lnet_handle_md mdh;
2954
2955         /* Queue lp for discovery, and force it on the request queue. */
2956         lnet_net_lock(LNET_LOCK_EX);
2957         if (lnet_peer_queue_for_discovery(lp))
2958                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2959         lnet_net_unlock(LNET_LOCK_EX);
2960
2961         LNetInvalidateMDHandle(&mdh);
2962
2963         /*
2964          * Decide whether we can move the peer to the DATA_PRESENT state.
2965          *
2966          * We replace stale data for a multi-rail peer, repair PING_FAILED
2967          * status, and preempt FORCE_PING.
2968          *
2969          * If after that we have DATA_PRESENT, we merge it into this peer.
2970          */
2971         spin_lock(&lp->lp_lock);
2972         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2973                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2974                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2975                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2976                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2977                         lnet_ping_buffer_decref(pbuf);
2978                         pbuf = lp->lp_data;
2979                         lp->lp_data = NULL;
2980                 }
2981         }
2982         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2983                 lnet_ping_buffer_decref(lp->lp_data);
2984                 lp->lp_data = NULL;
2985                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2986         }
2987         if (lp->lp_state & LNET_PEER_PING_FAILED) {
2988                 mdh = lp->lp_ping_mdh;
2989                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2990                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2991                 lp->lp_ping_error = 0;
2992         }
2993         if (lp->lp_state & LNET_PEER_FORCE_PING)
2994                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2995         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2996         spin_unlock(&lp->lp_lock);
2997
2998         if (!LNetMDHandleIsInvalid(mdh))
2999                 LNetMDUnlink(mdh);
3000
3001         if (pbuf)
3002                 return lnet_peer_merge_data(lp, pbuf);
3003
3004         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3005         return 0;
3006 }
3007
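     /* Return true if @nid appears in the NI status array of @pinfo. */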
3008 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3009 {
3010         int i;
3011
3012         for (i = 0; i < pinfo->pi_nnis; i++) {
3013                 if (pinfo->pi_ni[i].ns_nid == nid)
3014                         return true;
3015         }
3016
3017         return false;
3018 }
3019
3020 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3021  * to the discovery queue a reference was taken that will prevent the peer from
3022  * actually being freed by this function. After this function exits the
3023  * discovery thread should call lnet_peer_discovery_complete() which will
3024  * drop that reference as well as wake any waiters that may also be holding a
3025  * ref on the peer.
3026  */
3027 static int lnet_peer_deletion(struct lnet_peer *lp)
3028 __must_hold(&lp->lp_lock)
3029 {
3030         struct list_head rlist;
3031         struct lnet_route *route, *tmp;
3032         int sensitivity = lp->lp_health_sensitivity;
3033
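             /* routes using this peer as gateway are parked on rlist and
              * re-added after the peer itself has been deleted
              */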
3034         INIT_LIST_HEAD(&rlist);
3035
3036         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3037                           LNET_PEER_FORCE_PUSH);
3038         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3039                libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3040
3041         /* no-op if lnet_peer_del() has already been called on this peer */
3042         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3043                 return 0;
3044
3045         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3046                 return -ESHUTDOWN;
3047
3048         spin_unlock(&lp->lp_lock);
3049
3050         mutex_lock(&the_lnet.ln_api_mutex);
3051
3052         lnet_net_lock(LNET_LOCK_EX);
3053         /* remove the peer from the discovery work
3054          * queue, if it's on there, in preparation
3055          * for deleting it.
3056          */
3057         if (!list_empty(&lp->lp_dc_list))
3058                 list_del(&lp->lp_dc_list);
3059         list_for_each_entry_safe(route, tmp,
3060                                  &lp->lp_routes,
3061                                  lr_gwlist)
3062                 lnet_move_route(route, NULL, &rlist);
3063         lnet_net_unlock(LNET_LOCK_EX);
3064
3065         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3066         lnet_peer_del(lp);
3067
3068         list_for_each_entry_safe(route, tmp,
3069                                  &rlist, lr_list) {
3070                 /* re-add these routes */
3071                 lnet_add_route(route->lr_net,
3072                                route->lr_hops,
3073                                route->lr_nid,
3074                                route->lr_priority,
3075                                sensitivity);
3076                 LIBCFS_FREE(route, sizeof(*route));
3077         }
3078
3079         mutex_unlock(&the_lnet.ln_api_mutex);
3080
3081         spin_lock(&lp->lp_lock);
3082
3083         return 0;
3084 }
3085
3086 /*
3087  * Update a peer using the data received.
3088  */
3089 static int lnet_peer_data_present(struct lnet_peer *lp)
3090 __must_hold(&lp->lp_lock)
3091 {
3092         struct lnet_ping_buffer *pbuf;
3093         struct lnet_peer_ni *lpni;
3094         lnet_nid_t nid = LNET_NID_ANY;
3095         unsigned flags;
3096         int rc = 0;
3097
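             /* Detach the ping data and mark the NIDs up to date while
              * still holding lp_lock; the data is processed below.
              */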
3098         pbuf = lp->lp_data;
3099         lp->lp_data = NULL;
3100         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3101         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3102         spin_unlock(&lp->lp_lock);
3103
3104         /*
3105          * Modifications of peer structures are done while holding the
3106          * ln_api_mutex. A global lock is required because we may be
3107          * modifying multiple peer structures, and a mutex greatly
3108          * simplifies memory management.
3109          *
3110          * The actual changes to the data structures must also protect
3111          * against concurrent lookups, for which the lnet_net_lock in
3112          * LNET_LOCK_EX mode is used.
3113          */
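             /*
              * Roughly, the pattern used below and by the helpers we call
              * (a sketch, not the literal sequence):
              *
              *      mutex_lock(&the_lnet.ln_api_mutex);
              *      lnet_net_lock(LNET_LOCK_EX);
              *      ... modify peer/peer_ni structures ...
              *      lnet_net_unlock(LNET_LOCK_EX);
              *      mutex_unlock(&the_lnet.ln_api_mutex);
              */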
3114         mutex_lock(&the_lnet.ln_api_mutex);
3115         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3116                 rc = -ESHUTDOWN;
3117                 goto out;
3118         }
3119
3120         /*
3121          * If this peer is not on the peer list then it is being torn
3122          * down, and our reference count may be all that is keeping it
3123          * alive. Don't do any work on it.
3124          */
3125         if (list_empty(&lp->lp_peer_list))
3126                 goto out;
3127
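             /* NIDs added from this ping data are marked discovered, and
              * multi-rail if the peer advertises the Multi-Rail feature.
              */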
3128         flags = LNET_PEER_DISCOVERED;
3129         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3130                 flags |= LNET_PEER_MULTI_RAIL;
3131
3132         /*
3133          * Check whether the primary NID in the message matches the
3134          * primary NID of the peer. If it does, update the peer; if
3135          * it does not, check whether there is already a peer with
3136          * that primary NID. If no such peer exists, try to update
3137          * the primary NID of the current peer (allowed if it was
3138          * created due to message traffic) and complete the update.
3139          * If the peer did exist, hand off the data to it.
3140          *
3141          * The peer for the loopback interface is a special case: this
3142          * is the peer for the local node, and we want to set its
3143          * primary NID to the correct value here. Moreover, this peer
3144          * can show up with only the loopback NID in the ping buffer.
3145          */
3146         if (pbuf->pb_info.pi_nnis <= 1)
3147                 goto out;
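             /* pi_ni[0] is the loopback NID, so pi_ni[1] holds the peer's
              * primary NID
              */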
3148         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3149         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3150                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3151                 if (!rc)
3152                         rc = lnet_peer_merge_data(lp, pbuf);
3153         /*
3154          * If the primary NID of the peer is present in the ping info
3155          * returned from the peer, but it's not the primary NID we have
3156          * cached locally, and discovery is disabled, then we don't want
3157          * to update our local peer info by adding or removing NIDs. We
3158          * just want to update the status of the NIDs that we currently
3159          * have recorded in that peer.
3160          */
3161         } else if (lp->lp_primary_nid == nid ||
3162                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3163                     lnet_is_discovery_disabled(lp))) {
3164                 rc = lnet_peer_merge_data(lp, pbuf);
3165         } else {
3166                 lpni = lnet_find_peer_ni_locked(nid);
3167                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3168                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3169                         if (rc) {
3170                                 CERROR("Primary NID error %s versus %s: %d\n",
3171                                        libcfs_nid2str(lp->lp_primary_nid),
3172                                        libcfs_nid2str(nid), rc);
3173                         } else {
3174                                 rc = lnet_peer_merge_data(lp, pbuf);
3175                         }
3176                         if (lpni)
3177                                 lnet_peer_ni_decref_locked(lpni);
3178                 } else {
3179                         struct lnet_peer *new_lp;
3180                         new_lp = lpni->lpni_peer_net->lpn_peer;
3181                         /*
3182                          * if lp has discovery/MR enabled then new_lp should
3183                          * have discovery/MR enabled as well, since it's the
3184                          * same peer, which we're about to merge.
3185                          */
3186                         spin_lock(&lp->lp_lock);
3187                         spin_lock(&new_lp->lp_lock);
3188                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3189                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3190                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3191                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3192                         /* If we're processing a ping reply then we may be
3193                          * about to send a push to the peer that we ping'd.
3194                          * Since the ping reply that we're processing was
3195                          * received by lp, we need to set the discovery source
3196                          * NID for new_lp to the NID stored in lp.
3197                          */
3198                         if (lp->lp_disc_src_nid != LNET_NID_ANY)
3199                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3200                         spin_unlock(&new_lp->lp_lock);
3201                         spin_unlock(&lp->lp_lock);
3202
3203                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3204                         lnet_consolidate_routes_locked(lp, new_lp);
3205                         lnet_peer_ni_decref_locked(lpni);
3206                 }
3207         }
3208 out:
3209         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3210                libcfs_nid2str(lp->lp_primary_nid), lp, rc, lp->lp_state);
3211         mutex_unlock(&the_lnet.ln_api_mutex);
3212
3213         spin_lock(&lp->lp_lock);
3214         /* Tell discovery to re-check the peer immediately. */
3215         if (!rc)
3216                 rc = LNET_REDISCOVER_PEER;
3217         return rc;
3218 }
3219
3220 /*
3221  * A ping failed. Clear the PING_FAILED state and set the
3222  * FORCE_PING state, to ensure a retry even if discovery is
3223  * disabled. This avoids being left with incorrect state.
3224  */
3225 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3226 __must_hold(&lp->lp_lock)
3227 {
3228         struct lnet_handle_md mdh;
3229         int rc;
3230
3231         mdh = lp->lp_ping_mdh;
3232         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3233         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3234         lp->lp_state |= LNET_PEER_FORCE_PING;
3235         rc = lp->lp_ping_error;
3236         lp->lp_ping_error = 0;
3237         spin_unlock(&lp->lp_lock);
3238
3239         if (!LNetMDHandleIsInvalid(mdh))
3240                 LNetMDUnlink(mdh);
3241
3242         CDEBUG(D_NET, "peer %s:%d\n",
3243                libcfs_nid2str(lp->lp_primary_nid), rc);
3244
3245         spin_lock(&lp->lp_lock);
3246         return rc ? rc : LNET_REDISCOVER_PEER;
3247 }
3248
3249 /*
3250  * Select NID to send a Ping or Push to.
3251  */
3252 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3253 {
3254         struct lnet_peer_ni *lpni;
3255
3256         /* Look for a direct-connected NID for this peer. */
3257         lpni = NULL;
3258         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3259                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3260                         continue;
3261                 break;
3262         }
3263         if (lpni)
3264                 return lpni->lpni_nid;
3265
3266         /* Look for a NID for this peer reachable via a router. */
3267         lpni = NULL;
3268         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3269                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3270                         continue;
3271                 break;
3272         }
3273         if (lpni)
3274                 return lpni->lpni_nid;
3275
3276         return LNET_NID_ANY;
3277 }
3278
3279 /* Active side of ping. */
3280 static int lnet_peer_send_ping(struct lnet_peer *lp)
3281 __must_hold(&lp->lp_lock)
3282 {
3283         lnet_nid_t pnid;
3284         int nnis;
3285         int rc;
3286         int cpt;
3287
3288         lp->lp_state |= LNET_PEER_PING_SENT;
3289         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3290         spin_unlock(&lp->lp_lock);
3291
3292         cpt = lnet_net_lock_current();
3293         /* Refcount for MD. */
3294         lnet_peer_addref_locked(lp);
3295         pnid = lnet_peer_select_nid(lp);
3296         lnet_net_unlock(cpt);
3297
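             /* size the ping reply buffer for at least the minimum
              * number of interfaces
              */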
3298         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3299
3300         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3301                             the_lnet.ln_dc_handler, false);
3302
3303         /*
3304          * if LNetMDBind in lnet_send_ping fails we need to decrement the
3305          * refcount on the peer ourselves; otherwise LNetMDUnlink will be
3306          * called, which will eventually do that for us.
3307          */
3308         if (rc > 0) {
3309                 lnet_net_lock(cpt);
3310                 lnet_peer_decref_locked(lp);
3311                 lnet_net_unlock(cpt);
3312                 rc = -rc; /* convert to a negative errno */
3313                 goto fail_error;
3314         } else if (rc < 0) {
3315                 goto fail_error;
3316         }
3317
3318         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3319
3320         spin_lock(&lp->lp_lock);
3321         return 0;
3322
3323 fail_error:
3324         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3325         /*
3326          * The errors that get us here are considered hard errors and
3327          * cause Discovery to terminate. So we clear PING_SENT, but do
3328          * not set either PING_FAILED or FORCE_PING. In fact we need
3329          * to clear PING_FAILED, because the unlink event handler will
3330          * have set it if we called LNetMDUnlink() above.
3331          */
3332         spin_lock(&lp->lp_lock);
3333         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3334         return rc;
3335 }
3336
3337 /*
3338  * This function exists because you cannot call LNetMDUnlink() from an
3339  * event handler.
3340  */
3341 static int lnet_peer_push_failed(struct lnet_peer *lp)
3342 __must_hold(&lp->lp_lock)
3343 {
3344         struct lnet_handle_md mdh;
3345         int rc;
3346
3347         mdh = lp->lp_push_mdh;
3348         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3349         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3350         rc = lp->lp_push_error;
3351         lp->lp_push_error = 0;
3352         spin_unlock(&lp->lp_lock);
3353
3354         if (!LNetMDHandleIsInvalid(mdh))
3355                 LNetMDUnlink(mdh);
3356
3357         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3358         spin_lock(&lp->lp_lock);
3359         return rc ? rc : LNET_REDISCOVER_PEER;
3360 }
3361
3362 /*
3363  * Mark the peer as discovered.
3364  */
3365 static int lnet_peer_discovered(struct lnet_peer *lp)
3366 __must_hold(&lp->lp_lock)
3367 {
3368         lp->lp_state |= LNET_PEER_DISCOVERED;
3369         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3370                           LNET_PEER_REDISCOVER);
3371
3372         lp->lp_dc_error = 0;
3373
3374         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3375
3376         return 0;
3377 }
3378
3379 /* Active side of push. */
3380 static int lnet_peer_send_push(struct lnet_peer *lp)
3381 __must_hold(&lp->lp_lock)
3382 {
3383         struct lnet_ping_buffer *pbuf;
3384         struct lnet_process_id id;
3385         struct lnet_md md;
3386         int cpt;
3387         int rc;
3388
3389         /* Don't push to a non-multi-rail peer. */
3390         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3391                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3392                 /* if peer's NIDs are uptodate then peer is discovered */
3393                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3394                         rc = lnet_peer_discovered(lp);
3395                         return rc;
3396                 }
3397
3398                 return 0;
3399         }
3400
3401         lp->lp_state |= LNET_PEER_PUSH_SENT;
3402         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3403         spin_unlock(&lp->lp_lock);
3404
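             /* the push payload is a snapshot of our own ping target */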
3405         cpt = lnet_net_lock_current();
3406         pbuf = the_lnet.ln_ping_target;
3407         lnet_ping_buffer_addref(pbuf);
3408         lnet_net_unlock(cpt);
3409
3410         /* Push source MD */
3411         md.start     = &pbuf->pb_info;
3412         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3413         md.threshold = 2; /* Put/Ack */
3414         md.max_size  = 0;
3415         md.options   = LNET_MD_TRACK_RESPONSE;
3416         md.handler   = the_lnet.ln_dc_handler;
3417         md.user_ptr  = lp;
3418
3419         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3420         if (rc) {
3421                 lnet_ping_buffer_decref(pbuf);
3422                 CERROR("Can't bind push source MD: %d\n", rc);
3423                 goto fail_error;
3424         }
3425         cpt = lnet_net_lock_current();
3426         /* Refcount for MD. */
3427         lnet_peer_addref_locked(lp);
3428         id.pid = LNET_PID_LUSTRE;
3429         id.nid = lnet_peer_select_nid(lp);
3430         lnet_net_unlock(cpt);
3431
3432         if (id.nid == LNET_NID_ANY) {
3433                 rc = -EHOSTUNREACH;
3434                 goto fail_unlink;
3435         }
3436
3437         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3438                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3439                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3440
3441         /*
3442          * Reset the discovery source NID. There is no need to restrict
3443          * sending from that source if we call lnet_push_update_to_peers().
3444          * It'll get set to a specific NID if we initiate discovery from
3445          * scratch.
3446          */
3447         lp->lp_disc_src_nid = LNET_NID_ANY;
3448
3449         if (rc)
3450                 goto fail_unlink;
3451
3452         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3453
3454         spin_lock(&lp->lp_lock);
3455         return 0;
3456
3457 fail_unlink:
3458         LNetMDUnlink(lp->lp_push_mdh);
3459         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3460 fail_error:
3461         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3462         /*
3463          * The errors that get us here are considered hard errors and
3464          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3465          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3466          * because the unlink event handler will have set it if we
3467          * called LNetMDUnlink() above.
3468          */
3469         spin_lock(&lp->lp_lock);
3470         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3471         return rc;
3472 }
3473
3474 /*
3475  * An unrecoverable error was encountered during discovery.
3476  * Set error status in peer and abort discovery.
3477  */
3478 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3479 {
3480         CDEBUG(D_NET, "Discovery error %s: %d\n",
3481                libcfs_nid2str(lp->lp_primary_nid), error);
3482
3483         spin_lock(&lp->lp_lock);
3484         lp->lp_dc_error = error;
3485         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3486         lp->lp_state |= LNET_PEER_REDISCOVER;
3487         spin_unlock(&lp->lp_lock);
3488 }
3489
3490 /*
3491  * Wait for work to be queued or some other change that must be
3492  * attended to. Returns non-zero if the discovery thread should shut
3493  * down.
3494  */
3495 static int lnet_peer_discovery_wait_for_work(void)
3496 {
3497         int cpt;
3498         int rc = 0;
3499
3500         DEFINE_WAIT(wait);
3501
3502         cpt = lnet_net_lock_current();
3503         for (;;) {
3504                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3505                                 TASK_INTERRUPTIBLE);
3506                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3507                         break;
3508                 if (lnet_push_target_resize_needed() ||
3509                     the_lnet.ln_push_target->pb_needs_post)
3510                         break;
3511                 if (!list_empty(&the_lnet.ln_dc_request))
3512                         break;
3513                 if (!list_empty(&the_lnet.ln_msg_resend))
3514                         break;
3515                 lnet_net_unlock(cpt);
3516
3517                 /*
3518                  * wake up at least once a second to check for peers that
3519                  * have been stuck on the working queue for longer than
3520                  * the peer timeout.
3521                  */
3522                 schedule_timeout(cfs_time_seconds(1));
3523                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3524                 cpt = lnet_net_lock_current();
3525         }
3526         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3527
3528         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3529                 rc = -ESHUTDOWN;
3530
3531         lnet_net_unlock(cpt);
3532
3533         CDEBUG(D_NET, "woken: %d\n", rc);
3534
3535         return rc;
3536 }
3537
3538 /*
3539  * Messages that were pending on a destroyed peer will be put on a global
3540  * resend list. The resend list will be checked by the discovery
3541  * thread when it wakes up, and the messages will be resent. These
3542  * messages can still be sendable in case the lpni which was the initial
3543  * cause of the message re-queue was transferred to another peer.
3544  *
3545  * It is possible that LNet could be shut down while we're iterating
3546  * through the list. lnet_shutdown_lndnets() will attempt to access the
3547  * resend list, but will have to wait until the spinlock is released, by
3548  * which time there shouldn't be any more messages on the resend list.
3549  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3550  * for the messages so they can be released. The other case is that
3551  * lnet_shutdown_lndnets() can finalize all the messages before this
3552  * function can visit the resend list, in which case this function will be
3553  * a no-op.
3554  */
3555 static void lnet_resend_msgs(void)
3556 {
3557         struct lnet_msg *msg, *tmp;
3558         LIST_HEAD(resend);
3559         int rc;
3560
3561         spin_lock(&the_lnet.ln_msg_resend_lock);
3562         list_splice(&the_lnet.ln_msg_resend, &resend);
3563         spin_unlock(&the_lnet.ln_msg_resend_lock);
3564
3565         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3566                 list_del_init(&msg->msg_list);
3567                 rc = lnet_send(msg->msg_src_nid_param, msg,
3568                                msg->msg_rtr_nid_param);
3569                 if (rc < 0) {
3570                         CNETERR("Error sending %s to %s: %d\n",
3571                                lnet_msgtyp2str(msg->msg_type),
3572                                libcfs_id2str(msg->msg_target), rc);
3573                         lnet_finalize(msg, rc);
3574                 }
3575         }
3576 }
3577
3578 /* The discovery thread. */
3579 static int lnet_peer_discovery(void *arg)
3580 {
3581         struct lnet_peer *lp;
3582         int rc;
3583
3584         wait_for_completion(&the_lnet.ln_started);
3585
3586         CDEBUG(D_NET, "started\n");
3587
3588         for (;;) {
3589                 if (lnet_peer_discovery_wait_for_work())
3590                         break;
3591
3592                 if (lnet_push_target_resize_needed())
3593                         lnet_push_target_resize();
3594                 else if (the_lnet.ln_push_target->pb_needs_post)
3595                         lnet_push_target_post(the_lnet.ln_push_target,
3596                                               &the_lnet.ln_push_target_md);
3597
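                     /* resend any messages that were queued on now-destroyed
                      * peers
                      */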
3598                 lnet_resend_msgs();
3599
3600                 lnet_net_lock(LNET_LOCK_EX);
3601                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3602                         lnet_net_unlock(LNET_LOCK_EX);
3603                         break;
3604                 }
3605
3606                 /*
3607                  * Process all incoming discovery work requests.  When
3608                  * discovery must wait on a peer to change state, it
3609                  * is added to the tail of the ln_dc_working queue. A
3610                  * timestamp keeps track of when the peer was added,
3611                  * so we can time out discovery requests that take too
3612                  * long.
3613                  */
3614                 while (!list_empty(&the_lnet.ln_dc_request)) {
3615                         lp = list_first_entry(&the_lnet.ln_dc_request,
3616                                               struct lnet_peer, lp_dc_list);
3617                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3618                         /*
3619                          * set the time the peer was put on the dc_working
3620                          * queue. It shouldn't remain on the queue
3621                          * forever, in case the GET message (for ping)
3622                          * doesn't get a REPLY or the PUT message (for
3623                          * push) doesn't get an ACK.
3624                          */
3625                         lp->lp_last_queued = ktime_get_real_seconds();
3626                         lnet_net_unlock(LNET_LOCK_EX);
3627
3628                         if (lnet_push_target_resize_needed())
3629                                 lnet_push_target_resize();
3630                         else if (the_lnet.ln_push_target->pb_needs_post)
3631                                 lnet_push_target_post(the_lnet.ln_push_target,
3632                                                       &the_lnet.ln_push_target_md);
3633
3634                         /*
3635                          * Select an action depending on the state of
3636                          * the peer and whether discovery is disabled.
3637                          * The check whether discovery is disabled is
3638                          * done after the code that handles processing
3639                          * for arrived data, cleanup for failures, and
3640                          * forcing a Ping or Push.
3641                          */
3642                         spin_lock(&lp->lp_lock);
3643                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3644                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3645                                 lp->lp_state);
3646                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3647                                             LNET_PEER_MARK_DELETED))
3648                                 rc = lnet_peer_deletion(lp);
3649                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3650                                 rc = lnet_peer_data_present(lp);
3651                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3652                                 rc = lnet_peer_ping_failed(lp);
3653                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3654                                 rc = lnet_peer_push_failed(lp);
3655                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3656                                 rc = lnet_peer_send_ping(lp);
3657                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3658                                 rc = lnet_peer_send_push(lp);
3659                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3660                                 rc = lnet_peer_send_ping(lp);
3661                         else if (lnet_peer_needs_push(lp))
3662                                 rc = lnet_peer_send_push(lp);
3663                         else
3664                                 rc = lnet_peer_discovered(lp);
3665                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3666                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3667                                 lp->lp_state, rc);
3668                         spin_unlock(&lp->lp_lock);
3669
3670                         lnet_net_lock(LNET_LOCK_EX);
3671                         if (rc == LNET_REDISCOVER_PEER) {
3672                                 list_move(&lp->lp_dc_list,
3673                                           &the_lnet.ln_dc_request);
3674                         } else if (rc) {
3675                                 lnet_peer_discovery_error(lp, rc);
3676                         }
3677                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3678                                 lnet_peer_discovery_complete(lp);
3679                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3680                                 break;
3681
3682                 }
3683
3684                 lnet_net_unlock(LNET_LOCK_EX);
3685         }
3686
3687         CDEBUG(D_NET, "stopping\n");
3688         /*
3689          * Clean up before telling lnet_peer_discovery_stop() that
3690          * we're done. Use wake_up() below to somewhat reduce the
3691          * size of the thundering herd if there are multiple threads
3692          * waiting on discovery of a single peer.
3693          */
3694
3695         /* Queue cleanup 1: stop all pending pings and pushes. */
3696         lnet_net_lock(LNET_LOCK_EX);
3697         while (!list_empty(&the_lnet.ln_dc_working)) {
3698                 lp = list_first_entry(&the_lnet.ln_dc_working,
3699                                       struct lnet_peer, lp_dc_list);
3700                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3701                 lnet_net_unlock(LNET_LOCK_EX);
3702                 lnet_peer_cancel_discovery(lp);
3703                 lnet_net_lock(LNET_LOCK_EX);
3704         }
3705         lnet_net_unlock(LNET_LOCK_EX);
3706
3707         /* Queue cleanup 2: wait for the expired queue to clear. */
3708         while (!list_empty(&the_lnet.ln_dc_expired))
3709                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3710
3711         /* Queue cleanup 3: clear the request queue. */
3712         lnet_net_lock(LNET_LOCK_EX);
3713         while (!list_empty(&the_lnet.ln_dc_request)) {
3714                 lp = list_first_entry(&the_lnet.ln_dc_request,
3715                                       struct lnet_peer, lp_dc_list);
3716                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3717                 lnet_peer_discovery_complete(lp);
3718         }
3719         lnet_net_unlock(LNET_LOCK_EX);
3720
3721         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3722         the_lnet.ln_dc_handler = NULL;
3723
3724         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3725         wake_up(&the_lnet.ln_dc_waitq);
3726
3727         CDEBUG(D_NET, "stopped\n");
3728
3729         return 0;
3730 }
3731
3732 /* ln_api_mutex is held on entry. */
3733 int lnet_peer_discovery_start(void)
3734 {
3735         struct task_struct *task;
3736         int rc = 0;
3737
3738         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3739                 return -EALREADY;
3740
3741         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3742         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3743         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3744         if (IS_ERR(task)) {
3745                 rc = PTR_ERR(task);
3746                 CERROR("Can't start peer discovery thread: %d\n", rc);
3747
3748                 the_lnet.ln_dc_handler = NULL;
3749
3750                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3751         }
3752
3753         CDEBUG(D_NET, "discovery start: %d\n", rc);
3754
3755         return rc;
3756 }
3757
3758 /* ln_api_mutex is held on entry. */
3759 void lnet_peer_discovery_stop(void)
3760 {
3761         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3762                 return;
3763
3764         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3765         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3766
3767         /* In the LNetNIInit() path we may be stopping discovery before it
3768          * entered its work loop.
3769          */
3770         if (!completion_done(&the_lnet.ln_started))
3771                 complete(&the_lnet.ln_started);
3772         else
3773                 wake_up(&the_lnet.ln_dc_waitq);
3774
3775         wait_event(the_lnet.ln_dc_waitq,
3776                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3777
3778         LASSERT(list_empty(&the_lnet.ln_dc_request));
3779         LASSERT(list_empty(&the_lnet.ln_dc_working));
3780         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3781
3782         CDEBUG(D_NET, "discovery stopped\n");
3783 }
3784
3785 /* Debugging */
3786
3787 void
3788 lnet_debug_peer(lnet_nid_t nid)
3789 {
3790         char                    *aliveness = "NA";
3791         struct lnet_peer_ni     *lp;
3792         int                     cpt;
3793
3794         cpt = lnet_cpt_of_nid(nid, NULL);
3795         lnet_net_lock(cpt);
3796
3797         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3798         if (IS_ERR(lp)) {
3799                 lnet_net_unlock(cpt);
3800                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3801                 return;
3802         }
3803
3804         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3805                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3806
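             /* columns: nid, refcount, aliveness, max tx credits,
              * rtr credits, min rtr credits, tx credits, min tx credits,
              * tx queue bytes
              */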
3807         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3808                libcfs_nid2str(lp->lpni_nid), kref_read(&lp->lpni_kref),
3809                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3810                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3811                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3812
3813         lnet_peer_ni_decref_locked(lp);
3814
3815         lnet_net_unlock(cpt);
3816 }
3817
3818 /* Gathering information for userspace. */
3819
3820 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3821                           char aliveness[LNET_MAX_STR_LEN],
3822                           __u32 *cpt_iter, __u32 *refcount,
3823                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3824                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3825                           __u32 *peer_tx_qnob)
3826 {
3827         struct lnet_peer_table          *peer_table;
3828         struct lnet_peer_ni             *lp;
3829         int                             j;
3830         int                             lncpt;
3831         bool                            found = false;
3832
3833         /* get the number of CPTs */
3834         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3835
3836         /* if the cpt number to be examined is >= the number of cpts in
3837          * the system then indicate that there are no more cpts to examine
3838          */
3839         if (*cpt_iter >= lncpt)
3840                 return -ENOENT;
3841
3842         /* get the current table */
3843         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3844         /* if the ptable is NULL then there are no more cpts to examine */
3845         if (peer_table == NULL)
3846                 return -ENOENT;
3847
3848         lnet_net_lock(*cpt_iter);
3849
3850         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3851                 struct list_head *peers = &peer_table->pt_hash[j];
3852
3853                 list_for_each_entry(lp, peers, lpni_hashlist) {
3854                         if (peer_index-- > 0)
3855                                 continue;
3856
3857                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3858                         if (lnet_isrouter(lp) ||
3859                                 lnet_peer_aliveness_enabled(lp))
3860                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3861                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3862
3863                         *nid = lp->lpni_nid;
3864                         *refcount = kref_read(&lp->lpni_kref);
3865                         *ni_peer_tx_credits =
3866                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3867                         *peer_tx_credits = lp->lpni_txcredits;
3868                         *peer_rtr_credits = lp->lpni_rtrcredits;
3869                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3870                         *peer_tx_qnob = lp->lpni_txqnob;
3871
3872                         found = true;
3873                 }
3874
3875         }
3876         lnet_net_unlock(*cpt_iter);
3877
3878         *cpt_iter = lncpt;
3879
3880         return found ? 0 : -ENOENT;
3881 }
3882
3883 /* ln_api_mutex is held, which keeps the peer list stable */
3884 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3885 {
3886         struct lnet_ioctl_element_stats *lpni_stats;
3887         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3888         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3889         struct lnet_peer_ni_credit_info *lpni_info;
3890         struct lnet_peer_ni *lpni;
3891         struct lnet_peer *lp;
3892         lnet_nid_t nid;
3893         __u32 size;
3894         int rc;
3895
3896         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3897
3898         if (!lp) {
3899                 rc = -ENOENT;
3900                 goto out;
3901         }
3902
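             /* one record of this size is written per peer NI */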
3903         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3904                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3905         size *= lp->lp_nnis;
3906         if (size > cfg->prcfg_size) {
3907                 cfg->prcfg_size = size;
3908                 rc = -E2BIG;
3909                 goto out_lp_decref;
3910         }
3911
3912         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3913         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3914         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3915         cfg->prcfg_count = lp->lp_nnis;
3916         cfg->prcfg_size = size;
3917         cfg->prcfg_state = lp->lp_state;
3918
3919         /* Allocate helper buffers. */
3920         rc = -ENOMEM;
3921         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3922         if (!lpni_info)
3923                 goto out_lp_decref;
3924         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3925         if (!lpni_stats)
3926                 goto out_free_info;
3927         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3928         if (!lpni_msg_stats)
3929                 goto out_free_stats;
3930         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3931         if (!lpni_hstats)
3932                 goto out_free_msg_stats;
3933
3935         lpni = NULL;
3936         rc = -EFAULT;
3937         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3938                 nid = lpni->lpni_nid;
3939                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3940                         goto out_free_hstats;
3941                 bulk += sizeof(nid);
3942
3943                 memset(lpni_info, 0, sizeof(*lpni_info));
3944                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3945                 if (lnet_isrouter(lpni) ||
3946                         lnet_peer_aliveness_enabled(lpni))
3947                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3948                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3949
3950                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
3951                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3952                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3953                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3954                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3955                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3956                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3957                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3958                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3959                         goto out_free_hstats;
3960                 bulk += sizeof(*lpni_info);
3961
3962                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3963                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3964                                                             LNET_STATS_TYPE_SEND);
3965                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3966                                                             LNET_STATS_TYPE_RECV);
3967                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3968                                                             LNET_STATS_TYPE_DROP);
3969                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3970                         goto out_free_hstats;
3971                 bulk += sizeof(*lpni_stats);
3972                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3973                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3974                         goto out_free_hstats;
3975                 bulk += sizeof(*lpni_msg_stats);
3976                 lpni_hstats->hlpni_network_timeout =
3977                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3978                 lpni_hstats->hlpni_remote_dropped =
3979                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3980                 lpni_hstats->hlpni_remote_timeout =
3981                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3982                 lpni_hstats->hlpni_remote_error =
3983                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3984                 lpni_hstats->hlpni_health_value =
3985                   atomic_read(&lpni->lpni_healthv);
3986                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3987                         goto out_free_hstats;
3988                 bulk += sizeof(*lpni_hstats);
3989         }
3990         rc = 0;
3991
3992 out_free_hstats:
3993         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3994 out_free_msg_stats:
3995         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3996 out_free_stats:
3997         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3998 out_free_info:
3999         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4000 out_lp_decref:
4001         lnet_peer_decref_locked(lp);
4002 out:
4003         return rc;
4004 }
4005
4006 /* must hold net_lock/0 */
4007 void
4008 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4009                                      struct list_head *recovery_queue,
4010                                      time64_t now)
4011 {
4012         /* the monitor thread could've shut down and cleaned up the queues */
4013         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4014                 return;
4015
4016         if (!list_empty(&lpni->lpni_recovery))
4017                 return;
4018
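             /* a fully healthy peer NI needs no recovery */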
4019         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4020                 return;
4021
4022         if (!lpni->lpni_last_alive) {
4023                 CDEBUG(D_NET,
4024                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4025                        libcfs_nid2str(lpni->lpni_nid), lpni,
4026                        lpni->lpni_last_alive);
4027                 return;
4028         }
4029
4030         if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
4031                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4032                        libcfs_nid2str(lpni->lpni_nid),
4033                        lpni->lpni_last_alive);
4034                 /* Reset the ping count so that if this peer NI is added back to
4035                  * the recovery queue we will send the first ping right away.
4036                  */
4037                 lpni->lpni_ping_count = 0;
4038                 return;
4039         }
4040
4041         /* This peer NI is going on the recovery queue, so take a ref on it */
4042         lnet_peer_ni_addref_locked(lpni);
4043
4044         lnet_peer_ni_set_next_ping(lpni, now);
4045
4046         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4047                libcfs_nid2str(lpni->lpni_nid),
4048                lpni->lpni_ping_count,
4049                lpni->lpni_next_ping,
4050                lpni->lpni_last_alive,
4051                atomic_read(&lpni->lpni_healthv));
4052
4053         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4054 }
4055
4056 /* Call with the ln_api_mutex held */
4057 void
4058 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4059 {
4060         struct lnet_peer_table *ptable;
4061         struct lnet_peer *lp;
4062         struct lnet_peer_net *lpn;
4063         struct lnet_peer_ni *lpni;
4064         int lncpt;
4065         int cpt;
4066         time64_t now;
4067
4068         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4069                 return;
4070
4071         now = ktime_get_seconds();
4072
4073         if (!all) {
4074                 lnet_net_lock(LNET_LOCK_EX);
4075                 lpni = lnet_find_peer_ni_locked(nid);
4076                 if (!lpni) {
4077                         lnet_net_unlock(LNET_LOCK_EX);
4078                         return;
4079                 }
4080                 atomic_set(&lpni->lpni_healthv, value);
4081                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4082                                              &the_lnet.ln_mt_peerNIRecovq, now);
4083                 lnet_peer_ni_decref_locked(lpni);
4084                 lnet_net_unlock(LNET_LOCK_EX);
4085                 return;
4086         }
4087
4088         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4089
4090         /*
4091          * Walk all the peers and reset the health value for each one to the
4092          * specified value.
4093          */
4094         lnet_net_lock(LNET_LOCK_EX);
4095         for (cpt = 0; cpt < lncpt; cpt++) {
4096                 ptable = the_lnet.ln_peer_tables[cpt];
4097                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4098                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4099                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4100                                                     lpni_peer_nis) {
4101                                         atomic_set(&lpni->lpni_healthv, value);
4102                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4103                                              &the_lnet.ln_mt_peerNIRecovq, now);
4104                                 }
4105                         }
4106                 }
4107         }
4108         lnet_net_unlock(LNET_LOCK_EX);
4109 }
4110