1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/peer.c
33  */
34
35 #define DEBUG_SUBSYSTEM S_LNET
36
37 #include <linux/sched.h>
38 #ifdef HAVE_SCHED_HEADERS
39 #include <linux/sched/signal.h>
40 #endif
41 #include <linux/uaccess.h>
42
43 #include <lnet/udsp.h>
44 #include <lnet/lib-lnet.h>
45 #include <uapi/linux/lnet/lnet-dlc.h>
46
47 /* Value indicating that recovery needs to re-check a peer immediately. */
48 #define LNET_REDISCOVER_PEER    (1)
49
50 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
51
52 static void
53 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
54 {
55         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
56                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
57                 lnet_peer_ni_decref_locked(lpni);
58         }
59 }
60
61 void
62 lnet_peer_net_added(struct lnet_net *net)
63 {
64         struct lnet_peer_ni *lpni, *tmp;
65
66         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
67                                  lpni_on_remote_peer_ni_list) {
68
69                 if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
70                         lpni->lpni_net = net;
71
72                         spin_lock(&lpni->lpni_lock);
73                         lpni->lpni_txcredits =
74                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
75                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
76                         lpni->lpni_rtrcredits =
77                                 lnet_peer_buffer_credits(lpni->lpni_net);
78                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
79                         spin_unlock(&lpni->lpni_lock);
80
81                         lnet_peer_remove_from_remote_list(lpni);
82                 }
83         }
84 }
85
86 static void
87 lnet_peer_tables_destroy(void)
88 {
89         struct lnet_peer_table  *ptable;
90         struct list_head        *hash;
91         int                     i;
92         int                     j;
93
94         if (!the_lnet.ln_peer_tables)
95                 return;
96
97         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
98                 hash = ptable->pt_hash;
99         if (!hash) /* not initialized */
100                         break;
101
102                 LASSERT(list_empty(&ptable->pt_zombie_list));
103
104                 ptable->pt_hash = NULL;
105                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
106                         LASSERT(list_empty(&hash[j]));
107
108                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
109         }
110
111         cfs_percpt_free(the_lnet.ln_peer_tables);
112         the_lnet.ln_peer_tables = NULL;
113 }
114
115 int
116 lnet_peer_tables_create(void)
117 {
118         struct lnet_peer_table  *ptable;
119         struct list_head        *hash;
120         int                     i;
121         int                     j;
122
123         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
124                                                    sizeof(*ptable));
125         if (the_lnet.ln_peer_tables == NULL) {
126                 CERROR("Failed to allocate cpu-partition peer tables\n");
127                 return -ENOMEM;
128         }
129
130         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
131                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
132                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
133                 if (hash == NULL) {
134                         CERROR("Failed to create peer hash table\n");
135                         lnet_peer_tables_destroy();
136                         return -ENOMEM;
137                 }
138
139                 spin_lock_init(&ptable->pt_zombie_lock);
140                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
141
142                 INIT_LIST_HEAD(&ptable->pt_peer_list);
143
144                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
145                         INIT_LIST_HEAD(&hash[j]);
146                 ptable->pt_hash = hash; /* sign of initialization */
147         }
148
149         return 0;
150 }
151
152 static struct lnet_peer_ni *
153 lnet_peer_ni_alloc(lnet_nid_t nid)
154 {
155         struct lnet_peer_ni *lpni;
156         struct lnet_net *net;
157         int cpt;
158
159         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
160
161         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
162         if (!lpni)
163                 return NULL;
164
165         INIT_LIST_HEAD(&lpni->lpni_txq);
166         INIT_LIST_HEAD(&lpni->lpni_hashlist);
167         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
168         INIT_LIST_HEAD(&lpni->lpni_recovery);
169         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
170         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
171         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
172         atomic_set(&lpni->lpni_refcount, 1);
173         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
174
175         spin_lock_init(&lpni->lpni_lock);
176
177         if (lnet_peers_start_down())
178                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
179         else
180                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
181         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
182         lpni->lpni_nid = nid;
183         lpni->lpni_cpt = cpt;
184         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
185
186         net = lnet_get_net_locked(LNET_NIDNET(nid));
187         lpni->lpni_net = net;
188         if (net) {
189                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
190                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
191                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
192                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
193         } else {
194                 /*
195                  * This peer_ni is not on a local network, so we
196                  * cannot add the credits here. In case the net is
197                  * added later, add the peer_ni to the remote peer ni
198                  * list so it can be easily found and revisited.
199                  */
200                 /* FIXME: per-net implementation instead? */
201                 lnet_peer_ni_addref_locked(lpni);
202                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
203                               &the_lnet.ln_remote_peer_ni_list);
204         }
205
206         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
207
208         return lpni;
209 }
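
/*
 * Illustrative sketch: a peer_ni created by lnet_peer_ni_alloc() for a NID
 * with no matching local net carries no credits and is parked on
 * ln_remote_peer_ni_list until lnet_peer_net_added() adopts it. The helper
 * below is hypothetical, only shows how that state can be recognized, and
 * assumes the caller holds lnet_net_lock; it is not referenced elsewhere.
 */
static bool
example_lpni_awaiting_local_net(struct lnet_peer_ni *lpni)
{
        /* lpni_net is only filled in once a matching local net exists */
        return lpni->lpni_net == NULL &&
               !list_empty(&lpni->lpni_on_remote_peer_ni_list);
}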
210
211 static struct lnet_peer_net *
212 lnet_peer_net_alloc(__u32 net_id)
213 {
214         struct lnet_peer_net *lpn;
215
216         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
217         if (!lpn)
218                 return NULL;
219
220         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
221         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
222         lpn->lpn_net_id = net_id;
223         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
224
225         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
226
227         return lpn;
228 }
229
230 void
231 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
232 {
233         struct lnet_peer *lp;
234
235         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
236
237         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
238         LASSERT(list_empty(&lpn->lpn_peer_nis));
239         LASSERT(list_empty(&lpn->lpn_peer_nets));
240         lp = lpn->lpn_peer;
241         lpn->lpn_peer = NULL;
242         LIBCFS_FREE(lpn, sizeof(*lpn));
243
244         lnet_peer_decref_locked(lp);
245 }
246
247 static struct lnet_peer *
248 lnet_peer_alloc(lnet_nid_t nid)
249 {
250         struct lnet_peer *lp;
251
252         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
253         if (!lp)
254                 return NULL;
255
256         INIT_LIST_HEAD(&lp->lp_rtrq);
257         INIT_LIST_HEAD(&lp->lp_routes);
258         INIT_LIST_HEAD(&lp->lp_peer_list);
259         INIT_LIST_HEAD(&lp->lp_peer_nets);
260         INIT_LIST_HEAD(&lp->lp_dc_list);
261         INIT_LIST_HEAD(&lp->lp_dc_pendq);
262         INIT_LIST_HEAD(&lp->lp_rtr_list);
263         init_waitqueue_head(&lp->lp_dc_waitq);
264         spin_lock_init(&lp->lp_lock);
265         lp->lp_primary_nid = nid;
266         lp->lp_disc_src_nid = LNET_NID_ANY;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * all peers created on a router should have health on
274          * if it's not already on.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for loopback peer. If you're creating a peer
281          * for the loopback interface then that was initiated when we
282          * attempted to send a message over the loopback. There is no need
283          * to ever use a different interface when sending messages to
284          * myself.
285          */
286         if (nid == LNET_NID_LO_0)
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
291
292         return lp;
293 }
294
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected the lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318  * We cannot resend them now because we're holding the cpt lock.
319  * Releasing the lock can cause an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nid2str(lp->lp_primary_nid),
382                 libcfs_nid2str(lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nid2str(lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
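
/*
 * Illustrative sketch: LNET_PEER_NI_DELETING, set above, is the flag a
 * scanner such as the monitor/recovery thread can test (under lpni_lock)
 * to stop tracking a peer_ni that is on its way out. The helper name below
 * is hypothetical and not used elsewhere in this file.
 */
static bool
example_lpni_being_deleted(struct lnet_peer_ni *lpni)
{
        bool deleting;

        spin_lock(&lpni->lpni_lock);
        deleting = !!(lpni->lpni_state & LNET_PEER_NI_DELETING);
        spin_unlock(&lpni->lpni_lock);

        return deleting;
}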
437
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
461
462         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
463         while (lpni != NULL) {
464                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
465                 rc = lnet_peer_ni_del_locked(lpni, false);
466                 if (rc != 0)
467                         rc2 = rc;
468                 lpni = lpni2;
469         }
470
471         return rc2;
472 }
473
474 static int
475 lnet_peer_del(struct lnet_peer *peer)
476 {
477         lnet_net_lock(LNET_LOCK_EX);
478         lnet_peer_del_locked(peer);
479         lnet_net_unlock(LNET_LOCK_EX);
480
481         return 0;
482 }
483
484 /*
485  * Delete a NID from a peer. Call with ln_api_mutex held.
486  *
487  * Error codes:
488  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
489  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
490  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
491  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
492  */
493 static int
494 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
495 {
496         struct lnet_peer_ni *lpni;
497         lnet_nid_t primary_nid = lp->lp_primary_nid;
498         int rc = 0;
499         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
500
501         if (!(flags & LNET_PEER_CONFIGURED)) {
502                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
503                         rc = -EPERM;
504                         goto out;
505                 }
506         }
507         lpni = lnet_find_peer_ni_locked(nid);
508         if (!lpni) {
509                 rc = -ENOENT;
510                 goto out;
511         }
512         lnet_peer_ni_decref_locked(lpni);
513         if (lp != lpni->lpni_peer_net->lpn_peer) {
514                 rc = -ECHILD;
515                 goto out;
516         }
517
518         /*
519          * This function only allows deletion of the primary NID if it
520          * is the only NID.
521          */
522         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
523                 rc = -EBUSY;
524                 goto out;
525         }
526
527         lnet_net_lock(LNET_LOCK_EX);
528
529         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
530                 struct lnet_peer_ni *lpni2;
531                 /* assign the next peer_ni to be the primary */
532                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
533                 LASSERT(lpni2);
534                 lp->lp_primary_nid = lpni2->lpni_nid;
535         }
536         rc = lnet_peer_ni_del_locked(lpni, force);
537
538         lnet_net_unlock(LNET_LOCK_EX);
539
540 out:
541         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
542                libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
543
544         return rc;
545 }
546
547 static void
548 lnet_peer_table_cleanup_locked(struct lnet_net *net,
549                                struct lnet_peer_table *ptable)
550 {
551         int                      i;
552         struct lnet_peer_ni     *next;
553         struct lnet_peer_ni     *lpni;
554         struct lnet_peer        *peer;
555
556         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
557                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
558                                          lpni_hashlist) {
559                         if (net != NULL && net != lpni->lpni_net)
560                                 continue;
561
562                         peer = lpni->lpni_peer_net->lpn_peer;
563                         if (peer->lp_primary_nid != lpni->lpni_nid) {
564                                 lnet_peer_ni_del_locked(lpni, false);
565                                 continue;
566                         }
567                         /*
568                          * Removing the primary NID implies removing
569                          * the entire peer. Advance next beyond any
570                          * peer_ni that belongs to the same peer.
571                          */
572                         list_for_each_entry_from(next, &ptable->pt_hash[i],
573                                                  lpni_hashlist) {
574                                 if (next->lpni_peer_net->lpn_peer != peer)
575                                         break;
576                         }
577                         lnet_peer_del_locked(peer);
578                 }
579         }
580 }
581
582 static void
583 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
584 {
585         wait_var_event_warning(&ptable->pt_zombies,
586                                ptable->pt_zombies == 0,
587                                "Waiting for %d zombies on peer table\n",
588                                ptable->pt_zombies);
589 }
590
591 static void
592 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
593                                 struct lnet_peer_table *ptable)
594 {
595         struct lnet_peer_ni     *lp;
596         struct lnet_peer_ni     *tmp;
597         lnet_nid_t              gw_nid;
598         int                     i;
599
600         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
601                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
602                                          lpni_hashlist) {
603                         if (net != lp->lpni_net)
604                                 continue;
605
606                         if (!lnet_isrouter(lp))
607                                 continue;
608
609                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
610
611                         lnet_net_unlock(LNET_LOCK_EX);
612                         lnet_del_route(LNET_NET_ANY, gw_nid);
613                         lnet_net_lock(LNET_LOCK_EX);
614                 }
615         }
616 }
617
618 void
619 lnet_peer_tables_cleanup(struct lnet_net *net)
620 {
621         int i;
622         struct lnet_peer_table *ptable;
623
624         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
625         /* If just deleting the peers for a NI, get rid of any routes these
626          * peers are gateways for. */
627         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
628                 lnet_net_lock(LNET_LOCK_EX);
629                 lnet_peer_table_del_rtrs_locked(net, ptable);
630                 lnet_net_unlock(LNET_LOCK_EX);
631         }
632
633         /* Start the cleanup process */
634         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
635                 lnet_net_lock(LNET_LOCK_EX);
636                 lnet_peer_table_cleanup_locked(net, ptable);
637                 lnet_net_unlock(LNET_LOCK_EX);
638         }
639
640         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
641                 lnet_peer_ni_finalize_wait(ptable);
642 }
643
644 static struct lnet_peer_ni *
645 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
646 {
647         struct list_head        *peers;
648         struct lnet_peer_ni     *lp;
649
650         if (the_lnet.ln_state != LNET_STATE_RUNNING)
651                 return NULL;
652
653         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
654         list_for_each_entry(lp, peers, lpni_hashlist) {
655                 if (lp->lpni_nid == nid) {
656                         lnet_peer_ni_addref_locked(lp);
657                         return lp;
658                 }
659         }
660
661         return NULL;
662 }
663
664 struct lnet_peer_ni *
665 lnet_find_peer_ni_locked(lnet_nid_t nid)
666 {
667         struct lnet_peer_ni *lpni;
668         struct lnet_peer_table *ptable;
669         int cpt;
670
671         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
672
673         ptable = the_lnet.ln_peer_tables[cpt];
674         lpni = lnet_get_peer_ni_locked(ptable, nid);
675
676         return lpni;
677 }
678
679 struct lnet_peer_ni *
680 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
681 {
682         struct lnet_peer_net *lpn;
683         struct lnet_peer_ni *lpni;
684
685         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
686         if (!lpn)
687                 return NULL;
688
689         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
690                 if (lpni->lpni_nid == nid)
691                         return lpni;
692         }
693
694         return NULL;
695 }
696
697 struct lnet_peer *
698 lnet_find_peer(lnet_nid_t nid)
699 {
700         struct lnet_peer_ni *lpni;
701         struct lnet_peer *lp = NULL;
702         int cpt;
703
704         cpt = lnet_net_lock_current();
705         lpni = lnet_find_peer_ni_locked(nid);
706         if (lpni) {
707                 lp = lpni->lpni_peer_net->lpn_peer;
708                 lnet_peer_addref_locked(lp);
709                 lnet_peer_ni_decref_locked(lpni);
710         }
711         lnet_net_unlock(cpt);
712
713         return lp;
714 }
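
/*
 * Illustrative sketch: lnet_find_peer() returns a referenced lnet_peer, so
 * callers are expected to drop that reference when done, as in the
 * hypothetical helper below (assuming lnet_peer_decref_locked() is called
 * under lnet_net_lock, as elsewhere in this file).
 */
static void
example_log_primary_nid(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        lp = lnet_find_peer(nid);
        if (!lp)
                return;

        CDEBUG(D_NET, "%s belongs to peer %s\n", libcfs_nid2str(nid),
               libcfs_nid2str(lp->lp_primary_nid));

        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_decref_locked(lp);
        lnet_net_unlock(LNET_LOCK_EX);
}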
715
716 struct lnet_peer_net *
717 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
718 {
719         struct lnet_peer_net *net;
720
721         if (!prev_lpn_id) {
722                 /* no net id provided; return the first net */
723                 net = list_first_entry_or_null(&lp->lp_peer_nets,
724                                                struct lnet_peer_net,
725                                                lpn_peer_nets);
726
727                 return net;
728         }
729
730         /* find the net after the one provided */
731         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
732                 if (net->lpn_net_id == prev_lpn_id) {
733                         /*
734                          * if we reached the end of the list, loop to the
735                          * beginning.
736                          */
737                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
738                                 return list_first_entry_or_null(&lp->lp_peer_nets,
739                                                                 struct lnet_peer_net,
740                                                                 lpn_peer_nets);
741                         else
742                                 return list_next_entry(net, lpn_peer_nets);
743                 }
744         }
745
746         return NULL;
747 }
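
/*
 * Illustrative sketch: because the iterator above wraps back to the first
 * net when the end of lp_peer_nets is reached, a caller can do simple
 * round-robin net selection by feeding back the last net id it used.
 * Hypothetical helper; call with the peer held stable (e.g. under
 * lnet_net_lock).
 */
static struct lnet_peer_net *
example_next_peer_net_rr(struct lnet_peer *lp, __u32 *last_net_id)
{
        struct lnet_peer_net *lpn;

        lpn = lnet_get_next_peer_net_locked(lp, *last_net_id);
        if (lpn)
                *last_net_id = lpn->lpn_net_id;

        return lpn;
}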
748
749 struct lnet_peer_ni *
750 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
751                              struct lnet_peer_net *peer_net,
752                              struct lnet_peer_ni *prev)
753 {
754         struct lnet_peer_ni *lpni;
755         struct lnet_peer_net *net = peer_net;
756
757         if (!prev) {
758                 if (!net) {
759                         if (list_empty(&peer->lp_peer_nets))
760                                 return NULL;
761
762                         net = list_entry(peer->lp_peer_nets.next,
763                                          struct lnet_peer_net,
764                                          lpn_peer_nets);
765                 }
766                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
767                                   lpni_peer_nis);
768
769                 return lpni;
770         }
771
772         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
773                 /*
774                  * if we reached the end of the peer ni list and the peer
775                  * net is specified, then there are no more peer nis in
776                  * that net.
777                  */
778                 if (net)
779                         return NULL;
780
781                 /*
782                  * we reached the end of this net ni list. move to the
783                  * next net
784                  */
785                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
786                     &peer->lp_peer_nets)
787                         /* no more nets and no more NIs. */
788                         return NULL;
789
790                 /* get the next net */
791                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
792                                  struct lnet_peer_net,
793                                  lpn_peer_nets);
794                 /* get the ni on it */
795                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
796                                   lpni_peer_nis);
797
798                 return lpni;
799         }
800
801         /* there are more nis left */
802         lpni = list_entry(prev->lpni_peer_nis.next,
803                           struct lnet_peer_ni, lpni_peer_nis);
804
805         return lpni;
806 }
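
/*
 * Illustrative sketch: the usual way to walk every NI of a peer with the
 * iterator above, mirroring lnet_peer_del_locked() and
 * lnet_peer_clr_non_mr_pref_nids() in this file. The helper is hypothetical
 * and assumes lnet_net_lock is held by the caller.
 */
static void
example_dump_peer_nis_locked(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                CDEBUG(D_NET, "peer %s: NI %s\n",
                       libcfs_nid2str(lp->lp_primary_nid),
                       libcfs_nid2str(lpni->lpni_nid));
}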
807
808 /* Call with the ln_api_mutex held */
809 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
810 {
811         struct lnet_process_id id;
812         struct lnet_peer_table *ptable;
813         struct lnet_peer *lp;
814         __u32 count = 0;
815         __u32 size = 0;
816         int lncpt;
817         int cpt;
818         __u32 i;
819         int rc;
820
821         rc = -ESHUTDOWN;
822         if (the_lnet.ln_state != LNET_STATE_RUNNING)
823                 goto done;
824
825         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
826
827         /*
828          * Count the number of peers, and return E2BIG if the buffer
829          * is too small. We'll also return the desired size.
830          */
831         rc = -E2BIG;
832         for (cpt = 0; cpt < lncpt; cpt++) {
833                 ptable = the_lnet.ln_peer_tables[cpt];
834                 count += ptable->pt_peers;
835         }
836         size = count * sizeof(*ids);
837         if (size > *sizep)
838                 goto done;
839
840         /*
841          * Walk the peer lists and copy out the primary nids.
842          * This is safe because the peer lists are only modified
843          * while the ln_api_mutex is held. So we don't need to
844          * hold the lnet_net_lock as well, and can therefore
845          * directly call copy_to_user().
846          */
847         rc = -EFAULT;
848         memset(&id, 0, sizeof(id));
849         id.pid = LNET_PID_LUSTRE;
850         i = 0;
851         for (cpt = 0; cpt < lncpt; cpt++) {
852                 ptable = the_lnet.ln_peer_tables[cpt];
853                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
854                         if (i >= count)
855                                 goto done;
856                         id.nid = lp->lp_primary_nid;
857                         if (copy_to_user(&ids[i], &id, sizeof(id)))
858                                 goto done;
859                         i++;
860                 }
861         }
862         rc = 0;
863 done:
864         *countp = count;
865         *sizep = size;
866         return rc;
867 }
868
869 /*
870  * Start pushes to peers that need to be updated for a configuration
871  * change on this node.
872  */
873 void
874 lnet_push_update_to_peers(int force)
875 {
876         struct lnet_peer_table *ptable;
877         struct lnet_peer *lp;
878         int lncpt;
879         int cpt;
880
881         lnet_net_lock(LNET_LOCK_EX);
882         if (lnet_peer_discovery_disabled)
883                 force = 0;
884         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
885         for (cpt = 0; cpt < lncpt; cpt++) {
886                 ptable = the_lnet.ln_peer_tables[cpt];
887                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
888                         if (force) {
889                                 spin_lock(&lp->lp_lock);
890                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
891                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
892                                 spin_unlock(&lp->lp_lock);
893                         }
894                         if (lnet_peer_needs_push(lp))
895                                 lnet_peer_queue_for_discovery(lp);
896                 }
897         }
898         lnet_net_unlock(LNET_LOCK_EX);
899         wake_up(&the_lnet.ln_dc_waitq);
900 }
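
/*
 * Illustrative sketch: a configuration path that changes the local NID set
 * would call the function above so that multi-rail peers get a push with
 * the new information. The caller below is hypothetical; the real call
 * sites live outside this file.
 */
static void
example_local_config_changed(void)
{
        /* force = 1: flag every multi-rail peer for a push, not only those
         * that already need one */
        lnet_push_update_to_peers(1);
}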
901
902 /* find the NID in the preferred gateways for the remote peer
903  * return:
904  *      false: list is not empty and NID is not preferred
905  *      false: list is empty
906  *      true: nid is found in the list
907  */
908 bool
909 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
910                              lnet_nid_t gw_nid)
911 {
912         struct lnet_nid_list *ne;
913
914         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
915                libcfs_nid2str(lpni->lpni_nid),
916                list_empty(&lpni->lpni_rtr_pref_nids));
917
918         if (list_empty(&lpni->lpni_rtr_pref_nids))
919                 return false;
920
921         /* iterate through all the preferred NIDs and see if any of them
922          * matches the provided gw_nid
923          */
924         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
925                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
926                        libcfs_nid2str(ne->nl_nid),
927                        libcfs_nid2str(gw_nid));
928                 if (ne->nl_nid == gw_nid)
929                         return true;
930         }
931
932         return false;
933 }
934
935 void
936 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
937 {
938         struct list_head zombies;
939         struct lnet_nid_list *ne;
940         struct lnet_nid_list *tmp;
941         int cpt = lpni->lpni_cpt;
942
943         INIT_LIST_HEAD(&zombies);
944
945         lnet_net_lock(cpt);
946         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
947         lnet_net_unlock(cpt);
948
949         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
950                 list_del(&ne->nl_list);
951                 LIBCFS_FREE(ne, sizeof(*ne));
952         }
953 }
954
955 int
956 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
957                        lnet_nid_t gw_nid)
958 {
959         int cpt = lpni->lpni_cpt;
960         struct lnet_nid_list *ne = NULL;
961
962         /* This function is called with api_mutex held. When the api_mutex
963          * is held the list can not be modified, as it is only modified as
964          * a result of applying a UDSP and that happens under api_mutex
965          * lock.
966          */
967         __must_hold(&the_lnet.ln_api_mutex);
968
969         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
970                 if (ne->nl_nid == gw_nid)
971                         return -EEXIST;
972         }
973
974         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
975         if (!ne)
976                 return -ENOMEM;
977
978         ne->nl_nid = gw_nid;
979
980         /* Lock the cpt to protect against addition and checks in the
981          * selection algorithm
982          */
983         lnet_net_lock(cpt);
984         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
985         lnet_net_unlock(cpt);
986
987         return 0;
988 }
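
/*
 * Illustrative sketch: marking a gateway as preferred for a peer_ni and
 * checking the preference afterwards. lnet_peer_add_pref_rtr() expects
 * ln_api_mutex to be held, while the _locked check runs under
 * lnet_net_lock; the helper below is hypothetical.
 */
static int
example_prefer_gateway(struct lnet_peer_ni *lpni, lnet_nid_t gw_nid)
{
        int rc;

        rc = lnet_peer_add_pref_rtr(lpni, gw_nid);
        if (rc && rc != -EEXIST)
                return rc;

        lnet_net_lock(lpni->lpni_cpt);
        CDEBUG(D_NET, "gw %s preferred for %s: %d\n",
               libcfs_nid2str(gw_nid), libcfs_nid2str(lpni->lpni_nid),
               lnet_peer_is_pref_rtr_locked(lpni, gw_nid));
        lnet_net_unlock(lpni->lpni_cpt);

        return 0;
}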
989
990 /*
991  * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
992  * this is a preferred point-to-point path. Call with lnet_net_lock in
993  * shared mode.
994  */
995 bool
996 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
997 {
998         struct lnet_nid_list *ne;
999
1000         if (lpni->lpni_pref_nnids == 0)
1001                 return false;
1002         if (lpni->lpni_pref_nnids == 1)
1003                 return lpni->lpni_pref.nid == nid;
1004         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1005                 if (ne->nl_nid == nid)
1006                         return true;
1007         }
1008         return false;
1009 }
1010
1011 /*
1012  * Set a single ni as preferred, provided no preferred ni is already
1013  * defined. Only to be used for non-multi-rail peer_ni.
1014  */
1015 int
1016 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1017 {
1018         int rc = 0;
1019
1020         spin_lock(&lpni->lpni_lock);
1021         if (nid == LNET_NID_ANY) {
1022                 rc = -EINVAL;
1023         } else if (lpni->lpni_pref_nnids > 0) {
1024                 rc = -EPERM;
1025         } else if (lpni->lpni_pref_nnids == 0) {
1026                 lpni->lpni_pref.nid = nid;
1027                 lpni->lpni_pref_nnids = 1;
1028                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1029         }
1030         spin_unlock(&lpni->lpni_lock);
1031
1032         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1033                libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
1034         return rc;
1035 }
1036
1037 /*
1038  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1039  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1040  */
1041 int
1042 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1043 {
1044         int rc = 0;
1045
1046         spin_lock(&lpni->lpni_lock);
1047         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1048                 lpni->lpni_pref_nnids = 0;
1049                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1050         } else if (lpni->lpni_pref_nnids == 0) {
1051                 rc = -ENOENT;
1052         } else {
1053                 rc = -EPERM;
1054         }
1055         spin_unlock(&lpni->lpni_lock);
1056
1057         CDEBUG(D_NET, "peer %s: %d\n",
1058                libcfs_nid2str(lpni->lpni_nid), rc);
1059         return rc;
1060 }
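
/*
 * Illustrative sketch: a caller (the send path, for instance) can record
 * the source NI it used for a non-multi-rail peer with
 * lnet_peer_ni_set_non_mr_pref_nid(); a later attempt simply returns
 * -EPERM because a preference already exists. The helper below is
 * hypothetical.
 */
static void
example_record_non_mr_src(struct lnet_peer_ni *lpni, lnet_nid_t src_nid)
{
        int rc;

        rc = lnet_peer_ni_set_non_mr_pref_nid(lpni, src_nid);
        if (rc == -EPERM)
                CDEBUG(D_NET, "%s already has a preferred NI\n",
                       libcfs_nid2str(lpni->lpni_nid));
}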
1061
1062 void
1063 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1064 {
1065         lpni->lpni_sel_priority = priority;
1066 }
1067
1068 /*
1069  * Clear the preferred NIDs from a non-multi-rail peer.
1070  */
1071 void
1072 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1073 {
1074         struct lnet_peer_ni *lpni = NULL;
1075
1076         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1077                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1078 }
1079
1080 int
1081 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1082 {
1083         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1084         struct lnet_nid_list *ne1 = NULL;
1085         struct lnet_nid_list *ne2 = NULL;
1086         lnet_nid_t tmp_nid = LNET_NID_ANY;
1087         int rc = 0;
1088
1089         if (nid == LNET_NID_ANY) {
1090                 rc = -EINVAL;
1091                 goto out;
1092         }
1093
1094         if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
1095                 rc = -EEXIST;
1096                 goto out;
1097         }
1098
1099         /* A non-MR node may have only one preferred NI per peer_ni */
1100         if (lpni->lpni_pref_nnids > 0 &&
1101             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1102                 rc = -EPERM;
1103                 goto out;
1104         }
1105
1106         /* add the new preferred nid to the list of preferred nids */
1107         if (lpni->lpni_pref_nnids != 0) {
1108                 size_t alloc_size = sizeof(*ne1);
1109
1110                 if (lpni->lpni_pref_nnids == 1) {
1111                         tmp_nid = lpni->lpni_pref.nid;
1112                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1113                 }
1114
1115                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1116                         if (ne1->nl_nid == nid) {
1117                                 rc = -EEXIST;
1118                                 goto out;
1119                         }
1120                 }
1121
1122                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1123                                  alloc_size);
1124                 if (!ne1) {
1125                         rc = -ENOMEM;
1126                         goto out;
1127                 }
1128
1129                 /* move the originally stored nid to the list */
1130                 if (lpni->lpni_pref_nnids == 1) {
1131                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1132                                 lpni->lpni_cpt, alloc_size);
1133                         if (!ne2) {
1134                                 rc = -ENOMEM;
1135                                 goto out;
1136                         }
1137                         INIT_LIST_HEAD(&ne2->nl_list);
1138                         ne2->nl_nid = tmp_nid;
1139                 }
1140                 ne1->nl_nid = nid;
1141         }
1142
1143         lnet_net_lock(LNET_LOCK_EX);
1144         spin_lock(&lpni->lpni_lock);
1145         if (lpni->lpni_pref_nnids == 0) {
1146                 lpni->lpni_pref.nid = nid;
1147         } else {
1148                 if (ne2)
1149                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1150                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1151         }
1152         lpni->lpni_pref_nnids++;
1153         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1154         spin_unlock(&lpni->lpni_lock);
1155         lnet_net_unlock(LNET_LOCK_EX);
1156
1157 out:
1158         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1159                 spin_lock(&lpni->lpni_lock);
1160                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1161                 spin_unlock(&lpni->lpni_lock);
1162         }
1163         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1164                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1165         return rc;
1166 }
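
/*
 * Illustrative sketch of the storage scheme used above: lpni_pref holds a
 * single NID while lpni_pref_nnids == 1 and switches to a list of
 * struct lnet_nid_list entries once a second NID is added. The dump helper
 * below is hypothetical and assumes the caller keeps the peer_ni stable
 * (e.g. holds lnet_net_lock).
 */
static void
example_dump_pref_nids_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return;

        if (lpni->lpni_pref_nnids == 1) {
                CDEBUG(D_NET, "pref %s\n",
                       libcfs_nid2str(lpni->lpni_pref.nid));
                return;
        }

        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list)
                CDEBUG(D_NET, "pref %s\n", libcfs_nid2str(ne->nl_nid));
}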
1167
1168 int
1169 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1170 {
1171         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1172         struct lnet_nid_list *ne = NULL;
1173         int rc = 0;
1174
1175         if (lpni->lpni_pref_nnids == 0) {
1176                 rc = -ENOENT;
1177                 goto out;
1178         }
1179
1180         if (lpni->lpni_pref_nnids == 1) {
1181                 if (lpni->lpni_pref.nid != nid) {
1182                         rc = -ENOENT;
1183                         goto out;
1184                 }
1185         } else {
1186                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1187                         if (ne->nl_nid == nid)
1188                                 goto remove_nid_entry;
1189                 }
1190                 rc = -ENOENT;
1191                 ne = NULL;
1192                 goto out;
1193         }
1194
1195 remove_nid_entry:
1196         lnet_net_lock(LNET_LOCK_EX);
1197         spin_lock(&lpni->lpni_lock);
1198         if (lpni->lpni_pref_nnids == 1)
1199                 lpni->lpni_pref.nid = LNET_NID_ANY;
1200         else {
1201                 list_del_init(&ne->nl_list);
1202                 if (lpni->lpni_pref_nnids == 2) {
1203                         struct lnet_nid_list *ne, *tmp;
1204
1205                         list_for_each_entry_safe(ne, tmp,
1206                                                  &lpni->lpni_pref.nids,
1207                                                  nl_list) {
1208                                 lpni->lpni_pref.nid = ne->nl_nid;
1209                                 list_del_init(&ne->nl_list);
1210                                 LIBCFS_FREE(ne, sizeof(*ne));
1211                         }
1212                 }
1213         }
1214         lpni->lpni_pref_nnids--;
1215         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1216         spin_unlock(&lpni->lpni_lock);
1217         lnet_net_unlock(LNET_LOCK_EX);
1218
1219         if (ne)
1220                 LIBCFS_FREE(ne, sizeof(*ne));
1221 out:
1222         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1223                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1224         return rc;
1225 }
1226
1227 void
1228 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1229 {
1230         struct list_head zombies;
1231         struct lnet_nid_list *ne;
1232         struct lnet_nid_list *tmp;
1233
1234         INIT_LIST_HEAD(&zombies);
1235
1236         lnet_net_lock(LNET_LOCK_EX);
1237         if (lpni->lpni_pref_nnids == 1)
1238                 lpni->lpni_pref.nid = LNET_NID_ANY;
1239         else if (lpni->lpni_pref_nnids > 1)
1240                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1241         lpni->lpni_pref_nnids = 0;
1242         lnet_net_unlock(LNET_LOCK_EX);
1243
1244         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1245                 list_del_init(&ne->nl_list);
1246                 LIBCFS_FREE(ne, sizeof(*ne));
1247         }
1248 }
1249
1250 lnet_nid_t
1251 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1252 {
1253         struct lnet_peer_ni *lpni;
1254         lnet_nid_t primary_nid = nid;
1255
1256         lpni = lnet_find_peer_ni_locked(nid);
1257         if (lpni) {
1258                 primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1259                 lnet_peer_ni_decref_locked(lpni);
1260         }
1261
1262         return primary_nid;
1263 }
1264
1265 bool
1266 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1267 __must_hold(&lp->lp_lock)
1268 {
1269         if (lnet_peer_discovery_disabled)
1270                 return true;
1271
1272         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1273             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1274                 return true;
1275         }
1276
1277         return false;
1278 }
1279
1280 /*
1281  * Peer Discovery
1282  */
1283 bool
1284 lnet_is_discovery_disabled(struct lnet_peer *lp)
1285 {
1286         bool rc = false;
1287
1288         spin_lock(&lp->lp_lock);
1289         rc = lnet_is_discovery_disabled_locked(lp);
1290         spin_unlock(&lp->lp_lock);
1291
1292         return rc;
1293 }
1294
1295 lnet_nid_t
1296 LNetPrimaryNID(lnet_nid_t nid)
1297 {
1298         struct lnet_peer *lp;
1299         struct lnet_peer_ni *lpni;
1300         lnet_nid_t primary_nid = nid;
1301         int rc = 0;
1302         int cpt;
1303
1304         if (nid == LNET_NID_LO_0)
1305                 return LNET_NID_LO_0;
1306
1307         cpt = lnet_net_lock_current();
1308         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1309         if (IS_ERR(lpni)) {
1310                 rc = PTR_ERR(lpni);
1311                 goto out_unlock;
1312         }
1313         lp = lpni->lpni_peer_net->lpn_peer;
1314
1315         while (!lnet_peer_is_uptodate(lp)) {
1316                 spin_lock(&lp->lp_lock);
1317                 /* force a full discovery cycle */
1318                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1319                 spin_unlock(&lp->lp_lock);
1320
1321                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1322                 if (rc)
1323                         goto out_decref;
1324                 lp = lpni->lpni_peer_net->lpn_peer;
1325
1326                 /* Only try once if discovery is disabled */
1327                 if (lnet_is_discovery_disabled(lp))
1328                         break;
1329         }
1330         primary_nid = lp->lp_primary_nid;
1331 out_decref:
1332         lnet_peer_ni_decref_locked(lpni);
1333 out_unlock:
1334         lnet_net_unlock(cpt);
1335
1336         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1337                libcfs_nid2str(primary_nid), rc);
1338         return primary_nid;
1339 }
1340 EXPORT_SYMBOL(LNetPrimaryNID);
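
/*
 * Illustrative sketch: an upper layer can collapse any of a node's NIDs to
 * the node's primary NID before using it as a key, so two NIDs of the same
 * multi-rail node compare equal. Hypothetical helper; actual callers live
 * outside this file.
 */
static bool
example_same_node(lnet_nid_t nid1, lnet_nid_t nid2)
{
        return LNetPrimaryNID(nid1) == LNetPrimaryNID(nid2);
}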
1341
1342 struct lnet_peer_net *
1343 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1344 {
1345         struct lnet_peer_net *peer_net;
1346         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1347                 if (peer_net->lpn_net_id == net_id)
1348                         return peer_net;
1349         }
1350         return NULL;
1351 }
1352
1353 /*
1354  * Attach a peer_ni to a peer_net and peer. This function assumes
1355  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1356  * may be attached to a different peer, in which case it will be
1357  * properly detached first. The whole operation is done atomically.
1358  *
1359  * This function consumes the reference on lpni and always returns 0.
1360  * This is the last function called from functions that do return an
1361  * int, so returning 0 here allows the compiler to do a tail call.
1362  */
1363 static int
1364 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1365                                 struct lnet_peer_net *lpn,
1366                                 struct lnet_peer_ni *lpni,
1367                                 unsigned flags)
1368 {
1369         struct lnet_peer_table *ptable;
1370         bool new_lpn = false;
1371         int rc;
1372
1373         /* Install the new peer_ni */
1374         lnet_net_lock(LNET_LOCK_EX);
1375         /* Add peer_ni to global peer table hash, if necessary. */
1376         if (list_empty(&lpni->lpni_hashlist)) {
1377                 int hash = lnet_nid2peerhash(lpni->lpni_nid);
1378
1379                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1380                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1381                 ptable->pt_version++;
1382                 lnet_peer_ni_addref_locked(lpni);
1383         }
1384
1385         /* Detach the peer_ni from an existing peer, if necessary. */
1386         if (lpni->lpni_peer_net) {
1387                 LASSERT(lpni->lpni_peer_net != lpn);
1388                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1389                 lnet_peer_detach_peer_ni_locked(lpni);
1390                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1391                 lpni->lpni_peer_net = NULL;
1392         }
1393
1394         /* Add peer_ni to peer_net */
1395         lpni->lpni_peer_net = lpn;
1396         list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1397         lnet_update_peer_net_healthv(lpni);
1398         lnet_peer_net_addref_locked(lpn);
1399
1400         /* Add peer_net to peer */
1401         if (!lpn->lpn_peer) {
1402                 new_lpn = true;
1403                 lpn->lpn_peer = lp;
1404                 list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1405                 lnet_peer_addref_locked(lp);
1406         }
1407
1408         /* Add peer to global peer list, if necessary */
1409         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1410         if (list_empty(&lp->lp_peer_list)) {
1411                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1412                 ptable->pt_peers++;
1413         }
1414
1415
1416         /* Update peer state */
1417         spin_lock(&lp->lp_lock);
1418         if (flags & LNET_PEER_CONFIGURED) {
1419                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1420                         lp->lp_state |= LNET_PEER_CONFIGURED;
1421         }
1422         if (flags & LNET_PEER_MULTI_RAIL) {
1423                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1424                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1425                         lnet_peer_clr_non_mr_pref_nids(lp);
1426                 }
1427         }
1428         spin_unlock(&lp->lp_lock);
1429
1430         lp->lp_nnis++;
1431
1432         /* apply UDSPs */
1433         if (new_lpn) {
1434                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1435                 if (rc)
1436                         CERROR("Failed to apply UDSPs on lpn %s\n",
1437                                libcfs_net2str(lpn->lpn_net_id));
1438         }
1439         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1440         if (rc)
1441                 CERROR("Failed to apply UDSPs on lpni %s\n",
1442                        libcfs_nid2str(lpni->lpni_nid));
1443
1444         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1445                libcfs_nid2str(lp->lp_primary_nid),
1446                libcfs_nid2str(lpni->lpni_nid), flags);
1447         lnet_peer_ni_decref_locked(lpni);
1448         lnet_net_unlock(LNET_LOCK_EX);
1449
1450         return 0;
1451 }
1452
1453 /*
1454  * Create a new peer, with nid as its primary nid.
1455  *
1456  * Call with the lnet_api_mutex held.
1457  */
1458 static int
1459 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1460 {
1461         struct lnet_peer *lp;
1462         struct lnet_peer_net *lpn;
1463         struct lnet_peer_ni *lpni;
1464         int rc = 0;
1465
1466         LASSERT(nid != LNET_NID_ANY);
1467
1468         /*
1469          * No need for the lnet_net_lock here, because the
1470          * lnet_api_mutex is held.
1471          */
1472         lpni = lnet_find_peer_ni_locked(nid);
1473         if (lpni) {
1474                 /* A peer with this NID already exists. */
1475                 lp = lpni->lpni_peer_net->lpn_peer;
1476                 lnet_peer_ni_decref_locked(lpni);
1477                 /*
1478                  * This is an error if the peer was configured and the
1479                  * primary NID differs or an attempt is made to change
1480                  * the Multi-Rail flag. Otherwise the assumption is
1481                  * that an existing peer is being modified.
1482                  */
1483                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1484                         if (lp->lp_primary_nid != nid)
1485                                 rc = -EEXIST;
1486                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1487                                 rc = -EPERM;
1488                         goto out;
1489                 }
1490                 /* Delete and recreate as a configured peer. */
1491                 lnet_peer_del(lp);
1492         }
1493
1494         /* Create peer, peer_net, and peer_ni. */
1495         rc = -ENOMEM;
1496         lp = lnet_peer_alloc(nid);
1497         if (!lp)
1498                 goto out;
1499         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1500         if (!lpn)
1501                 goto out_free_lp;
1502         lpni = lnet_peer_ni_alloc(nid);
1503         if (!lpni)
1504                 goto out_free_lpn;
1505
1506         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1507
1508 out_free_lpn:
1509         LIBCFS_FREE(lpn, sizeof(*lpn));
1510 out_free_lp:
1511         LIBCFS_FREE(lp, sizeof(*lp));
1512 out:
1513         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1514                libcfs_nid2str(nid), flags, rc);
1515         return rc;
1516 }
1517
1518 /*
1519  * Add a NID to a peer. Call with ln_api_mutex held.
1520  *
1521  * Error codes:
1522  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1523  *  -EEXIST:   The NID was configured by DLC for a different peer.
1524  *  -ENOMEM:   Out of memory.
1525  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1526  *             non-multi-rail peer.
1527  */
1528 static int
1529 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1530 {
1531         struct lnet_peer_net *lpn;
1532         struct lnet_peer_ni *lpni;
1533         int rc = 0;
1534
1535         LASSERT(lp);
1536         LASSERT(nid != LNET_NID_ANY);
1537
1538         /* A configured peer can only be updated through configuration. */
1539         if (!(flags & LNET_PEER_CONFIGURED)) {
1540                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1541                         rc = -EPERM;
1542                         goto out;
1543                 }
1544         }
1545
1546         /*
1547          * The MULTI_RAIL flag can be set but not cleared, because
1548          * that would leave the peer struct in an invalid state.
1549          */
1550         if (flags & LNET_PEER_MULTI_RAIL) {
1551                 spin_lock(&lp->lp_lock);
1552                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1553                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1554                         lnet_peer_clr_non_mr_pref_nids(lp);
1555                 }
1556                 spin_unlock(&lp->lp_lock);
1557         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1558                 rc = -EPERM;
1559                 goto out;
1560         }
1561
1562         lpni = lnet_find_peer_ni_locked(nid);
1563         if (lpni) {
1564                 /*
1565                  * A peer_ni already exists. This is only a problem if
1566                  * it is not connected to this peer and was configured
1567                  * by DLC.
1568                  */
1569                 if (lpni->lpni_peer_net->lpn_peer == lp)
1570                         goto out_free_lpni;
1571                 if (lnet_peer_ni_is_configured(lpni)) {
1572                         rc = -EEXIST;
1573                         goto out_free_lpni;
1574                 }
1575                 /* If this is the primary NID, destroy the peer. */
1576                 if (lnet_peer_ni_is_primary(lpni)) {
1577                         struct lnet_peer *rtr_lp =
1578                                 lpni->lpni_peer_net->lpn_peer;
1579                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1580                         /*
1581                          * If we're trying to delete a router it means
1582                          * we're moving this peer NI to a new peer, so we
1583                          * must transfer the router properties to the new peer.
1584                          */
1585                         if (rtr_refcount > 0) {
1586                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1587                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1588                         }
1589                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1590                         lnet_peer_ni_decref_locked(lpni);
1591                         lpni = lnet_peer_ni_alloc(nid);
1592                         if (!lpni) {
1593                                 rc = -ENOMEM;
1594                                 goto out_free_lpni;
1595                         }
1596                 }
1597         } else {
1598                 lpni = lnet_peer_ni_alloc(nid);
1599                 if (!lpni) {
1600                         rc = -ENOMEM;
1601                         goto out_free_lpni;
1602                 }
1603         }
1604
1605         /*
1606          * Get the peer_net. Check that we're not adding a second
1607          * peer_ni on a peer_net of a non-multi-rail peer.
1608          */
1609         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1610         if (!lpn) {
1611                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1612                 if (!lpn) {
1613                         rc = -ENOMEM;
1614                         goto out_free_lpni;
1615                 }
1616         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1617                 rc = -ENOTUNIQ;
1618                 goto out_free_lpni;
1619         }
1620
1621         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1622
1623 out_free_lpni:
1624         lnet_peer_ni_decref_locked(lpni);
1625 out:
1626         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1627                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1628                flags, rc);
1629         return rc;
1630 }
1631
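/*
 * Illustrative sketch (comment only, not built): adding a second NID on
 * the same network to a peer that is not Multi-Rail fails with -ENOTUNIQ,
 * while the same call with the MULTI_RAIL flag succeeds. The NID is
 * hypothetical and lp is assumed to come from a prior lookup.
 *
 *	unsigned flags = LNET_PEER_CONFIGURED;
 *	int rc;
 *
 *	rc = lnet_peer_add_nid(lp, libcfs_str2nid("192.168.0.11@tcp"),
 *			       flags);
 *	// rc == -ENOTUNIQ if lp already has a NID on @tcp and is non-MR
 *	rc = lnet_peer_add_nid(lp, libcfs_str2nid("192.168.0.11@tcp"),
 *			       flags | LNET_PEER_MULTI_RAIL);
 *	// sets the MULTI_RAIL flag on lp, then attaches the new NID
 */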
1632 /*
1633  * Update the primary NID of a peer, if possible.
1634  *
1635  * Call with the lnet_api_mutex held.
1636  */
1637 static int
1638 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1639 {
1640         lnet_nid_t old = lp->lp_primary_nid;
1641         int rc = 0;
1642
1643         if (lp->lp_primary_nid == nid)
1644                 goto out;
1645         rc = lnet_peer_add_nid(lp, nid, flags);
1646         if (rc)
1647                 goto out;
1648         lp->lp_primary_nid = nid;
1649 out:
1650         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1651                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1652         return rc;
1653 }
1654
1655 /*
1656  * lpni creation initiated due to traffic either sending or receiving.
1657  */
1658 static int
1659 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1660 {
1661         struct lnet_peer *lp;
1662         struct lnet_peer_net *lpn;
1663         struct lnet_peer_ni *lpni;
1664         unsigned flags = 0;
1665         int rc = 0;
1666
1667         if (nid == LNET_NID_ANY) {
1668                 rc = -EINVAL;
1669                 goto out;
1670         }
1671
1672         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1673         lpni = lnet_find_peer_ni_locked(nid);
1674         if (lpni) {
1675                 /*
1676                  * We must have raced with another thread. Since we
1677                  * know next to nothing about a peer_ni created by
1678                  * traffic, we just assume everything is ok and
1679                  * return.
1680                  */
1681                 lnet_peer_ni_decref_locked(lpni);
1682                 goto out;
1683         }
1684
1685         /* Create peer, peer_net, and peer_ni. */
1686         rc = -ENOMEM;
1687         lp = lnet_peer_alloc(nid);
1688         if (!lp)
1689                 goto out;
1690         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1691         if (!lpn)
1692                 goto out_free_lp;
1693         lpni = lnet_peer_ni_alloc(nid);
1694         if (!lpni)
1695                 goto out_free_lpn;
1696         if (pref != LNET_NID_ANY)
1697                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1698
1699         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1700
1701 out_free_lpn:
1702         LIBCFS_FREE(lpn, sizeof(*lpn));
1703 out_free_lp:
1704         LIBCFS_FREE(lp, sizeof(*lp));
1705 out:
1706         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1707         return rc;
1708 }
1709
1710 /*
1711  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1712  *
1713  * This API handles the following combinations:
1714  *   Create a peer with its primary NI if only the prim_nid is provided.
1715  *   Add a NID to a peer identified by the prim_nid. The peer identified
1716  *   by the prim_nid must already exist.
1717  *   The peer being created may be non-MR.
1718  *
1719  * The caller must hold ln_api_mutex. This prevents the peer from
1720  * being created/modified/deleted by a different thread.
1721  */
1722 int
1723 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1724 {
1725         struct lnet_peer *lp = NULL;
1726         struct lnet_peer_ni *lpni;
1727         unsigned flags;
1728
1729         /* The prim_nid must always be specified */
1730         if (prim_nid == LNET_NID_ANY)
1731                 return -EINVAL;
1732
1733         flags = LNET_PEER_CONFIGURED;
1734         if (mr)
1735                 flags |= LNET_PEER_MULTI_RAIL;
1736
1737         /*
1738          * If nid isn't specified, we must create a new peer with
1739          * prim_nid as its primary nid.
1740          */
1741         if (nid == LNET_NID_ANY)
1742                 return lnet_peer_add(prim_nid, flags);
1743
1744         /* Look up the prim_nid, which must exist. */
1745         lpni = lnet_find_peer_ni_locked(prim_nid);
1746         if (!lpni)
1747                 return -ENOENT;
1748         lnet_peer_ni_decref_locked(lpni);
1749         lp = lpni->lpni_peer_net->lpn_peer;
1750
1751         /* Peer must have been configured. */
1752         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1753                 CDEBUG(D_NET, "peer %s was not configured\n",
1754                        libcfs_nid2str(prim_nid));
1755                 return -ENOENT;
1756         }
1757
1758         /* Primary NID must match */
1759         if (lp->lp_primary_nid != prim_nid) {
1760                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1761                        libcfs_nid2str(prim_nid),
1762                        libcfs_nid2str(lp->lp_primary_nid));
1763                 return -ENODEV;
1764         }
1765
1766         /* Multi-Rail flag must match. */
1767         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1768                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1769                        libcfs_nid2str(prim_nid));
1770                 return -EPERM;
1771         }
1772
1773         return lnet_peer_add_nid(lp, nid, flags);
1774 }
1775
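/*
 * Illustrative sketch (comment only, not built) of the two combinations
 * handled above; addresses are hypothetical and the ln_api_mutex is
 * assumed to be held by the ioctl path.
 *
 *	// Create an MR peer whose primary NID is 10.0.0.2@o2ib:
 *	rc = lnet_add_peer_ni(libcfs_str2nid("10.0.0.2@o2ib"),
 *			      LNET_NID_ANY, true);
 *
 *	// Add a second NID to that (now existing) peer:
 *	rc = lnet_add_peer_ni(libcfs_str2nid("10.0.0.2@o2ib"),
 *			      libcfs_str2nid("192.168.0.2@tcp"), true);
 */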
1776 /*
1777  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1778  *
1779  * This API handles the following combinations:
1780  *   Delete a NI from a peer if both prim_nid and nid are provided.
1781  *   Delete a peer if only prim_nid is provided.
1782  *   Delete a peer if the provided nid is its primary NID.
1783  *
1784  * The caller must hold ln_api_mutex. This prevents the peer from
1785  * being modified/deleted by a different thread.
1786  */
1787 int
1788 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1789 {
1790         struct lnet_peer *lp;
1791         struct lnet_peer_ni *lpni;
1792         unsigned flags;
1793
1794         if (prim_nid == LNET_NID_ANY)
1795                 return -EINVAL;
1796
1797         lpni = lnet_find_peer_ni_locked(prim_nid);
1798         if (!lpni)
1799                 return -ENOENT;
1800         lnet_peer_ni_decref_locked(lpni);
1801         lp = lpni->lpni_peer_net->lpn_peer;
1802
1803         if (prim_nid != lp->lp_primary_nid) {
1804                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1805                        libcfs_nid2str(prim_nid),
1806                        libcfs_nid2str(lp->lp_primary_nid));
1807                 return -ENODEV;
1808         }
1809
1810         lnet_net_lock(LNET_LOCK_EX);
1811         if (lp->lp_rtr_refcount > 0) {
1812                 lnet_net_unlock(LNET_LOCK_EX);
1813                 CERROR("%s is a router. Can not be deleted\n",
1814                        libcfs_nid2str(prim_nid));
1815                 return -EBUSY;
1816         }
1817         lnet_net_unlock(LNET_LOCK_EX);
1818
1819         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1820                 return lnet_peer_del(lp);
1821
1822         flags = LNET_PEER_CONFIGURED;
1823         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1824                 flags |= LNET_PEER_MULTI_RAIL;
1825
1826         return lnet_peer_del_nid(lp, nid, flags);
1827 }
1828
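/*
 * Illustrative sketch (comment only, not built), with hypothetical
 * addresses:
 *
 *	// Remove one NID from the peer whose primary NID is 10.0.0.2@o2ib:
 *	rc = lnet_del_peer_ni(libcfs_str2nid("10.0.0.2@o2ib"),
 *			      libcfs_str2nid("192.168.0.2@tcp"));
 *
 *	// Delete the whole peer (nid omitted or equal to the primary NID):
 *	rc = lnet_del_peer_ni(libcfs_str2nid("10.0.0.2@o2ib"), LNET_NID_ANY);
 *
 * Both calls return -EBUSY if the peer is currently acting as a router.
 */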
1829 void
1830 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
1831 {
1832         struct lnet_peer_table *ptable;
1833         struct lnet_peer_net *lpn;
1834
1835         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1836
1837         LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
1838         LASSERT(list_empty(&lpni->lpni_txq));
1839         LASSERT(lpni->lpni_txqnob == 0);
1840         LASSERT(list_empty(&lpni->lpni_peer_nis));
1841         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1842
1843         lpn = lpni->lpni_peer_net;
1844         lpni->lpni_peer_net = NULL;
1845         lpni->lpni_net = NULL;
1846
1847         if (!list_empty(&lpni->lpni_hashlist)) {
1848                 /* remove the peer ni from the zombie list */
1849                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1850                 spin_lock(&ptable->pt_zombie_lock);
1851                 list_del_init(&lpni->lpni_hashlist);
1852                 ptable->pt_zombies--;
1853                 spin_unlock(&ptable->pt_zombie_lock);
1854         }
1855
1856         if (lpni->lpni_pref_nnids > 1) {
1857                 struct lnet_nid_list *ne, *tmp;
1858
1859                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1860                                          nl_list) {
1861                         list_del_init(&ne->nl_list);
1862                         LIBCFS_FREE(ne, sizeof(*ne));
1863                 }
1864         }
1865         LIBCFS_FREE(lpni, sizeof(*lpni));
1866
1867         if (lpn)
1868                 lnet_peer_net_decref_locked(lpn);
1869 }
1870
1871 struct lnet_peer_ni *
1872 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1873 {
1874         struct lnet_peer_ni *lpni = NULL;
1875         int rc;
1876
1877         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1878                 return ERR_PTR(-ESHUTDOWN);
1879
1880         /*
1881          * find if a peer_ni already exists.
1882          * If so then just return that.
1883          */
1884         lpni = lnet_find_peer_ni_locked(nid);
1885         if (lpni)
1886                 return lpni;
1887
1888         lnet_net_unlock(cpt);
1889
1890         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1891         if (rc) {
1892                 lpni = ERR_PTR(rc);
1893                 goto out_net_relock;
1894         }
1895
1896         lpni = lnet_find_peer_ni_locked(nid);
1897         LASSERT(lpni);
1898
1899 out_net_relock:
1900         lnet_net_lock(cpt);
1901
1902         return lpni;
1903 }
1904
1905 /*
1906  * Get a peer_ni for the given nid, create it if necessary. Takes a
1907  * hold on the peer_ni.
1908  */
1909 struct lnet_peer_ni *
1910 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1911 {
1912         struct lnet_peer_ni *lpni = NULL;
1913         int rc;
1914
1915         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1916                 return ERR_PTR(-ESHUTDOWN);
1917
1918         /*
1919          * find if a peer_ni already exists.
1920          * If so then just return that.
1921          */
1922         lpni = lnet_find_peer_ni_locked(nid);
1923         if (lpni)
1924                 return lpni;
1925
1926         /*
1927          * Slow path:
1928          * use the lnet_api_mutex to serialize the creation of the peer_ni
1929          * and the creation/deletion of the local ni/net. When a local ni is
1930          * created, if there exists a set of peer_nis on that network,
1931          * they need to be traversed and updated. When a local NI is
1932          * deleted, which could result in a network being deleted, then
1933          * all peer nis on that network need to be removed as well.
1934          *
1935          * Creation through traffic should also be serialized with
1936          * creation through DLC.
1937          */
1938         lnet_net_unlock(cpt);
1939         mutex_lock(&the_lnet.ln_api_mutex);
1940         /*
1941          * Shutdown is only set under the ln_api_mutex, so a single
1942          * check here is sufficient.
1943          */
1944         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1945                 lpni = ERR_PTR(-ESHUTDOWN);
1946                 goto out_mutex_unlock;
1947         }
1948
1949         rc = lnet_peer_ni_traffic_add(nid, pref);
1950         if (rc) {
1951                 lpni = ERR_PTR(rc);
1952                 goto out_mutex_unlock;
1953         }
1954
1955         lpni = lnet_find_peer_ni_locked(nid);
1956         LASSERT(lpni);
1957
1958 out_mutex_unlock:
1959         mutex_unlock(&the_lnet.ln_api_mutex);
1960         lnet_net_lock(cpt);
1961
1962         /* Lock has been dropped, check again for shutdown. */
1963         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1964                 if (!IS_ERR(lpni))
1965                         lnet_peer_ni_decref_locked(lpni);
1966                 lpni = ERR_PTR(-ESHUTDOWN);
1967         }
1968
1969         return lpni;
1970 }
1971
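/*
 * Typical caller pattern (illustrative sketch, comment only), assuming
 * nid and cpt are supplied by the caller: the lnet_net_lock for the
 * given cpt is held across the call, the return value is checked with
 * IS_ERR(), and the hold taken here is eventually dropped.
 *
 *	struct lnet_peer_ni *lpni;
 *
 *	lnet_net_lock(cpt);
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni)) {
 *		lnet_net_unlock(cpt);
 *		return PTR_ERR(lpni);
 *	}
 *	// ... use lpni ...
 *	lnet_peer_ni_decref_locked(lpni);
 *	lnet_net_unlock(cpt);
 */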
1972 bool
1973 lnet_peer_gw_discovery(struct lnet_peer *lp)
1974 {
1975         bool rc = false;
1976
1977         spin_lock(&lp->lp_lock);
1978         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
1979                 rc = true;
1980         spin_unlock(&lp->lp_lock);
1981
1982         return rc;
1983 }
1984
1985 bool
1986 lnet_peer_is_uptodate(struct lnet_peer *lp)
1987 {
1988         bool rc;
1989
1990         spin_lock(&lp->lp_lock);
1991         rc = lnet_peer_is_uptodate_locked(lp);
1992         spin_unlock(&lp->lp_lock);
1993         return rc;
1994 }
1995
1996 /*
1997  * Is a peer uptodate from the point of view of discovery?
1998  *
1999  * If it is currently being processed, obviously not.
2000  * A forced Ping or Push is also handled by the discovery thread.
2001  *
2002  * Otherwise look at whether the peer needs rediscovering.
2003  */
2004 bool
2005 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2006 __must_hold(&lp->lp_lock)
2007 {
2008         bool rc;
2009
2010         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2011                             LNET_PEER_FORCE_PING |
2012                             LNET_PEER_FORCE_PUSH)) {
2013                 rc = false;
2014         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2015                 rc = false;
2016         } else if (lnet_peer_needs_push(lp)) {
2017                 rc = false;
2018         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2019                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2020                         rc = true;
2021                 else
2022                         rc = false;
2023         } else {
2024                 rc = false;
2025         }
2026
2027         return rc;
2028 }
2029
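/*
 * For example (illustrative): a peer whose lp_state has
 * LNET_PEER_DISCOVERED and LNET_PEER_NIDS_UPTODATE set, none of
 * LNET_PEER_DISCOVERING, LNET_PEER_FORCE_PING, LNET_PEER_FORCE_PUSH or
 * LNET_PEER_REDISCOVER, and that does not need a push
 * (lnet_peer_needs_push()), is up to date; every other combination
 * makes lnet_peer_is_uptodate_locked() return false.
 */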
2030 /*
2031  * Queue a peer for the attention of the discovery thread.  Call with
2032  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2033  * -EALREADY if the peer was already queued.
2034  */
2035 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2036 {
2037         int rc;
2038
2039         spin_lock(&lp->lp_lock);
2040         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2041                 lp->lp_state |= LNET_PEER_DISCOVERING;
2042         spin_unlock(&lp->lp_lock);
2043         if (list_empty(&lp->lp_dc_list)) {
2044                 lnet_peer_addref_locked(lp);
2045                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2046                 wake_up(&the_lnet.ln_dc_waitq);
2047                 rc = 0;
2048         } else {
2049                 rc = -EALREADY;
2050         }
2051
2052         CDEBUG(D_NET, "Queue peer %s: %d\n",
2053                libcfs_nid2str(lp->lp_primary_nid), rc);
2054
2055         return rc;
2056 }
2057
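/*
 * Illustrative sketch (comment only): callers hold the lnet_net_lock in
 * LNET_LOCK_EX mode, as in lnet_peer_push_event() below.
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	if (!lnet_peer_is_uptodate(lp))
 *		lnet_peer_queue_for_discovery(lp);	// 0 or -EALREADY
 *	lnet_net_unlock(LNET_LOCK_EX);
 */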
2058 /*
2059  * Discovery of a peer is complete. Wake all waiters on the peer.
2060  * Call with lnet_net_lock/EX held.
2061  */
2062 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2063 {
2064         struct lnet_msg *msg, *tmp;
2065         int rc = 0;
2066         LIST_HEAD(pending_msgs);
2067
2068         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2069                libcfs_nid2str(lp->lp_primary_nid));
2070
2071         list_del_init(&lp->lp_dc_list);
2072         spin_lock(&lp->lp_lock);
2073         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2074         spin_unlock(&lp->lp_lock);
2075         wake_up_all(&lp->lp_dc_waitq);
2076
2077         if (lp->lp_rtr_refcount > 0)
2078                 lnet_router_discovery_complete(lp);
2079
2080         lnet_net_unlock(LNET_LOCK_EX);
2081
2082         /* iterate through all pending messages and send them again */
2083         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2084                 list_del_init(&msg->msg_list);
2085                 if (lp->lp_dc_error) {
2086                         lnet_finalize(msg, lp->lp_dc_error);
2087                         continue;
2088                 }
2089
2090                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2091                        lnet_msgtyp2str(msg->msg_type),
2092                        libcfs_id2str(msg->msg_target));
2093                 rc = lnet_send(msg->msg_src_nid_param, msg,
2094                                msg->msg_rtr_nid_param);
2095                 if (rc < 0) {
2096                         CNETERR("Error sending %s to %s: %d\n",
2097                                lnet_msgtyp2str(msg->msg_type),
2098                                libcfs_id2str(msg->msg_target), rc);
2099                         lnet_finalize(msg, rc);
2100                 }
2101         }
2102         lnet_net_lock(LNET_LOCK_EX);
2103         lnet_peer_decref_locked(lp);
2104 }
2105
2106 /*
2107  * Handle inbound push.
2108  * Like any event handler, called with lnet_res_lock/CPT held.
2109  */
2110 void lnet_peer_push_event(struct lnet_event *ev)
2111 {
2112         struct lnet_ping_buffer *pbuf;
2113         struct lnet_peer *lp;
2114
2115         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2116
2117         /* lnet_find_peer() adds a refcount */
2118         lp = lnet_find_peer(ev->source.nid);
2119         if (!lp) {
2120                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2121                        libcfs_nid2str(ev->initiator.nid),
2122                        libcfs_nid2str(ev->source.nid));
2123                 pbuf->pb_needs_post = true;
2124                 return;
2125         }
2126
2127         /* Ensure peer state remains consistent while we modify it. */
2128         spin_lock(&lp->lp_lock);
2129
2130         /*
2131          * If some kind of error happened the contents of the message
2132          * cannot be used. Clear the NIDS_UPTODATE and set the
2133          * FORCE_PING flag to trigger a ping.
2134          */
2135         if (ev->status) {
2136                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2137                 lp->lp_state |= LNET_PEER_FORCE_PING;
2138                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2139                        ev->status,
2140                        libcfs_nid2str(lp->lp_primary_nid),
2141                        libcfs_nid2str(ev->source.nid));
2142                 goto out;
2143         }
2144
2145         /*
2146          * A push with invalid or corrupted info. Clear the UPTODATE
2147          * flag to trigger a ping.
2148          */
2149         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2150                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2151                 lp->lp_state |= LNET_PEER_FORCE_PING;
2152                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2153                        libcfs_nid2str(lp->lp_primary_nid));
2154                 goto out;
2155         }
2156
2157         /*
2158          * Make sure we'll allocate the correct size ping buffer when
2159          * pinging the peer.
2160          */
2161         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2162                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2163
2164         /*
2165          * A non-Multi-Rail peer is not supposed to be capable of
2166          * sending a push.
2167          */
2168         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2169                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2170                        libcfs_nid2str(lp->lp_primary_nid));
2171                 goto out;
2172         }
2173
2174         /*
2175          * The peer may have discovery disabled at its end. Set
2176          * NO_DISCOVERY as appropriate.
2177          */
2178         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2179                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2180                        libcfs_nid2str(lp->lp_primary_nid));
2181                 /*
2182                  * Mark the peer for deletion if we already know about it
2183                  * and it is going from having discovery enabled to disabled
2184                  */
2185                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2186                                       LNET_PEER_DISCOVERING)) &&
2187                      lp->lp_state & LNET_PEER_DISCOVERED) {
2188                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2189                                libcfs_nid2str(lp->lp_primary_nid),
2190                                lp->lp_state);
2191                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2192                 }
2193                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2194         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2195                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2196                        libcfs_nid2str(lp->lp_primary_nid));
2197                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2198         }
2199
2200         /*
2201          * Update the MULTI_RAIL flag based on the push. If the peer
2202          * was configured with DLC then the setting should match what
2203          * DLC put in.
2204          * NB: We verified above that the MR feature bit is set in pi_features
2205          */
2206         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2207                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2208                        libcfs_nid2str(lp->lp_primary_nid), lp);
2209         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2210                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2211                       libcfs_nid2str(lp->lp_primary_nid));
2212         } else if (lnet_peer_discovery_disabled) {
2213                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2214                        libcfs_nid2str(lp->lp_primary_nid), lp);
2215         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2216                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2217                        libcfs_nid2str(lp->lp_primary_nid), lp);
2218         } else {
2219                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2220                        libcfs_nid2str(lp->lp_primary_nid), lp);
2221                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2222                 lnet_peer_clr_non_mr_pref_nids(lp);
2223         }
2224
2225         /*
2226          * Check for truncation of the Put message. Clear the
2227          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2228          * and tell discovery to allocate a bigger buffer.
2229          */
2230         if (ev->mlength < ev->rlength) {
2231                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2232                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2233                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2234                 lp->lp_state |= LNET_PEER_FORCE_PING;
2235                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2236                        libcfs_nid2str(lp->lp_primary_nid),
2237                        pbuf->pb_info.pi_nnis);
2238                 goto out;
2239         }
2240
2241         /* always assume new data */
2242         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2243         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2244
2245         /*
2246          * If there is data present that hasn't been processed yet,
2247          * we'll replace it if the Put contained newer data and it
2248          * fits. We're racing with a Ping or earlier Push in this
2249          * case.
2250          */
2251         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2252                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2253                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2254                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2255                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2256                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2257                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2258                               libcfs_nid2str(lp->lp_primary_nid),
2259                               LNET_PING_BUFFER_SEQNO(pbuf),
2260                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2261                 }
2262                 goto out;
2263         }
2264
2265         /*
2266          * Allocate a buffer to copy the data. On a failure we drop
2267          * the Push and set FORCE_PING to force the discovery
2268          * thread to fix the problem by pinging the peer.
2269          */
2270         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2271         if (!lp->lp_data) {
2272                 lp->lp_state |= LNET_PEER_FORCE_PING;
2273                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2274                        libcfs_nid2str(lp->lp_primary_nid),
2275                        LNET_PING_BUFFER_SEQNO(pbuf));
2276                 goto out;
2277         }
2278
2279         /* Success */
2280         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2281                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2282         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2283         CDEBUG(D_NET, "Received Push %s %u\n",
2284                libcfs_nid2str(lp->lp_primary_nid),
2285                LNET_PING_BUFFER_SEQNO(pbuf));
2286
2287 out:
2288         /* We've processed this buffer. It can be reposted */
2289         pbuf->pb_needs_post = true;
2290
2291         /*
2292          * Queue the peer for discovery if it is not up to date. If it was
2293          * already queued, force it onto the request queue and wake the
2294          * discovery thread, because its status has changed.
2295          */
2296         spin_unlock(&lp->lp_lock);
2297         lnet_net_lock(LNET_LOCK_EX);
2298         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2299                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2300                 wake_up(&the_lnet.ln_dc_waitq);
2301         }
2302         /* Drop refcount from lookup */
2303         lnet_peer_decref_locked(lp);
2304         lnet_net_unlock(LNET_LOCK_EX);
2305 }
2306
2307 /*
2308  * Clear the discovery error state, unless we're already discovering
2309  * this peer, in which case the error is current.
2310  */
2311 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2312 {
2313         spin_lock(&lp->lp_lock);
2314         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2315                 lp->lp_dc_error = 0;
2316         spin_unlock(&lp->lp_lock);
2317 }
2318
2319 /*
2320  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2321  * dropped/retaken within this function. An lnet_peer_ni is passed in
2322  * because discovery could tear down an lnet_peer.
2323  */
2324 int
2325 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2326 {
2327         DEFINE_WAIT(wait);
2328         struct lnet_peer *lp;
2329         int rc = 0;
2330         int count = 0;
2331
2332 again:
2333         lnet_net_unlock(cpt);
2334         lnet_net_lock(LNET_LOCK_EX);
2335         lp = lpni->lpni_peer_net->lpn_peer;
2336         lnet_peer_clear_discovery_error(lp);
2337
2338         /*
2339          * We're willing to be interrupted. The lpni can become a
2340          * zombie if we race with DLC, so we must check for that.
2341          */
2342         for (;;) {
2343                 /* Keep lp alive when the lnet_net_lock is unlocked */
2344                 lnet_peer_addref_locked(lp);
2345                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2346                 if (signal_pending(current))
2347                         break;
2348                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2349                         break;
2350                 /*
2351                  * Don't repeat discovery if discovery is disabled. This is
2352                  * done to ensure we can use discovery as a standard ping as
2353                  * well for backwards compatibility with routers which do not
2354                  * have discovery or have discovery disabled
2355                  */
2356                 if (lnet_is_discovery_disabled(lp) && count > 0)
2357                         break;
2358                 if (lp->lp_dc_error)
2359                         break;
2360                 if (lnet_peer_is_uptodate(lp))
2361                         break;
2362                 lnet_peer_queue_for_discovery(lp);
2363                 count++;
2364                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2365
2366                 /*
2367                  * If caller requested a non-blocking operation then
2368                  * return immediately. Once discovery is complete any
2369                  * pending messages that were stopped due to discovery
2370                  * will be transmitted.
2371                  */
2372                 if (!block)
2373                         break;
2374
2375                 lnet_net_unlock(LNET_LOCK_EX);
2376                 schedule();
2377                 finish_wait(&lp->lp_dc_waitq, &wait);
2378                 lnet_net_lock(LNET_LOCK_EX);
2379                 lnet_peer_decref_locked(lp);
2380                 /* Peer may have changed */
2381                 lp = lpni->lpni_peer_net->lpn_peer;
2382         }
2383         finish_wait(&lp->lp_dc_waitq, &wait);
2384
2385         lnet_net_unlock(LNET_LOCK_EX);
2386         lnet_net_lock(cpt);
2387         lnet_peer_decref_locked(lp);
2388         /*
2389          * The peer may have changed, so re-check and rediscover if that turns
2390          * out to have been the case. The reference count on lp ensured that
2391          * even if it was unlinked from lpni the memory could not be recycled.
2392          * Thus the check below is sufficient to determine whether the peer
2393          * changed. If the peer changed, then lp must not be dereferenced.
2394          */
2395         if (lp != lpni->lpni_peer_net->lpn_peer)
2396                 goto again;
2397
2398         if (signal_pending(current))
2399                 rc = -EINTR;
2400         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2401                 rc = -ESHUTDOWN;
2402         else if (lp->lp_dc_error)
2403                 rc = lp->lp_dc_error;
2404         else if (!block)
2405                 CDEBUG(D_NET, "non-blocking discovery\n");
2406         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2407                 goto again;
2408
2409         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2410                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2411                libcfs_nid2str(lpni->lpni_nid), rc,
2412                (!block) ? "pending discovery" : "discovery complete");
2413
2414         return rc;
2415 }
2416
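/*
 * Illustrative sketch (comment only, not built): a caller, e.g. the
 * LNet ioctl path, holds the ln_api_mutex and lnet_net_lock(cpt) and
 * already has a ref on lpni.
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	if (rc == -EINTR) {
 *		// interrupted by a signal while waiting
 *	} else if (rc == -ESHUTDOWN) {
 *		// the discovery thread is shutting down
 *	} else if (rc < 0) {
 *		// lp_dc_error from the failed discovery attempt
 *	}
 *	// rc == 0: discovery completed (or was only queued if !block)
 */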
2417 /* Handle an incoming ack for a push. */
2418 static void
2419 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2420 {
2421         struct lnet_ping_buffer *pbuf;
2422
2423         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2424         spin_lock(&lp->lp_lock);
2425         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2426         lp->lp_push_error = ev->status;
2427         if (ev->status)
2428                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2429         else
2430                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2431         spin_unlock(&lp->lp_lock);
2432
2433         CDEBUG(D_NET, "peer %s ev->status %d\n",
2434                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2435 }
2436
2437 /* Handle a Reply message. This is the reply to a Ping message. */
2438 static void
2439 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2440 {
2441         struct lnet_ping_buffer *pbuf;
2442         int rc;
2443
2444         spin_lock(&lp->lp_lock);
2445
2446         lp->lp_disc_src_nid = ev->target.nid;
2447
2448         /*
2449          * If some kind of error happened the contents of message
2450          * cannot be used. Set PING_FAILED to trigger a retry.
2451          */
2452         if (ev->status) {
2453                 lp->lp_state |= LNET_PEER_PING_FAILED;
2454                 lp->lp_ping_error = ev->status;
2455                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2456                        ev->status,
2457                        libcfs_nid2str(lp->lp_primary_nid),
2458                        libcfs_nid2str(ev->source.nid));
2459                 goto out;
2460         }
2461
2462         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2463         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2464                 lnet_swap_pinginfo(pbuf);
2465
2466         /*
2467          * A reply with invalid or corrupted info. Set PING_FAILED to
2468          * trigger a retry.
2469          */
2470         rc = lnet_ping_info_validate(&pbuf->pb_info);
2471         if (rc) {
2472                 lp->lp_state |= LNET_PEER_PING_FAILED;
2473                 lp->lp_ping_error = 0;
2474                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2475                        libcfs_nid2str(lp->lp_primary_nid), rc);
2476                 goto out;
2477         }
2478
2479
2480         /*
2481          * The peer may have discovery disabled at its end. Set
2482          * NO_DISCOVERY as appropriate.
2483          */
2484         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2485             !lnet_peer_discovery_disabled) {
2486                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2487                        libcfs_nid2str(lp->lp_primary_nid));
2488                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2489         } else {
2490                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2491                        libcfs_nid2str(lp->lp_primary_nid));
2492                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2493         }
2494
2495         /*
2496          * Update the MULTI_RAIL flag based on the reply. If the peer
2497          * was configured with DLC then the setting should match what
2498          * DLC put in.
2499          */
2500         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2501                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2502                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2503                                libcfs_nid2str(lp->lp_primary_nid), lp);
2504                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2505                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2506                               libcfs_nid2str(lp->lp_primary_nid));
2507                 } else if (lnet_peer_discovery_disabled) {
2508                         CDEBUG(D_NET,
2509                                "peer %s(%p) not MR: DD disabled locally\n",
2510                                libcfs_nid2str(lp->lp_primary_nid), lp);
2511                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2512                         CDEBUG(D_NET,
2513                                "peer %s(%p) not MR: DD disabled remotely\n",
2514                                libcfs_nid2str(lp->lp_primary_nid), lp);
2515                 } else {
2516                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2517                                libcfs_nid2str(lp->lp_primary_nid), lp);
2518                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2519                         lnet_peer_clr_non_mr_pref_nids(lp);
2520                 }
2521         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2522                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2523                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2524                               libcfs_nid2str(lp->lp_primary_nid));
2525                 } else {
2526                         CERROR("Multi-Rail state vanished from %s\n",
2527                                libcfs_nid2str(lp->lp_primary_nid));
2528                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2529                 }
2530         }
2531
2532         /*
2533          * Make sure we'll allocate the correct size ping buffer when
2534          * pinging the peer.
2535          */
2536         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2537                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2538
2539         /*
2540          * Check for truncation of the Reply. Clear PING_SENT and set
2541          * PING_FAILED to trigger a retry.
2542          */
2543         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2544                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2545                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2546                 lp->lp_state |= LNET_PEER_PING_FAILED;
2547                 lp->lp_ping_error = 0;
2548                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2549                        libcfs_nid2str(lp->lp_primary_nid),
2550                        pbuf->pb_info.pi_nnis);
2551                 goto out;
2552         }
2553
2554         /*
2555          * Check the sequence numbers in the reply. These are only
2556          * available if the reply came from a Multi-Rail peer.
2557          */
2558         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2559             pbuf->pb_info.pi_nnis > 1 &&
2560             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2561                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2562                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2563                                 libcfs_nid2str(lp->lp_primary_nid),
2564                                 LNET_PING_BUFFER_SEQNO(pbuf),
2565                                 lp->lp_peer_seqno);
2566
2567                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2568         }
2569
2570         /* We're happy with the state of the data in the buffer. */
2571         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2572                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2573         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2574                 lnet_ping_buffer_decref(lp->lp_data);
2575         else
2576                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2577         lnet_ping_buffer_addref(pbuf);
2578         lp->lp_data = pbuf;
2579 out:
2580         lp->lp_state &= ~LNET_PEER_PING_SENT;
2581         spin_unlock(&lp->lp_lock);
2582
2583         lnet_net_lock(LNET_LOCK_EX);
2584         /*
2585          * If this peer is a gateway, call the routing callback to
2586          * handle the ping reply
2587          */
2588         if (lp->lp_rtr_refcount > 0)
2589                 lnet_router_discovery_ping_reply(lp);
2590         lnet_net_unlock(LNET_LOCK_EX);
2591 }
2592
2593 /*
2594  * Send event handling. Only matters for error cases, where we clean
2595  * up state on the peer and peer_ni that would otherwise be updated in
2596  * the REPLY event handler for a successful Ping, and the ACK event
2597  * handler for a successful Push.
2598  */
2599 static int
2600 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2601 {
2602         int rc = 0;
2603
2604         if (!ev->status)
2605                 goto out;
2606
2607         spin_lock(&lp->lp_lock);
2608         if (ev->msg_type == LNET_MSG_GET) {
2609                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2610                 lp->lp_state |= LNET_PEER_PING_FAILED;
2611                 lp->lp_ping_error = ev->status;
2612         } else { /* ev->msg_type == LNET_MSG_PUT */
2613                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2614                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2615                 lp->lp_push_error = ev->status;
2616         }
2617         spin_unlock(&lp->lp_lock);
2618         rc = LNET_REDISCOVER_PEER;
2619 out:
2620         CDEBUG(D_NET, "%s Send to %s: %d\n",
2621                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2622                 libcfs_nid2str(ev->target.nid), rc);
2623         return rc;
2624 }
2625
2626 /*
2627  * Unlink event handling. This event is only seen if a call to
2628  * LNetMDUnlink() caused the event to be unlinked. If this call was
2629  * made after the event was set up in LNetGet() or LNetPut() then we
2630  * assume the Ping or Push timed out.
2631  */
2632 static void
2633 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2634 {
2635         spin_lock(&lp->lp_lock);
2636         /* We've passed through LNetGet() */
2637         if (lp->lp_state & LNET_PEER_PING_SENT) {
2638                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2639                 lp->lp_state |= LNET_PEER_PING_FAILED;
2640                 lp->lp_ping_error = -ETIMEDOUT;
2641                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2642                         libcfs_nid2str(lp->lp_primary_nid));
2643         }
2644         /* We've passed through LNetPut() */
2645         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2646                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2647                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2648                 lp->lp_push_error = -ETIMEDOUT;
2649                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2650                         libcfs_nid2str(lp->lp_primary_nid));
2651         }
2652         spin_unlock(&lp->lp_lock);
2653 }
2654
2655 /*
2656  * Event handler for the discovery EQ.
2657  *
2658  * Called with lnet_res_lock(cpt) held. The cpt is the
2659  * lnet_cpt_of_cookie() of the md handle cookie.
2660  */
2661 static void lnet_discovery_event_handler(struct lnet_event *event)
2662 {
2663         struct lnet_peer *lp = event->md_user_ptr;
2664         struct lnet_ping_buffer *pbuf;
2665         int rc;
2666
2667         /* discovery needs to take another look */
2668         rc = LNET_REDISCOVER_PEER;
2669
2670         CDEBUG(D_NET, "Received event: %d\n", event->type);
2671
2672         switch (event->type) {
2673         case LNET_EVENT_ACK:
2674                 lnet_discovery_event_ack(lp, event);
2675                 break;
2676         case LNET_EVENT_REPLY:
2677                 lnet_discovery_event_reply(lp, event);
2678                 break;
2679         case LNET_EVENT_SEND:
2680                 /* Only send failure triggers a retry. */
2681                 rc = lnet_discovery_event_send(lp, event);
2682                 break;
2683         case LNET_EVENT_UNLINK:
2684                 /* LNetMDUnlink() was called */
2685                 lnet_discovery_event_unlink(lp, event);
2686                 break;
2687         default:
2688                 /* Invalid events. */
2689                 LBUG();
2690         }
2691         lnet_net_lock(LNET_LOCK_EX);
2692         if (event->unlinked) {
2693                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2694                 lnet_ping_buffer_decref(pbuf);
2695                 lnet_peer_decref_locked(lp);
2696         }
2697
2698         /* put peer back at end of request queue, if discovery not already
2699          * done */
2700         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2701                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2702                 wake_up(&the_lnet.ln_dc_waitq);
2703         }
2704         lnet_net_unlock(LNET_LOCK_EX);
2705 }
2706
2707 /*
2708  * Build a peer from incoming data.
2709  *
2710  * The NIDs in the incoming data are supposed to be structured as follows:
2711  *  - loopback
2712  *  - primary NID
2713  *  - other NIDs in same net
2714  *  - NIDs in second net
2715  *  - NIDs in third net
2716  *  - ...
2717  * This is due to the way the list of NIDs in the data is created.
2718  *
2719  * Note that this function will mark the peer uptodate unless an
2720  * ENOMEM is encountered. All other errors are due to a conflict
2721  * between the DLC configuration and what discovery sees. We treat DLC
2722  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2723  * peer from becoming stuck in discovery.
2724  */
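/*
 * For example (hypothetical addresses), a Multi-Rail peer with two TCP
 * NIDs and one o2ib NID would be reported as:
 *
 *	pi_ni[0].ns_nid = 0@lo
 *	pi_ni[1].ns_nid = 192.168.0.2@tcp	// primary NID
 *	pi_ni[2].ns_nid = 192.168.0.3@tcp
 *	pi_ni[3].ns_nid = 10.0.0.2@o2ib
 */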
2725 static int lnet_peer_merge_data(struct lnet_peer *lp,
2726                                 struct lnet_ping_buffer *pbuf)
2727 {
2728         struct lnet_peer_ni *lpni;
2729         lnet_nid_t *curnis = NULL;
2730         struct lnet_ni_status *addnis = NULL;
2731         lnet_nid_t *delnis = NULL;
2732         unsigned flags;
2733         int ncurnis;
2734         int naddnis;
2735         int ndelnis;
2736         int nnis = 0;
2737         int i;
2738         int j;
2739         int rc;
2740
2741         flags = LNET_PEER_DISCOVERED;
2742         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2743                 flags |= LNET_PEER_MULTI_RAIL;
2744
2745         /*
2746          * Cache the routing feature for the peer, i.e. whether it is
2747          * enabled or disabled as reported by the remote peer.
2748          */
2749         spin_lock(&lp->lp_lock);
2750         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2751                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2752         else
2753                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2754         spin_unlock(&lp->lp_lock);
2755
2756         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2757         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2758         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2759         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2760         if (!curnis || !addnis || !delnis) {
2761                 rc = -ENOMEM;
2762                 goto out;
2763         }
2764         ncurnis = 0;
2765         naddnis = 0;
2766         ndelnis = 0;
2767
2768         /* Construct the list of NIDs present in peer. */
2769         lpni = NULL;
2770         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2771                 curnis[ncurnis++] = lpni->lpni_nid;
2772
2773         /*
2774          * Check for NIDs in pbuf not present in curnis[].
2775          * The loop starts at 1 to skip the loopback NID.
2776          */
2777         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2778                 for (j = 0; j < ncurnis; j++)
2779                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2780                                 break;
2781                 if (j == ncurnis)
2782                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2783         }
2784         /*
2785          * Check for NIDs in curnis[] not present in pbuf.
2786          * The nested loop starts at 1 to skip the loopback NID.
2787          *
2788          * But never add the loopback NID to delnis[]: if it is
2789          * present in curnis[] then this peer is for this node.
2790          */
2791         for (i = 0; i < ncurnis; i++) {
2792                 if (curnis[i] == LNET_NID_LO_0)
2793                         continue;
2794                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2795                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2796                                 /*
2797                                  * update the information we cache for the
2798                                  * peer with the latest information we
2799                                  * received
2800                                  */
2801                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2802                                 if (lpni) {
2803                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2804                                         lnet_peer_ni_decref_locked(lpni);
2805                                 }
2806                                 break;
2807                         }
2808                 }
2809                 if (j == pbuf->pb_info.pi_nnis)
2810                         delnis[ndelnis++] = curnis[i];
2811         }
2812
2813         /*
2814          * If we get here and the discovery is disabled then we don't want
2815          * to add or delete any NIs. We just updated the ones we have some
2816          * information on, and call it a day
2817          */
2818         rc = 0;
2819         if (lnet_is_discovery_disabled(lp))
2820                 goto out;
2821
2822         for (i = 0; i < naddnis; i++) {
2823                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2824                 if (rc) {
2825                         CERROR("Error adding NID %s to peer %s: %d\n",
2826                                libcfs_nid2str(addnis[i].ns_nid),
2827                                libcfs_nid2str(lp->lp_primary_nid), rc);
2828                         if (rc == -ENOMEM)
2829                                 goto out;
2830                 }
2831                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2832                 if (lpni) {
2833                         lpni->lpni_ns_status = addnis[i].ns_status;
2834                         lnet_peer_ni_decref_locked(lpni);
2835                 }
2836         }
2837
2838         for (i = 0; i < ndelnis; i++) {
2839                 /*
2840                  * for routers it's okay to delete the primary_nid because
2841                  * the upper layers don't really rely on it. So if we're
2842                  * being told that the router changed its primary_nid
2843                  * then it's okay to delete it.
2844                  */
2845                 if (lp->lp_rtr_refcount > 0)
2846                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2847                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2848                 if (rc) {
2849                         CERROR("Error deleting NID %s from peer %s: %d\n",
2850                                libcfs_nid2str(delnis[i]),
2851                                libcfs_nid2str(lp->lp_primary_nid), rc);
2852                         if (rc == -ENOMEM)
2853                                 goto out;
2854                 }
2855         }
2856         /*
2857          * Errors other than -ENOMEM are due to peers having been
2858          * configured with DLC. Ignore these because DLC overrides
2859          * Discovery.
2860          */
2861         rc = 0;
2862 out:
2863         CFS_FREE_PTR_ARRAY(curnis, nnis);
2864         CFS_FREE_PTR_ARRAY(addnis, nnis);
2865         CFS_FREE_PTR_ARRAY(delnis, nnis);
2866         lnet_ping_buffer_decref(pbuf);
2867         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2868
2869         if (rc) {
2870                 spin_lock(&lp->lp_lock);
2871                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2872                 lp->lp_state |= LNET_PEER_FORCE_PING;
2873                 spin_unlock(&lp->lp_lock);
2874         }
2875         return rc;
2876 }
2877
2878 /*
2879  * The data in pbuf says lp is its primary peer, but the data was
2880  * received by a different peer. Try to update lp with the data.
2881  */
2882 static int
2883 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2884 {
2885         struct lnet_handle_md mdh;
2886
2887         /* Queue lp for discovery, and force it on the request queue. */
2888         lnet_net_lock(LNET_LOCK_EX);
2889         if (lnet_peer_queue_for_discovery(lp))
2890                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2891         lnet_net_unlock(LNET_LOCK_EX);
2892
2893         LNetInvalidateMDHandle(&mdh);
2894
2895         /*
2896          * Decide whether we can move the peer to the DATA_PRESENT state.
2897          *
2898          * We replace stale data for a multi-rail peer, repair PING_FAILED
2899          * status, and preempt FORCE_PING.
2900          *
2901          * If after that we have DATA_PRESENT, we merge it into this peer.
2902          */
2903         spin_lock(&lp->lp_lock);
2904         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2905                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2906                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2907                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2908                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2909                         lnet_ping_buffer_decref(pbuf);
2910                         pbuf = lp->lp_data;
2911                         lp->lp_data = NULL;
2912                 }
2913         }
2914         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2915                 lnet_ping_buffer_decref(lp->lp_data);
2916                 lp->lp_data = NULL;
2917                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2918         }
2919         if (lp->lp_state & LNET_PEER_PING_FAILED) {
2920                 mdh = lp->lp_ping_mdh;
2921                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2922                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2923                 lp->lp_ping_error = 0;
2924         }
2925         if (lp->lp_state & LNET_PEER_FORCE_PING)
2926                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2927         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2928         spin_unlock(&lp->lp_lock);
2929
2930         if (!LNetMDHandleIsInvalid(mdh))
2931                 LNetMDUnlink(mdh);
2932
2933         if (pbuf)
2934                 return lnet_peer_merge_data(lp, pbuf);
2935
2936         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2937         return 0;
2938 }
2939
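/*
 * Return true if @nid appears in the NI status array of @pinfo. Used below
 * to check whether our cached primary NID is still advertised by the peer.
 */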
2940 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2941 {
2942         int i;
2943
2944         for (i = 0; i < pinfo->pi_nnis; i++) {
2945                 if (pinfo->pi_ni[i].ns_nid == nid)
2946                         return true;
2947         }
2948
2949         return false;
2950 }
2951
2952 /*
2953  * Update a peer using the data received.
2954  */
2955 static int lnet_peer_data_present(struct lnet_peer *lp)
2956 __must_hold(&lp->lp_lock)
2957 {
2958         struct lnet_ping_buffer *pbuf;
2959         struct lnet_peer_ni *lpni;
2960         lnet_nid_t nid = LNET_NID_ANY;
2961         unsigned flags;
2962         int rc = 0;
2963
2964         pbuf = lp->lp_data;
2965         lp->lp_data = NULL;
2966         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2967         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2968         spin_unlock(&lp->lp_lock);
2969
2970         /*
2971          * Modifications of peer structures are done while holding the
2972          * ln_api_mutex. A global lock is required because we may be
2973          * modifying multiple peer structures, and a mutex greatly
2974          * simplifies memory management.
2975          *
2976          * The actual changes to the data structures must also protect
2977          * against concurrent lookups, for which the lnet_net_lock in
2978          * LNET_LOCK_EX mode is used.
2979          */
2980         mutex_lock(&the_lnet.ln_api_mutex);
2981         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2982                 rc = -ESHUTDOWN;
2983                 goto out;
2984         }
2985
2986         /*
2987          * If this peer is not on the peer list then it is being torn
2988          * down, and our reference count may be all that is keeping it
2989          * alive. Don't do any work on it.
2990          */
2991         if (list_empty(&lp->lp_peer_list))
2992                 goto out;
2993
2994         flags = LNET_PEER_DISCOVERED;
2995         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2996                 flags |= LNET_PEER_MULTI_RAIL;
2997
2998         /*
2999          * Check whether the primary NID in the message matches the
3000          * primary NID of the peer. If it does, update the peer; if
3001          * it does not, check whether there is already a peer with
3002          * that primary NID. If no such peer exists, try to update
3003          * the primary NID of the current peer (allowed if it was
3004          * created due to message traffic) and complete the update.
3005          * If the peer did exist, hand off the data to it.
3006          *
3007          * The peer for the loopback interface is a special case: this
3008          * is the peer for the local node, and we want to set its
3009          * primary NID to the correct value here. Moreover, this peer
3010          * can show up with only the loopback NID in the ping buffer.
3011          */
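        /* A ping buffer containing only the loopback NID has nothing to merge. */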
3012         if (pbuf->pb_info.pi_nnis <= 1)
3013                 goto out;
3014         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3015         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3016                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3017                 if (!rc)
3018                         rc = lnet_peer_merge_data(lp, pbuf);
3019         /*
3020          * If the primary NID we have cached for this peer is present in
3021          * the ping info returned from the peer, but it is not the primary
3022          * NID the peer itself reports, and discovery is disabled, then we
3023          * don't want to update our local peer info by adding or removing
3024          * NIDs; we just want to update the status of the NIDs that we
3025          * currently have recorded in that peer.
3026          */
3027         } else if (lp->lp_primary_nid == nid ||
3028                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3029                     lnet_is_discovery_disabled(lp))) {
3030                 rc = lnet_peer_merge_data(lp, pbuf);
3031         } else {
3032                 lpni = lnet_find_peer_ni_locked(nid);
3033                 if (!lpni) {
3034                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3035                         if (rc) {
3036                                 CERROR("Primary NID error %s versus %s: %d\n",
3037                                        libcfs_nid2str(lp->lp_primary_nid),
3038                                        libcfs_nid2str(nid), rc);
3039                         } else {
3040                                 rc = lnet_peer_merge_data(lp, pbuf);
3041                         }
3042                 } else {
3043                         struct lnet_peer *new_lp;
3044                         new_lp = lpni->lpni_peer_net->lpn_peer;
3045                         /*
3046                          * if lp has discovery/MR enabled that means new_lp
3047                          * should have discovery/MR enabled as well, since
3048                          * it's the same peer, which we're about to merge
3049                          */
3050                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3051                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3052                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3053                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3054
3055                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3056                         lnet_consolidate_routes_locked(lp, new_lp);
3057                         lnet_peer_ni_decref_locked(lpni);
3058                 }
3059         }
3060 out:
3061         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3062                lp->lp_state);
3063         mutex_unlock(&the_lnet.ln_api_mutex);
3064
3065         spin_lock(&lp->lp_lock);
3066         /* Tell discovery to re-check the peer immediately. */
3067         if (!rc)
3068                 rc = LNET_REDISCOVER_PEER;
3069         return rc;
3070 }
3071
3072 /*
3073  * A ping failed. Clear the PING_FAILED state and set the
3074  * FORCE_PING state, to ensure a retry even if discovery is
3075  * disabled. This avoids being left with incorrect state.
3076  */
3077 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3078 __must_hold(&lp->lp_lock)
3079 {
3080         struct lnet_handle_md mdh;
3081         int rc;
3082
3083         mdh = lp->lp_ping_mdh;
3084         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3085         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3086         lp->lp_state |= LNET_PEER_FORCE_PING;
3087         rc = lp->lp_ping_error;
3088         lp->lp_ping_error = 0;
3089         spin_unlock(&lp->lp_lock);
3090
3091         if (!LNetMDHandleIsInvalid(mdh))
3092                 LNetMDUnlink(mdh);
3093
3094         CDEBUG(D_NET, "peer %s:%d\n",
3095                libcfs_nid2str(lp->lp_primary_nid), rc);
3096
3097         spin_lock(&lp->lp_lock);
3098         return rc ? rc : LNET_REDISCOVER_PEER;
3099 }
3100
3101 /*
3102  * Select a NID to send a Ping or Push to: prefer direct nets over routed ones.
3103  */
3104 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3105 {
3106         struct lnet_peer_ni *lpni;
3107
3108         /* Look for a direct-connected NID for this peer. */
3109         lpni = NULL;
3110         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3111                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3112                         continue;
3113                 break;
3114         }
3115         if (lpni)
3116                 return lpni->lpni_nid;
3117
3118         /* Look for a routed-connected NID for this peer. */
3119         lpni = NULL;
3120         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3121                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3122                         continue;
3123                 break;
3124         }
3125         if (lpni)
3126                 return lpni->lpni_nid;
3127
3128         return LNET_NID_ANY;
3129 }
3130
3131 /* Active side of ping. */
3132 static int lnet_peer_send_ping(struct lnet_peer *lp)
3133 __must_hold(&lp->lp_lock)
3134 {
3135         lnet_nid_t pnid;
3136         int nnis;
3137         int rc;
3138         int cpt;
3139
3140         lp->lp_state |= LNET_PEER_PING_SENT;
3141         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3142         spin_unlock(&lp->lp_lock);
3143
3144         cpt = lnet_net_lock_current();
3145         /* Refcount for MD. */
3146         lnet_peer_addref_locked(lp);
3147         pnid = lnet_peer_select_nid(lp);
3148         lnet_net_unlock(cpt);
3149
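        /* Size the ping reply buffer for at least LNET_INTERFACES_MIN NIDs,
         * or for lp_data_nnis if that is larger. */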
3150         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3151
3152         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3153                             the_lnet.ln_dc_handler, false);
3154
3155         /*
3156          * If LNetMDBind() in lnet_send_ping() fails (rc > 0), drop the peer
3157          * refcount here ourselves; otherwise LNetMDUnlink() will be called,
3158          * and its unlink event will eventually drop it.
3159          */
3160         if (rc > 0) {
3161                 lnet_net_lock(cpt);
3162                 lnet_peer_decref_locked(lp);
3163                 lnet_net_unlock(cpt);
3164                 rc = -rc; /* change the rc to negative value */
3165                 goto fail_error;
3166         } else if (rc < 0) {
3167                 goto fail_error;
3168         }
3169
3170         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3171
3172         spin_lock(&lp->lp_lock);
3173         return 0;
3174
3175 fail_error:
3176         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3177         /*
3178          * The errors that get us here are considered hard errors and
3179          * cause Discovery to terminate. So we clear PING_SENT, but do
3180          * not set either PING_FAILED or FORCE_PING. In fact we need
3181          * to clear PING_FAILED, because the unlink event handler will
3182          * have set it if we called LNetMDUnlink() above.
3183          */
3184         spin_lock(&lp->lp_lock);
3185         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3186         return rc;
3187 }
3188
3189 /*
3190  * This function exists because you cannot call LNetMDUnlink() from an
3191  * event handler.
3192  */
3193 static int lnet_peer_push_failed(struct lnet_peer *lp)
3194 __must_hold(&lp->lp_lock)
3195 {
3196         struct lnet_handle_md mdh;
3197         int rc;
3198
3199         mdh = lp->lp_push_mdh;
3200         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3201         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3202         rc = lp->lp_push_error;
3203         lp->lp_push_error = 0;
3204         spin_unlock(&lp->lp_lock);
3205
3206         if (!LNetMDHandleIsInvalid(mdh))
3207                 LNetMDUnlink(mdh);
3208
3209         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3210         spin_lock(&lp->lp_lock);
3211         return rc ? rc : LNET_REDISCOVER_PEER;
3212 }
3213
3214 /*
3215  * Mark the peer as discovered.
3216  */
3217 static int lnet_peer_discovered(struct lnet_peer *lp)
3218 __must_hold(&lp->lp_lock)
3219 {
3220         lp->lp_state |= LNET_PEER_DISCOVERED;
3221         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3222                           LNET_PEER_REDISCOVER);
3223
3224         lp->lp_dc_error = 0;
3225
3226         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3227
3228         return 0;
3229 }
3230
3231 /* Active side of push. */
3232 static int lnet_peer_send_push(struct lnet_peer *lp)
3233 __must_hold(&lp->lp_lock)
3234 {
3235         struct lnet_ping_buffer *pbuf;
3236         struct lnet_process_id id;
3237         struct lnet_md md;
3238         int cpt;
3239         int rc;
3240
3241         /* Don't push to a non-multi-rail peer. */
3242         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3243                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3244                 /* if peer's NIDs are uptodate then peer is discovered */
3245                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3246                         rc = lnet_peer_discovered(lp);
3247                         return rc;
3248                 }
3249
3250                 return 0;
3251         }
3252
3253         lp->lp_state |= LNET_PEER_PUSH_SENT;
3254         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3255         spin_unlock(&lp->lp_lock);
3256
3257         cpt = lnet_net_lock_current();
3258         pbuf = the_lnet.ln_ping_target;
3259         lnet_ping_buffer_addref(pbuf);
3260         lnet_net_unlock(cpt);
3261
3262         /* Push source MD */
3263         md.start     = &pbuf->pb_info;
3264         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3265         md.threshold = 2; /* Put/Ack */
3266         md.max_size  = 0;
3267         md.options   = LNET_MD_TRACK_RESPONSE;
3268         md.handler   = the_lnet.ln_dc_handler;
3269         md.user_ptr  = lp;
3270
3271         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3272         if (rc) {
3273                 lnet_ping_buffer_decref(pbuf);
3274                 CERROR("Can't bind push source MD: %d\n", rc);
3275                 goto fail_error;
3276         }
3277         cpt = lnet_net_lock_current();
3278         /* Refcount for MD. */
3279         lnet_peer_addref_locked(lp);
3280         id.pid = LNET_PID_LUSTRE;
3281         id.nid = lnet_peer_select_nid(lp);
3282         lnet_net_unlock(cpt);
3283
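        /* lnet_peer_select_nid() found neither a direct nor a routed NID. */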
3284         if (id.nid == LNET_NID_ANY) {
3285                 rc = -EHOSTUNREACH;
3286                 goto fail_unlink;
3287         }
3288
3289         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3290                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3291                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3292
3293         /*
3294          * Reset the discovery source NID. There is no need to restrict
3295          * sending from that source if we call lnet_push_update_to_peers();
3296          * it will be set to a specific NID again if we initiate discovery
3297          * from scratch.
3298          */
3299         lp->lp_disc_src_nid = LNET_NID_ANY;
3300
3301         if (rc)
3302                 goto fail_unlink;
3303
3304         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3305
3306         spin_lock(&lp->lp_lock);
3307         return 0;
3308
3309 fail_unlink:
3310         LNetMDUnlink(lp->lp_push_mdh);
3311         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3312 fail_error:
3313         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3314         /*
3315          * The errors that get us here are considered hard errors and
3316          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3317          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3318          * because the unlink event handler will have set it if we
3319          * called LNetMDUnlink() above.
3320          */
3321         spin_lock(&lp->lp_lock);
3322         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3323         return rc;
3324 }
3325
3326 /*
3327  * An unrecoverable error was encountered during discovery.
3328  * Set error status in peer and abort discovery.
3329  */
3330 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3331 {
3332         CDEBUG(D_NET, "Discovery error %s: %d\n",
3333                libcfs_nid2str(lp->lp_primary_nid), error);
3334
3335         spin_lock(&lp->lp_lock);
3336         lp->lp_dc_error = error;
3337         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3338         lp->lp_state |= LNET_PEER_REDISCOVER;
3339         spin_unlock(&lp->lp_lock);
3340 }
3341
3342 /*
3343  * Discovering this peer is taking too long. Cancel any Ping or Push
3344  * that discovery is waiting on by unlinking the relevant MDs. The
3345  * lnet_discovery_event_handler() will proceed from here and complete
3346  * the cleanup.
3347  */
3348 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3349 {
3350         struct lnet_handle_md ping_mdh;
3351         struct lnet_handle_md push_mdh;
3352
3353         LNetInvalidateMDHandle(&ping_mdh);
3354         LNetInvalidateMDHandle(&push_mdh);
3355
3356         spin_lock(&lp->lp_lock);
3357         if (lp->lp_state & LNET_PEER_PING_SENT) {
3358                 ping_mdh = lp->lp_ping_mdh;
3359                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3360         }
3361         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3362                 push_mdh = lp->lp_push_mdh;
3363                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3364         }
3365         spin_unlock(&lp->lp_lock);
3366
3367         if (!LNetMDHandleIsInvalid(ping_mdh))
3368                 LNetMDUnlink(ping_mdh);
3369         if (!LNetMDHandleIsInvalid(push_mdh))
3370                 LNetMDUnlink(push_mdh);
3371 }
3372
3373 /*
3374  * Wait for work to be queued or some other change that must be
3375  * attended to. Returns non-zero if the discovery thread should shut
3376  * down.
3377  */
3378 static int lnet_peer_discovery_wait_for_work(void)
3379 {
3380         int cpt;
3381         int rc = 0;
3382
3383         DEFINE_WAIT(wait);
3384
3385         cpt = lnet_net_lock_current();
3386         for (;;) {
3387                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3388                                 TASK_INTERRUPTIBLE);
3389                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3390                         break;
3391                 if (lnet_push_target_resize_needed() ||
3392                     the_lnet.ln_push_target->pb_needs_post)
3393                         break;
3394                 if (!list_empty(&the_lnet.ln_dc_request))
3395                         break;
3396                 if (!list_empty(&the_lnet.ln_msg_resend))
3397                         break;
3398                 lnet_net_unlock(cpt);
3399
3400                 /*
3401                  * Wake up at most once per second to check for peers that
3402                  * have been stuck on the working queue for longer than
3403                  * the peer timeout.
3404                  */
3405                 schedule_timeout(cfs_time_seconds(1));
3406                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3407                 cpt = lnet_net_lock_current();
3408         }
3409         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3410
3411         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3412                 rc = -ESHUTDOWN;
3413
3414         lnet_net_unlock(cpt);
3415
3416         CDEBUG(D_NET, "woken: %d\n", rc);
3417
3418         return rc;
3419 }
3420
3421 /*
3422  * Messages that were pending on a destroyed peer are put on a global
3423  * resend list. That list is checked by the discovery thread when it
3424  * wakes up, and the messages are resent. These messages can still be
3425  * sendable if the lpni that originally caused them to be re-queued
3426  * was transferred to another peer.
3427  *
3428  * It is possible that LNet could be shut down while we're iterating
3429  * through the list. lnet_shutdown_lndnets() will attempt to access the
3430  * resend list, but will have to wait until the spinlock is released, by
3431  * which time there shouldn't be any more messages on the resend list.
3432  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3433  * for the messages so they can be released. The other case is that
3434  * lnet_shutdown_lndnets() can finalize all the messages before this
3435  * function can visit the resend list, in which case this function will be
3436  * a no-op.
3437  */
3438 static void lnet_resend_msgs(void)
3439 {
3440         struct lnet_msg *msg, *tmp;
3441         LIST_HEAD(resend);
3442         int rc;
3443
3444         spin_lock(&the_lnet.ln_msg_resend_lock);
3445         list_splice(&the_lnet.ln_msg_resend, &resend);
3446         spin_unlock(&the_lnet.ln_msg_resend_lock);
3447
3448         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3449                 list_del_init(&msg->msg_list);
3450                 rc = lnet_send(msg->msg_src_nid_param, msg,
3451                                msg->msg_rtr_nid_param);
3452                 if (rc < 0) {
3453                         CNETERR("Error sending %s to %s: %d\n",
3454                                lnet_msgtyp2str(msg->msg_type),
3455                                libcfs_id2str(msg->msg_target), rc);
3456                         lnet_finalize(msg, rc);
3457                 }
3458         }
3459 }
3460
3461 /* The discovery thread. */
3462 static int lnet_peer_discovery(void *arg)
3463 {
3464         struct lnet_peer *lp;
3465         int rc;
3466
3467         wait_for_completion(&the_lnet.ln_started);
3468
3469         CDEBUG(D_NET, "started\n");
3470
3471         for (;;) {
3472                 if (lnet_peer_discovery_wait_for_work())
3473                         break;
3474
3475                 if (lnet_push_target_resize_needed())
3476                         lnet_push_target_resize();
3477                 else if (the_lnet.ln_push_target->pb_needs_post)
3478                         lnet_push_target_post(the_lnet.ln_push_target,
3479                                               &the_lnet.ln_push_target_md);
3480
3481                 lnet_resend_msgs();
3482
3483                 lnet_net_lock(LNET_LOCK_EX);
3484                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3485                         lnet_net_unlock(LNET_LOCK_EX);
3486                         break;
3487                 }
3488
3489                 /*
3490                  * Process all incoming discovery work requests.  When
3491                  * discovery must wait on a peer to change state, it
3492                  * is added to the tail of the ln_dc_working queue. A
3493                  * timestamp keeps track of when the peer was added,
3494                  * so we can time out discovery requests that take too
3495                  * long.
3496                  */
3497                 while (!list_empty(&the_lnet.ln_dc_request)) {
3498                         lp = list_first_entry(&the_lnet.ln_dc_request,
3499                                               struct lnet_peer, lp_dc_list);
3500                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3501                         /*
3502                          * set the time the peer was put on the dc_working
3503                          * queue. It shouldn't remain on the queue
3504                          * forever, in case the GET message (for ping)
3505                          * doesn't get a REPLY or the PUT message (for
3506                          * push) doesn't get an ACK.
3507                          */
3508                         lp->lp_last_queued = ktime_get_real_seconds();
3509                         lnet_net_unlock(LNET_LOCK_EX);
3510
3511                         if (lnet_push_target_resize_needed())
3512                                 lnet_push_target_resize();
3513                         else if (the_lnet.ln_push_target->pb_needs_post)
3514                                 lnet_push_target_post(the_lnet.ln_push_target,
3515                                                       &the_lnet.ln_push_target_md);
3516
3517                         /*
3518                          * Select an action depending on the state of
3519                          * the peer and whether discovery is disabled.
3520                          * The check whether discovery is disabled is
3521                          * done after the code that handles processing
3522                          * for arrived data, cleanup for failures, and
3523                          * forcing a Ping or Push.
3524                          */
3525                         spin_lock(&lp->lp_lock);
3526                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3527                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3528                                 lp->lp_state);
3529                         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3530                                 rc = lnet_peer_data_present(lp);
3531                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3532                                 rc = lnet_peer_ping_failed(lp);
3533                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3534                                 rc = lnet_peer_push_failed(lp);
3535                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3536                                 rc = lnet_peer_send_ping(lp);
3537                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3538                                 rc = lnet_peer_send_push(lp);
3539                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3540                                 rc = lnet_peer_send_ping(lp);
3541                         else if (lnet_peer_needs_push(lp))
3542                                 rc = lnet_peer_send_push(lp);
3543                         else
3544                                 rc = lnet_peer_discovered(lp);
3545                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3546                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3547                                 lp->lp_state, rc);
3548                         spin_unlock(&lp->lp_lock);
3549
3550                         lnet_net_lock(LNET_LOCK_EX);
3551                         if (rc == LNET_REDISCOVER_PEER) {
3552                                 list_move(&lp->lp_dc_list,
3553                                           &the_lnet.ln_dc_request);
3554                         } else if (rc) {
3555                                 lnet_peer_discovery_error(lp, rc);
3556                         }
3557                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3558                                 lnet_peer_discovery_complete(lp);
3559                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3560                                 break;
3561
3562                         if (lp->lp_state & LNET_PEER_MARK_DELETION) {
3563                                 struct list_head rlist;
3564                                 struct lnet_route *route, *tmp;
3565                                 int sensitivity = lp->lp_health_sensitivity;
3566
3567                                 INIT_LIST_HEAD(&rlist);
3568
3569                                 /*
3570                                  * Remove the peer from the discovery work
3571                                  * queue, if it is on it, in preparation
3572                                  * for deleting it.
3573                                  */
3574                                 if (!list_empty(&lp->lp_dc_list))
3575                                         list_del(&lp->lp_dc_list);
3576
3577                                 lnet_net_unlock(LNET_LOCK_EX);
3578
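                                /* Drop the net lock before taking the
                                 * ln_api_mutex, under which the peer is
                                 * deleted and its routes re-added below.
                                 */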
3579                                 mutex_lock(&the_lnet.ln_api_mutex);
3580
3581                                 lnet_net_lock(LNET_LOCK_EX);
3582                                 list_for_each_entry_safe(route, tmp,
3583                                                          &lp->lp_routes,
3584                                                          lr_gwlist)
3585                                         lnet_move_route(route, NULL, &rlist);
3586                                 lnet_net_unlock(LNET_LOCK_EX);
3587
3588                                 /* delete the peer */
3589                                 lnet_peer_del(lp);
3590
3591                                 list_for_each_entry_safe(route, tmp,
3592                                                          &rlist, lr_list) {
3593                                         /* re-add these routes */
3594                                         lnet_add_route(route->lr_net,
3595                                                        route->lr_hops,
3596                                                        route->lr_nid,
3597                                                        route->lr_priority,
3598                                                        sensitivity);
3599                                         LIBCFS_FREE(route, sizeof(*route));
3600                                 }
3601                                 mutex_unlock(&the_lnet.ln_api_mutex);
3602
3603                                 lnet_net_lock(LNET_LOCK_EX);
3604                         }
3605                 }
3606
3607                 lnet_net_unlock(LNET_LOCK_EX);
3608         }
3609
3610         CDEBUG(D_NET, "stopping\n");
3611         /*
3612          * Clean up before telling lnet_peer_discovery_stop() that
3613          * we're done. Use wake_up() below to somewhat reduce the
3614          * size of the thundering herd if there are multiple threads
3615          * waiting on discovery of a single peer.
3616          */
3617
3618         /* Queue cleanup 1: stop all pending pings and pushes. */
3619         lnet_net_lock(LNET_LOCK_EX);
3620         while (!list_empty(&the_lnet.ln_dc_working)) {
3621                 lp = list_first_entry(&the_lnet.ln_dc_working,
3622                                       struct lnet_peer, lp_dc_list);
3623                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3624                 lnet_net_unlock(LNET_LOCK_EX);
3625                 lnet_peer_cancel_discovery(lp);
3626                 lnet_net_lock(LNET_LOCK_EX);
3627         }
3628         lnet_net_unlock(LNET_LOCK_EX);
3629
3630         /* Queue cleanup 2: wait for the expired queue to clear. */
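        /* The MD unlinks above generate events that let
         * lnet_discovery_event_handler() complete the cleanup for each peer
         * (see lnet_peer_cancel_discovery()). */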
3631         while (!list_empty(&the_lnet.ln_dc_expired))
3632                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3633
3634         /* Queue cleanup 3: clear the request queue. */
3635         lnet_net_lock(LNET_LOCK_EX);
3636         while (!list_empty(&the_lnet.ln_dc_request)) {
3637                 lp = list_first_entry(&the_lnet.ln_dc_request,
3638                                       struct lnet_peer, lp_dc_list);
3639                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3640                 lnet_peer_discovery_complete(lp);
3641         }
3642         lnet_net_unlock(LNET_LOCK_EX);
3643
3644         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3645         the_lnet.ln_dc_handler = NULL;
3646
3647         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3648         wake_up(&the_lnet.ln_dc_waitq);
3649
3650         CDEBUG(D_NET, "stopped\n");
3651
3652         return 0;
3653 }
3654
3655 /* ln_api_mutex is held on entry. */
3656 int lnet_peer_discovery_start(void)
3657 {
3658         struct task_struct *task;
3659         int rc = 0;
3660
3661         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3662                 return -EALREADY;
3663
3664         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3665         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3666         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3667         if (IS_ERR(task)) {
3668                 rc = PTR_ERR(task);
3669                 CERROR("Can't start peer discovery thread: %d\n", rc);
3670
3671                 the_lnet.ln_dc_handler = NULL;
3672
3673                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3674         }
3675
3676         CDEBUG(D_NET, "discovery start: %d\n", rc);
3677
3678         return rc;
3679 }
3680
3681 /* ln_api_mutex is held on entry. */
3682 void lnet_peer_discovery_stop(void)
3683 {
3684         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3685                 return;
3686
3687         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3688         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3689
3690         /* In the LNetNIInit() path we may be stopping discovery before it
3691          * entered its work loop
3692          */
3693         if (!completion_done(&the_lnet.ln_started))
3694                 complete(&the_lnet.ln_started);
3695         else
3696                 wake_up(&the_lnet.ln_dc_waitq);
3697
3698         wait_event(the_lnet.ln_dc_waitq,
3699                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3700
3701         LASSERT(list_empty(&the_lnet.ln_dc_request));
3702         LASSERT(list_empty(&the_lnet.ln_dc_working));
3703         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3704
3705         CDEBUG(D_NET, "discovery stopped\n");
3706 }
3707
3708 /* Debugging */
3709
3710 void
3711 lnet_debug_peer(lnet_nid_t nid)
3712 {
3713         char                    *aliveness = "NA";
3714         struct lnet_peer_ni     *lp;
3715         int                     cpt;
3716
3717         cpt = lnet_cpt_of_nid(nid, NULL);
3718         lnet_net_lock(cpt);
3719
3720         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3721         if (IS_ERR(lp)) {
3722                 lnet_net_unlock(cpt);
3723                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3724                 return;
3725         }
3726
3727         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3728                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3729
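        /* Columns: NID, refcount, aliveness, configured peer tx credits,
         * rtr credits, min rtr credits, tx credits, min tx credits, tx qnob. */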
3730         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3731                libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3732                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3733                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3734                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3735
3736         lnet_peer_ni_decref_locked(lp);
3737
3738         lnet_net_unlock(cpt);
3739 }
3740
3741 /* Gathering information for userspace. */
3742
3743 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3744                           char aliveness[LNET_MAX_STR_LEN],
3745                           __u32 *cpt_iter, __u32 *refcount,
3746                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3747                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3748                           __u32 *peer_tx_qnob)
3749 {
3750         struct lnet_peer_table          *peer_table;
3751         struct lnet_peer_ni             *lp;
3752         int                             j;
3753         int                             lncpt;
3754         bool                            found = false;
3755
3756         /* get the number of CPTs */
3757         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3758
3759         /* if the cpt number to be examined is >= the number of cpts in
3760          * the system then indicate that there are no more cpts to examine
3761          */
3762         if (*cpt_iter >= lncpt)
3763                 return -ENOENT;
3764
3765         /* get the current table */
3766         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3767         /* if the ptable is NULL then there are no more cpts to examine */
3768         if (peer_table == NULL)
3769                 return -ENOENT;
3770
3771         lnet_net_lock(*cpt_iter);
3772
3773         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3774                 struct list_head *peers = &peer_table->pt_hash[j];
3775
3776                 list_for_each_entry(lp, peers, lpni_hashlist) {
3777                         if (peer_index-- > 0)
3778                                 continue;
3779
3780                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3781                         if (lnet_isrouter(lp) ||
3782                                 lnet_peer_aliveness_enabled(lp))
3783                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3784                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3785
3786                         *nid = lp->lpni_nid;
3787                         *refcount = atomic_read(&lp->lpni_refcount);
3788                         *ni_peer_tx_credits =
3789                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3790                         *peer_tx_credits = lp->lpni_txcredits;
3791                         *peer_rtr_credits = lp->lpni_rtrcredits;
3792                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3793                         *peer_tx_qnob = lp->lpni_txqnob;
3794
3795                         found = true;
3796                 }
3797
3798         }
3799         lnet_net_unlock(*cpt_iter);
3800
3801         *cpt_iter = lncpt;
3802
3803         return found ? 0 : -ENOENT;
3804 }
3805
3806 /* ln_api_mutex is held, which keeps the peer list stable */
3807 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3808 {
3809         struct lnet_ioctl_element_stats *lpni_stats;
3810         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3811         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3812         struct lnet_peer_ni_credit_info *lpni_info;
3813         struct lnet_peer_ni *lpni;
3814         struct lnet_peer *lp;
3815         lnet_nid_t nid;
3816         __u32 size;
3817         int rc;
3818
3819         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3820
3821         if (!lp) {
3822                 rc = -ENOENT;
3823                 goto out;
3824         }
3825
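        /* Each peer NI is reported as one record: NID, credit info, stats,
         * message stats and health stats. */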
3826         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3827                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3828         size *= lp->lp_nnis;
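        /* Report the required size so userspace can retry with a larger buffer. */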
3829         if (size > cfg->prcfg_size) {
3830                 cfg->prcfg_size = size;
3831                 rc = -E2BIG;
3832                 goto out_lp_decref;
3833         }
3834
3835         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3836         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3837         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3838         cfg->prcfg_count = lp->lp_nnis;
3839         cfg->prcfg_size = size;
3840         cfg->prcfg_state = lp->lp_state;
3841
3842         /* Allocate helper buffers. */
3843         rc = -ENOMEM;
3844         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3845         if (!lpni_info)
3846                 goto out_lp_decref;
3847         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3848         if (!lpni_stats)
3849                 goto out_free_info;
3850         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3851         if (!lpni_msg_stats)
3852                 goto out_free_stats;
3853         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3854         if (!lpni_hstats)
3855                 goto out_free_msg_stats;
3856
3857
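        /* Copy one record per peer NI to userspace, in the same layout used
         * to compute 'size' above. */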
3858         lpni = NULL;
3859         rc = -EFAULT;
3860         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3861                 nid = lpni->lpni_nid;
3862                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3863                         goto out_free_hstats;
3864                 bulk += sizeof(nid);
3865
3866                 memset(lpni_info, 0, sizeof(*lpni_info));
3867                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3868                 if (lnet_isrouter(lpni) ||
3869                         lnet_peer_aliveness_enabled(lpni))
3870                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3871                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3872
3873                 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3874                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3875                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3876                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3877                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3878                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3879                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3880                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3881                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3882                         goto out_free_hstats;
3883                 bulk += sizeof(*lpni_info);
3884
3885                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3886                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3887                                                             LNET_STATS_TYPE_SEND);
3888                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3889                                                             LNET_STATS_TYPE_RECV);
3890                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3891                                                             LNET_STATS_TYPE_DROP);
3892                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3893                         goto out_free_hstats;
3894                 bulk += sizeof(*lpni_stats);
3895                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3896                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3897                         goto out_free_hstats;
3898                 bulk += sizeof(*lpni_msg_stats);
3899                 lpni_hstats->hlpni_network_timeout =
3900                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3901                 lpni_hstats->hlpni_remote_dropped =
3902                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3903                 lpni_hstats->hlpni_remote_timeout =
3904                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3905                 lpni_hstats->hlpni_remote_error =
3906                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3907                 lpni_hstats->hlpni_health_value =
3908                   atomic_read(&lpni->lpni_healthv);
3909                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3910                         goto out_free_hstats;
3911                 bulk += sizeof(*lpni_hstats);
3912         }
3913         rc = 0;
3914
3915 out_free_hstats:
3916         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3917 out_free_msg_stats:
3918         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3919 out_free_stats:
3920         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3921 out_free_info:
3922         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3923 out_lp_decref:
3924         lnet_peer_decref_locked(lp);
3925 out:
3926         return rc;
3927 }
3928
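/*
 * Queue a peer NI for health recovery if it is not already queued and its
 * health value is below the maximum. Called with the net lock held; the
 * recovery queue (ln_mt_peerNIRecovq) is processed by the monitor thread.
 */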
3929 void
3930 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3931 {
3932         /* the monitor thread could have shut down and cleaned up the queues */
3933         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3934                 return;
3935
3936         if (list_empty(&lpni->lpni_recovery) &&
3937             atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3938                 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3939                         libcfs_nid2str(lpni->lpni_nid),
3940                         atomic_read(&lpni->lpni_healthv));
3941                 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3942                 lnet_peer_ni_addref_locked(lpni);
3943         }
3944 }
3945
3946 /* Call with the ln_api_mutex held */
3947 void
3948 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3949 {
3950         struct lnet_peer_table *ptable;
3951         struct lnet_peer *lp;
3952         struct lnet_peer_net *lpn;
3953         struct lnet_peer_ni *lpni;
3954         int lncpt;
3955         int cpt;
3956
3957         if (the_lnet.ln_state != LNET_STATE_RUNNING)
3958                 return;
3959
3960         if (!all) {
3961                 lnet_net_lock(LNET_LOCK_EX);
3962                 lpni = lnet_find_peer_ni_locked(nid);
3963                 if (!lpni) {
3964                         lnet_net_unlock(LNET_LOCK_EX);
3965                         return;
3966                 }
3967                 atomic_set(&lpni->lpni_healthv, value);
3968                 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3969                 lnet_peer_ni_decref_locked(lpni);
3970                 lnet_net_unlock(LNET_LOCK_EX);
3971                 return;
3972         }
3973
3974         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3975
3976         /*
3977          * Walk all the peers and reset the health value of every peer NI
3978          * to the given value.
3979          */
3980         lnet_net_lock(LNET_LOCK_EX);
3981         for (cpt = 0; cpt < lncpt; cpt++) {
3982                 ptable = the_lnet.ln_peer_tables[cpt];
3983                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3984                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3985                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3986                                                     lpni_peer_nis) {
3987                                         atomic_set(&lpni->lpni_healthv, value);
3988                                         lnet_peer_ni_add_to_recoveryq_locked(lpni);
3989                                 }
3990                         }
3991                 }
3992         }
3993         lnet_net_unlock(LNET_LOCK_EX);
3994 }
3995