1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/peer.c
33  */
34
35 #define DEBUG_SUBSYSTEM S_LNET
36
37 #include <linux/sched.h>
38 #ifdef HAVE_SCHED_HEADERS
39 #include <linux/sched/signal.h>
40 #endif
41 #include <linux/uaccess.h>
42
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(lnet_nid_t nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         atomic_set(&lpni->lpni_refcount, 1);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NIDNET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
246 static struct lnet_peer *
247 lnet_peer_alloc(lnet_nid_t nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = nid;
265         lp->lp_disc_src_nid = LNET_NID_ANY;
266         if (lnet_peers_start_down())
267                 lp->lp_alive = false;
268         else
269                 lp->lp_alive = true;
270
271         /*
272          * All peers created on a router should have health enabled
273          * if it is not already enabled.
274          */
275         if (the_lnet.ln_routing && !lnet_health_sensitivity)
276                 lp->lp_health_sensitivity = 1;
277
278         /*
279          * Turn off discovery for the loopback peer. A peer for the
280          * loopback interface is only created when we attempt to send a
281          * message over the loopback. There is never a need to use a
282          * different interface when sending messages to
283          * ourselves.
284          */
285         if (nid == LNET_NID_LO_0)
286                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
287         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
288
289         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
290
291         return lp;
292 }
293
294 void
295 lnet_destroy_peer_locked(struct lnet_peer *lp)
296 {
297         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
298
299         LASSERT(atomic_read(&lp->lp_refcount) == 0);
300         LASSERT(lp->lp_rtr_refcount == 0);
301         LASSERT(list_empty(&lp->lp_peer_nets));
302         LASSERT(list_empty(&lp->lp_peer_list));
303         LASSERT(list_empty(&lp->lp_dc_list));
304
305         if (lp->lp_data)
306                 lnet_ping_buffer_decref(lp->lp_data);
307
308         /*
309          * If there are messages still on the pending queue, then make
310          * sure to queue them on the ln_msg_resend list so they can be
311          * resent at a later point if the discovery thread is still
312          * running.
313          * If the discovery thread has stopped, then the wakeup will be a
314          * no-op, and it is expected that lnet_shutdown_lndnets() will
315          * eventually be called, which will traverse this list and
316          * finalize the messages on the list.
317          * We cannot resend them now because we're holding the cpt lock.
318          * Releasing the lock could leave us in an inconsistent state.
319          */
320         spin_lock(&the_lnet.ln_msg_resend_lock);
321         spin_lock(&lp->lp_lock);
322         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
323         spin_unlock(&lp->lp_lock);
324         spin_unlock(&the_lnet.ln_msg_resend_lock);
325         wake_up(&the_lnet.ln_dc_waitq);
326
327         LIBCFS_FREE(lp, sizeof(*lp));
328 }
329
330 /*
331  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
332  * that peer_net, detach the peer_net from the peer.
333  *
334  * Call with lnet_net_lock/EX held
335  */
336 static void
337 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
338 {
339         struct lnet_peer_table *ptable;
340         struct lnet_peer_net *lpn;
341         struct lnet_peer *lp;
342
343         /*
344          * Belts and suspenders: gracefully handle teardown of a
345          * partially connected peer_ni.
346          */
347         lpn = lpni->lpni_peer_net;
348
349         list_del_init(&lpni->lpni_peer_nis);
350         /*
351          * If there are no lpni's left, we detach lpn from
352          * lp_peer_nets, so it cannot be found anymore.
353          */
354         if (list_empty(&lpn->lpn_peer_nis))
355                 list_del_init(&lpn->lpn_peer_nets);
356
357         /* Update peer NID count. */
358         lp = lpn->lpn_peer;
359         lp->lp_nnis--;
360
361         /*
362          * If there are no more peer nets, make the peer unfindable
363          * via the peer_tables.
364          *
365          * Otherwise, if the peer is DISCOVERED, tell discovery to
366          * take another look at it. This is a no-op if discovery for
367          * this peer did the detaching.
368          */
369         if (list_empty(&lp->lp_peer_nets)) {
370                 list_del_init(&lp->lp_peer_list);
371                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
372                 ptable->pt_peers--;
373         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
374                 /* Discovery isn't running, nothing to do here. */
375         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
376                 lnet_peer_queue_for_discovery(lp);
377                 wake_up(&the_lnet.ln_dc_waitq);
378         }
379         CDEBUG(D_NET, "peer %s NID %s\n",
380                 libcfs_nid2str(lp->lp_primary_nid),
381                 libcfs_nid2str(lpni->lpni_nid));
382 }
383
384 /* called with lnet_net_lock LNET_LOCK_EX held */
385 static int
386 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
387 {
388         struct lnet_peer_table *ptable = NULL;
389
390         /* don't remove a peer_ni if it's also a gateway */
391         if (lnet_isrouter(lpni) && !force) {
392                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
393                        libcfs_nid2str(lpni->lpni_nid));
394                 return -EBUSY;
395         }
396
397         lnet_peer_remove_from_remote_list(lpni);
398
399         /* remove peer ni from the hash list. */
400         list_del_init(&lpni->lpni_hashlist);
401
402         /*
403          * indicate the peer is being deleted so the monitor thread can
404          * remove it from the recovery queue.
405          */
406         spin_lock(&lpni->lpni_lock);
407         lpni->lpni_state |= LNET_PEER_NI_DELETING;
408         spin_unlock(&lpni->lpni_lock);
409
410         /* decrement the ref count on the peer table */
411         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
412
413         /*
414          * The peer_ni can no longer be found with a lookup. But there
415          * can be current users, so keep track of it on the zombie
416          * list until the reference count has gone to zero.
417          *
418          * The last reference may be lost in a place where the
419          * lnet_net_lock locks only a single cpt, and that cpt may not
420          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
421          * has its own lock.
422          */
423         spin_lock(&ptable->pt_zombie_lock);
424         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
425         ptable->pt_zombies++;
426         spin_unlock(&ptable->pt_zombie_lock);
427
428         /* no need to keep this peer_ni on the hierarchy anymore */
429         lnet_peer_detach_peer_ni_locked(lpni);
430
431         /* remove hashlist reference on peer_ni */
432         lnet_peer_ni_decref_locked(lpni);
433
434         return 0;
435 }
436
437 void lnet_peer_uninit(void)
438 {
439         struct lnet_peer_ni *lpni, *tmp;
440
441         lnet_net_lock(LNET_LOCK_EX);
442
443         /* remove all peer_nis from the remote peer and the hash list */
444         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
445                                  lpni_on_remote_peer_ni_list)
446                 lnet_peer_ni_del_locked(lpni, false);
447
448         lnet_peer_tables_destroy();
449
450         lnet_net_unlock(LNET_LOCK_EX);
451 }
452
453 static int
454 lnet_peer_del_locked(struct lnet_peer *peer)
455 {
456         struct lnet_peer_ni *lpni = NULL, *lpni2;
457         int rc = 0, rc2 = 0;
458
459         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
460
461         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
462         while (lpni != NULL) {
463                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
464                 rc = lnet_peer_ni_del_locked(lpni, false);
465                 if (rc != 0)
466                         rc2 = rc;
467                 lpni = lpni2;
468         }
469
470         return rc2;
471 }
472
473 static int
474 lnet_peer_del(struct lnet_peer *peer)
475 {
476         lnet_net_lock(LNET_LOCK_EX);
477         lnet_peer_del_locked(peer);
478         lnet_net_unlock(LNET_LOCK_EX);
479
480         return 0;
481 }
482
483 /*
484  * Delete a NID from a peer. Call with ln_api_mutex held.
485  *
486  * Error codes:
487  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
488  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
489  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
490  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
491  */
492 static int
493 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
494 {
495         struct lnet_peer_ni *lpni;
496         lnet_nid_t primary_nid = lp->lp_primary_nid;
497         int rc = 0;
498         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
499
500         if (!(flags & LNET_PEER_CONFIGURED)) {
501                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
502                         rc = -EPERM;
503                         goto out;
504                 }
505         }
506         lpni = lnet_find_peer_ni_locked(nid);
507         if (!lpni) {
508                 rc = -ENOENT;
509                 goto out;
510         }
511         lnet_peer_ni_decref_locked(lpni);
512         if (lp != lpni->lpni_peer_net->lpn_peer) {
513                 rc = -ECHILD;
514                 goto out;
515         }
516
517         /*
518          * This function only allows deletion of the primary NID if it
519          * is the only NID.
520          */
521         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
522                 rc = -EBUSY;
523                 goto out;
524         }
525
526         lnet_net_lock(LNET_LOCK_EX);
527
528         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
529                 struct lnet_peer_ni *lpni2;
530                 /* assign the next peer_ni to be the primary */
531                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
532                 LASSERT(lpni2);
533                 lp->lp_primary_nid = lpni2->lpni_nid;
534         }
535         rc = lnet_peer_ni_del_locked(lpni, force);
536
537         lnet_net_unlock(LNET_LOCK_EX);
538
539 out:
540         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
541                libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
542
543         return rc;
544 }
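
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * configuration path might call lnet_peer_del_nid() and map the error
 * codes documented above to debug messages. The helper name and the
 * reporting are assumptions; the caller is assumed to hold ln_api_mutex,
 * as required by the comment above lnet_peer_del_nid().
 */
static int
lnet_peer_del_nid_example(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        int rc;

        rc = lnet_peer_del_nid(lp, nid, flags);
        switch (rc) {
        case 0:
                break;
        case -EPERM:
                CDEBUG(D_NET, "%s is DLC-configured; delete it via DLC\n",
                       libcfs_nid2str(nid));
                break;
        case -ENOENT:
                CDEBUG(D_NET, "no peer_ni for %s\n", libcfs_nid2str(nid));
                break;
        case -ECHILD:
                CDEBUG(D_NET, "%s belongs to a different peer\n",
                       libcfs_nid2str(nid));
                break;
        case -EBUSY:
                CDEBUG(D_NET, "%s is the primary NID of a multi-NID peer\n",
                       libcfs_nid2str(nid));
                break;
        }
        return rc;
}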
545
546 static void
547 lnet_peer_table_cleanup_locked(struct lnet_net *net,
548                                struct lnet_peer_table *ptable)
549 {
550         int                      i;
551         struct lnet_peer_ni     *next;
552         struct lnet_peer_ni     *lpni;
553         struct lnet_peer        *peer;
554
555         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
556                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
557                                          lpni_hashlist) {
558                         if (net != NULL && net != lpni->lpni_net)
559                                 continue;
560
561                         peer = lpni->lpni_peer_net->lpn_peer;
562                         if (peer->lp_primary_nid != lpni->lpni_nid) {
563                                 lnet_peer_ni_del_locked(lpni, false);
564                                 continue;
565                         }
566                         /*
567                          * Removing the primary NID implies removing
568                          * the entire peer. Advance next beyond any
569                          * peer_ni that belongs to the same peer.
570                          */
571                         list_for_each_entry_from(next, &ptable->pt_hash[i],
572                                                  lpni_hashlist) {
573                                 if (next->lpni_peer_net->lpn_peer != peer)
574                                         break;
575                         }
576                         lnet_peer_del_locked(peer);
577                 }
578         }
579 }
580
581 static void
582 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
583 {
584         wait_var_event_warning(&ptable->pt_zombies,
585                                ptable->pt_zombies == 0,
586                                "Waiting for %d zombies on peer table\n",
587                                ptable->pt_zombies);
588 }
589
590 static void
591 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
592                                 struct lnet_peer_table *ptable)
593 {
594         struct lnet_peer_ni     *lp;
595         struct lnet_peer_ni     *tmp;
596         lnet_nid_t              gw_nid;
597         int                     i;
598
599         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
600                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
601                                          lpni_hashlist) {
602                         if (net != lp->lpni_net)
603                                 continue;
604
605                         if (!lnet_isrouter(lp))
606                                 continue;
607
608                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
609
610                         lnet_net_unlock(LNET_LOCK_EX);
611                         lnet_del_route(LNET_NET_ANY, gw_nid);
612                         lnet_net_lock(LNET_LOCK_EX);
613                 }
614         }
615 }
616
617 void
618 lnet_peer_tables_cleanup(struct lnet_net *net)
619 {
620         int i;
621         struct lnet_peer_table *ptable;
622
623         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
624         /* If just deleting the peers for a NI, get rid of any routes these
625          * peers are gateways for. */
626         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
627                 lnet_net_lock(LNET_LOCK_EX);
628                 lnet_peer_table_del_rtrs_locked(net, ptable);
629                 lnet_net_unlock(LNET_LOCK_EX);
630         }
631
632         /* Start the cleanup process */
633         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
634                 lnet_net_lock(LNET_LOCK_EX);
635                 lnet_peer_table_cleanup_locked(net, ptable);
636                 lnet_net_unlock(LNET_LOCK_EX);
637         }
638
639         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
640                 lnet_peer_ni_finalize_wait(ptable);
641 }
642
643 static struct lnet_peer_ni *
644 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
645 {
646         struct list_head        *peers;
647         struct lnet_peer_ni     *lp;
648
649         if (the_lnet.ln_state != LNET_STATE_RUNNING)
650                 return NULL;
651
652         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
653         list_for_each_entry(lp, peers, lpni_hashlist) {
654                 if (lp->lpni_nid == nid) {
655                         lnet_peer_ni_addref_locked(lp);
656                         return lp;
657                 }
658         }
659
660         return NULL;
661 }
662
663 struct lnet_peer_ni *
664 lnet_find_peer_ni_locked(lnet_nid_t nid)
665 {
666         struct lnet_peer_ni *lpni;
667         struct lnet_peer_table *ptable;
668         int cpt;
669
670         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
671
672         ptable = the_lnet.ln_peer_tables[cpt];
673         lpni = lnet_get_peer_ni_locked(ptable, nid);
674
675         return lpni;
676 }
677
678 struct lnet_peer_ni *
679 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
680 {
681         struct lnet_peer_net *lpn;
682         struct lnet_peer_ni *lpni;
683
684         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
685         if (!lpn)
686                 return NULL;
687
688         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
689                 if (lpni->lpni_nid == nid)
690                         return lpni;
691         }
692
693         return NULL;
694 }
695
696 struct lnet_peer *
697 lnet_find_peer(lnet_nid_t nid)
698 {
699         struct lnet_peer_ni *lpni;
700         struct lnet_peer *lp = NULL;
701         int cpt;
702
703         cpt = lnet_net_lock_current();
704         lpni = lnet_find_peer_ni_locked(nid);
705         if (lpni) {
706                 lp = lpni->lpni_peer_net->lpn_peer;
707                 lnet_peer_addref_locked(lp);
708                 lnet_peer_ni_decref_locked(lpni);
709         }
710         lnet_net_unlock(cpt);
711
712         return lp;
713 }
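
/*
 * Illustrative sketch (not part of the original file): lnet_find_peer()
 * returns the peer with a reference held, so the caller must drop that
 * reference when done. The helper name is an assumption; the decref is
 * done under lnet_net_lock() because lnet_peer_decref_locked() expects
 * the net lock to be held.
 */
static void
lnet_find_peer_example(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        lp = lnet_find_peer(nid);
        if (!lp)
                return;

        CDEBUG(D_NET, "peer %s found for NID %s\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid));

        /* Release the reference taken by lnet_find_peer(). */
        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_decref_locked(lp);
        lnet_net_unlock(LNET_LOCK_EX);
}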
714
715 struct lnet_peer_net *
716 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
717 {
718         struct lnet_peer_net *net;
719
720         if (!prev_lpn_id) {
721                 /* no net id provided; return the first net */
722                 net = list_first_entry_or_null(&lp->lp_peer_nets,
723                                                struct lnet_peer_net,
724                                                lpn_peer_nets);
725
726                 return net;
727         }
728
729         /* find the net after the one provided */
730         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
731                 if (net->lpn_net_id == prev_lpn_id) {
732                         /*
733                          * if we reached the end of the list, loop back to
734                          * the beginning.
735                          */
736                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
737                                 return list_first_entry_or_null(&lp->lp_peer_nets,
738                                                                 struct lnet_peer_net,
739                                                                 lpn_peer_nets);
740                         else
741                                 return list_next_entry(net, lpn_peer_nets);
742                 }
743         }
744
745         return NULL;
746 }
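
/*
 * Illustrative sketch (not part of the original file): a full walk of a
 * peer's nets using the wrap-around iterator above. Because the iterator
 * loops back to the first net after the last one, the walk terminates by
 * remembering the first net id and stopping when it comes around again.
 * The helper name is an assumption; the net lock is assumed held, as the
 * _locked suffix indicates.
 */
static void
lnet_peer_net_walk_example(struct lnet_peer *lp)
{
        struct lnet_peer_net *lpn;
        __u32 first_lpn_id;

        lpn = lnet_get_next_peer_net_locked(lp, 0);
        if (!lpn)
                return;
        first_lpn_id = lpn->lpn_net_id;

        do {
                CDEBUG(D_NET, "peer %s has net %s\n",
                       libcfs_nid2str(lp->lp_primary_nid),
                       libcfs_net2str(lpn->lpn_net_id));
                lpn = lnet_get_next_peer_net_locked(lp, lpn->lpn_net_id);
        } while (lpn && lpn->lpn_net_id != first_lpn_id);
}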
747
748 struct lnet_peer_ni *
749 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
750                              struct lnet_peer_net *peer_net,
751                              struct lnet_peer_ni *prev)
752 {
753         struct lnet_peer_ni *lpni;
754         struct lnet_peer_net *net = peer_net;
755
756         if (!prev) {
757                 if (!net) {
758                         if (list_empty(&peer->lp_peer_nets))
759                                 return NULL;
760
761                         net = list_entry(peer->lp_peer_nets.next,
762                                          struct lnet_peer_net,
763                                          lpn_peer_nets);
764                 }
765                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
766                                   lpni_peer_nis);
767
768                 return lpni;
769         }
770
771         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
772                 /*
773                  * if we reached the end of the peer ni list and the peer
774                  * net is specified, then there are no more peer nis in
775                  * that net.
776                  */
777                 if (net)
778                         return NULL;
779
780                 /*
781                  * we reached the end of this net ni list. move to the
782                  * next net
783                  */
784                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
785                     &peer->lp_peer_nets)
786                         /* no more nets and no more NIs. */
787                         return NULL;
788
789                 /* get the next net */
790                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
791                                  struct lnet_peer_net,
792                                  lpn_peer_nets);
793                 /* get the ni on it */
794                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
795                                   lpni_peer_nis);
796
797                 return lpni;
798         }
799
800         /* there are more nis left */
801         lpni = list_entry(prev->lpni_peer_nis.next,
802                           struct lnet_peer_ni, lpni_peer_nis);
803
804         return lpni;
805 }
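
/*
 * Illustrative sketch (not part of the original file): counting the
 * peer_nis on a single peer_net by passing the net as the second argument,
 * so the iterator above stops at the end of that net instead of moving on
 * to the next one. The helper name is an assumption; the net lock is
 * assumed held.
 */
static int
lnet_peer_net_ni_count_example(struct lnet_peer *lp, struct lnet_peer_net *lpn)
{
        struct lnet_peer_ni *lpni = NULL;
        int count = 0;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, lpn, lpni)) != NULL)
                count++;

        return count;
}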
806
807 /* Call with the ln_api_mutex held */
808 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
809 {
810         struct lnet_process_id id;
811         struct lnet_peer_table *ptable;
812         struct lnet_peer *lp;
813         __u32 count = 0;
814         __u32 size = 0;
815         int lncpt;
816         int cpt;
817         __u32 i;
818         int rc;
819
820         rc = -ESHUTDOWN;
821         if (the_lnet.ln_state != LNET_STATE_RUNNING)
822                 goto done;
823
824         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
825
826         /*
827          * Count the number of peers, and return E2BIG if the buffer
828          * is too small. We'll also return the desired size.
829          */
830         rc = -E2BIG;
831         for (cpt = 0; cpt < lncpt; cpt++) {
832                 ptable = the_lnet.ln_peer_tables[cpt];
833                 count += ptable->pt_peers;
834         }
835         size = count * sizeof(*ids);
836         if (size > *sizep)
837                 goto done;
838
839         /*
840          * Walk the peer lists and copy out the primary nids.
841          * This is safe because the peer lists are only modified
842          * while the ln_api_mutex is held. So we don't need to
843          * hold the lnet_net_lock as well, and can therefore
844          * directly call copy_to_user().
845          */
846         rc = -EFAULT;
847         memset(&id, 0, sizeof(id));
848         id.pid = LNET_PID_LUSTRE;
849         i = 0;
850         for (cpt = 0; cpt < lncpt; cpt++) {
851                 ptable = the_lnet.ln_peer_tables[cpt];
852                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
853                         if (i >= count)
854                                 goto done;
855                         id.nid = lp->lp_primary_nid;
856                         if (copy_to_user(&ids[i], &id, sizeof(id)))
857                                 goto done;
858                         i++;
859                 }
860         }
861         rc = 0;
862 done:
863         *countp = count;
864         *sizep = size;
865         return rc;
866 }
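
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * ioctl-style caller might use lnet_get_peer_list(). On -E2BIG the
 * required size and peer count are passed back, so user space can retry
 * with a larger buffer. The helper and parameter names are assumptions;
 * ln_api_mutex is assumed held, as required by the comment above.
 */
static int
lnet_get_peer_list_example(u32 *countp, u32 *sizep,
                           struct lnet_process_id __user *ids)
{
        int rc;

        rc = lnet_get_peer_list(countp, sizep, ids);
        if (rc == -E2BIG)
                CDEBUG(D_NET, "buffer too small, need %u bytes for %u peers\n",
                       *sizep, *countp);

        return rc;
}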
867
868 /*
869  * Start pushes to peers that need to be updated for a configuration
870  * change on this node.
871  */
872 void
873 lnet_push_update_to_peers(int force)
874 {
875         struct lnet_peer_table *ptable;
876         struct lnet_peer *lp;
877         int lncpt;
878         int cpt;
879
880         lnet_net_lock(LNET_LOCK_EX);
881         if (lnet_peer_discovery_disabled)
882                 force = 0;
883         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
884         for (cpt = 0; cpt < lncpt; cpt++) {
885                 ptable = the_lnet.ln_peer_tables[cpt];
886                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
887                         if (force) {
888                                 spin_lock(&lp->lp_lock);
889                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
890                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
891                                 spin_unlock(&lp->lp_lock);
892                         }
893                         if (lnet_peer_needs_push(lp))
894                                 lnet_peer_queue_for_discovery(lp);
895                 }
896         }
897         lnet_net_unlock(LNET_LOCK_EX);
898         wake_up(&the_lnet.ln_dc_waitq);
899 }
900
901 /* find the NID in the preferred gateways for the remote peer
902  * return:
903  *      false: list is not empty and NID is not preferred
904  *      false: list is empty
905  *      true: nid is found in the list
906  */
907 bool
908 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
909                              lnet_nid_t gw_nid)
910 {
911         struct lnet_nid_list *ne;
912
913         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
914                libcfs_nid2str(lpni->lpni_nid),
915                list_empty(&lpni->lpni_rtr_pref_nids));
916
917         if (list_empty(&lpni->lpni_rtr_pref_nids))
918                 return false;
919
920         /* iterate through all the preferred NIDs and see if any of them
921          * matches the provided gw_nid
922          */
923         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
924                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
925                        libcfs_nid2str(ne->nl_nid),
926                        libcfs_nid2str(gw_nid));
927                 if (ne->nl_nid == gw_nid)
928                         return true;
929         }
930
931         return false;
932 }
933
934 void
935 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
936 {
937         struct list_head zombies;
938         struct lnet_nid_list *ne;
939         struct lnet_nid_list *tmp;
940         int cpt = lpni->lpni_cpt;
941
942         INIT_LIST_HEAD(&zombies);
943
944         lnet_net_lock(cpt);
945         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
946         lnet_net_unlock(cpt);
947
948         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
949                 list_del(&ne->nl_list);
950                 LIBCFS_FREE(ne, sizeof(*ne));
951         }
952 }
953
954 int
955 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
956                        lnet_nid_t gw_nid)
957 {
958         int cpt = lpni->lpni_cpt;
959         struct lnet_nid_list *ne = NULL;
960
961         /* This function is called with api_mutex held. When the api_mutex
962          * is held the list cannot be modified, as it is only modified as
963          * a result of applying a UDSP and that happens under api_mutex
964          * lock.
965          */
966         __must_hold(&the_lnet.ln_api_mutex);
967
968         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
969                 if (ne->nl_nid == gw_nid)
970                         return -EEXIST;
971         }
972
973         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
974         if (!ne)
975                 return -ENOMEM;
976
977         ne->nl_nid = gw_nid;
978
979         /* Lock the cpt to protect against addition and checks in the
980          * selection algorithm
981          */
982         lnet_net_lock(cpt);
983         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
984         lnet_net_unlock(cpt);
985
986         return 0;
987 }
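
/*
 * Illustrative sketch (not part of the original file): pairing the two
 * helpers above. A UDSP-style rule would add a preferred gateway while
 * holding the ln_api_mutex, and the selection code would later test it
 * under the net lock of the lpni's cpt. The helper name is an assumption.
 */
static bool
lnet_peer_pref_rtr_example(struct lnet_peer_ni *lpni, lnet_nid_t gw_nid)
{
        bool preferred;
        int rc;

        /* ln_api_mutex is assumed held here, as lnet_peer_add_pref_rtr()
         * requires. -EEXIST just means the gateway was already preferred.
         */
        rc = lnet_peer_add_pref_rtr(lpni, gw_nid);
        if (rc && rc != -EEXIST)
                return false;

        /* The check side runs under the net lock. */
        lnet_net_lock(lpni->lpni_cpt);
        preferred = lnet_peer_is_pref_rtr_locked(lpni, gw_nid);
        lnet_net_unlock(lpni->lpni_cpt);

        return preferred;
}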
988
989 /*
990  * Test whether a ni is a preferred ni for this peer_ni, i.e. whether
991  * this is a preferred point-to-point path. Call with lnet_net_lock in
992  * shared mode.
993  */
994 bool
995 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
996 {
997         struct lnet_nid_list *ne;
998
999         if (lpni->lpni_pref_nnids == 0)
1000                 return false;
1001         if (lpni->lpni_pref_nnids == 1)
1002                 return lpni->lpni_pref.nid == nid;
1003         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1004                 if (ne->nl_nid == nid)
1005                         return true;
1006         }
1007         return false;
1008 }
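
/*
 * Illustrative sketch (not part of the original file): how a sender might
 * consult the preference check above when choosing between two candidate
 * local NIDs for this peer_ni. The helper name is an assumption; the net
 * lock is assumed held in shared mode, as required above.
 */
static lnet_nid_t
lnet_pick_pref_nid_example(struct lnet_peer_ni *lpni,
                           lnet_nid_t candidate1, lnet_nid_t candidate2)
{
        if (lnet_peer_is_pref_nid_locked(lpni, candidate1))
                return candidate1;
        if (lnet_peer_is_pref_nid_locked(lpni, candidate2))
                return candidate2;

        /* Neither candidate is preferred; default to the first one. */
        return candidate1;
}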
1009
1010 /*
1011  * Set a single ni as preferred, provided no preferred ni is already
1012  * defined. Only to be used for non-multi-rail peer_ni.
1013  */
1014 int
1015 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1016 {
1017         int rc = 0;
1018
1019         spin_lock(&lpni->lpni_lock);
1020         if (nid == LNET_NID_ANY) {
1021                 rc = -EINVAL;
1022         } else if (lpni->lpni_pref_nnids > 0) {
1023                 rc = -EPERM;
1024         } else if (lpni->lpni_pref_nnids == 0) {
1025                 lpni->lpni_pref.nid = nid;
1026                 lpni->lpni_pref_nnids = 1;
1027                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1028         }
1029         spin_unlock(&lpni->lpni_lock);
1030
1031         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1032                libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
1033         return rc;
1034 }
1035
1036 /*
1037  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1038  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1039  */
1040 int
1041 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1042 {
1043         int rc = 0;
1044
1045         spin_lock(&lpni->lpni_lock);
1046         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1047                 lpni->lpni_pref_nnids = 0;
1048                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1049         } else if (lpni->lpni_pref_nnids == 0) {
1050                 rc = -ENOENT;
1051         } else {
1052                 rc = -EPERM;
1053         }
1054         spin_unlock(&lpni->lpni_lock);
1055
1056         CDEBUG(D_NET, "peer %s: %d\n",
1057                libcfs_nid2str(lpni->lpni_nid), rc);
1058         return rc;
1059 }
1060
1061 /*
1062  * Clear the preferred NIDs from a non-multi-rail peer.
1063  */
1064 void
1065 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1066 {
1067         struct lnet_peer_ni *lpni = NULL;
1068
1069         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1070                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1071 }
1072
1073 int
1074 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1075 {
1076         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1077         struct lnet_nid_list *ne1 = NULL;
1078         struct lnet_nid_list *ne2 = NULL;
1079         lnet_nid_t tmp_nid = LNET_NID_ANY;
1080         int rc = 0;
1081
1082         if (nid == LNET_NID_ANY) {
1083                 rc = -EINVAL;
1084                 goto out;
1085         }
1086
1087         if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
1088                 rc = -EEXIST;
1089                 goto out;
1090         }
1091
1092         /* A non-MR node may have only one preferred NI per peer_ni */
1093         if (lpni->lpni_pref_nnids > 0 &&
1094             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1095                 rc = -EPERM;
1096                 goto out;
1097         }
1098
1099         /* add the new preferred nid to the list of preferred nids */
1100         if (lpni->lpni_pref_nnids != 0) {
1101                 size_t alloc_size = sizeof(*ne1);
1102
1103                 if (lpni->lpni_pref_nnids == 1) {
1104                         tmp_nid = lpni->lpni_pref.nid;
1105                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1106                 }
1107
1108                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1109                         if (ne1->nl_nid == nid) {
1110                                 rc = -EEXIST;
1111                                 goto out;
1112                         }
1113                 }
1114
1115                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1116                                  alloc_size);
1117                 if (!ne1) {
1118                         rc = -ENOMEM;
1119                         goto out;
1120                 }
1121
1122                 /* move the originally stored nid to the list */
1123                 if (lpni->lpni_pref_nnids == 1) {
1124                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1125                                 lpni->lpni_cpt, alloc_size);
1126                         if (!ne2) {
1127                                 rc = -ENOMEM;
1128                                 goto out;
1129                         }
1130                         INIT_LIST_HEAD(&ne2->nl_list);
1131                         ne2->nl_nid = tmp_nid;
1132                 }
1133                 ne1->nl_nid = nid;
1134         }
1135
1136         lnet_net_lock(LNET_LOCK_EX);
1137         spin_lock(&lpni->lpni_lock);
1138         if (lpni->lpni_pref_nnids == 0) {
1139                 lpni->lpni_pref.nid = nid;
1140         } else {
1141                 if (ne2)
1142                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1143                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1144         }
1145         lpni->lpni_pref_nnids++;
1146         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1147         spin_unlock(&lpni->lpni_lock);
1148         lnet_net_unlock(LNET_LOCK_EX);
1149
1150 out:
1151         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1152                 spin_lock(&lpni->lpni_lock);
1153                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1154                 spin_unlock(&lpni->lpni_lock);
1155         }
1156         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1157                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1158         return rc;
1159 }
1160
1161 int
1162 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
1163 {
1164         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1165         struct lnet_nid_list *ne = NULL;
1166         int rc = 0;
1167
1168         if (lpni->lpni_pref_nnids == 0) {
1169                 rc = -ENOENT;
1170                 goto out;
1171         }
1172
1173         if (lpni->lpni_pref_nnids == 1) {
1174                 if (lpni->lpni_pref.nid != nid) {
1175                         rc = -ENOENT;
1176                         goto out;
1177                 }
1178         } else {
1179                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1180                         if (ne->nl_nid == nid)
1181                                 goto remove_nid_entry;
1182                 }
1183                 rc = -ENOENT;
1184                 ne = NULL;
1185                 goto out;
1186         }
1187
1188 remove_nid_entry:
1189         lnet_net_lock(LNET_LOCK_EX);
1190         spin_lock(&lpni->lpni_lock);
1191         if (lpni->lpni_pref_nnids == 1)
1192                 lpni->lpni_pref.nid = LNET_NID_ANY;
1193         else {
1194                 list_del_init(&ne->nl_list);
1195                 if (lpni->lpni_pref_nnids == 2) {
1196                         struct lnet_nid_list *ne, *tmp;
1197
1198                         list_for_each_entry_safe(ne, tmp,
1199                                                  &lpni->lpni_pref.nids,
1200                                                  nl_list) {
1201                                 lpni->lpni_pref.nid = ne->nl_nid;
1202                                 list_del_init(&ne->nl_list);
1203                                 LIBCFS_FREE(ne, sizeof(*ne));
1204                         }
1205                 }
1206         }
1207         lpni->lpni_pref_nnids--;
1208         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1209         spin_unlock(&lpni->lpni_lock);
1210         lnet_net_unlock(LNET_LOCK_EX);
1211
1212         if (ne)
1213                 LIBCFS_FREE(ne, sizeof(*ne));
1214 out:
1215         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1216                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1217         return rc;
1218 }
1219
1220 void
1221 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1222 {
1223         struct list_head zombies;
1224         struct lnet_nid_list *ne;
1225         struct lnet_nid_list *tmp;
1226
1227         INIT_LIST_HEAD(&zombies);
1228
1229         lnet_net_lock(LNET_LOCK_EX);
1230         if (lpni->lpni_pref_nnids == 1)
1231                 lpni->lpni_pref.nid = LNET_NID_ANY;
1232         else if (lpni->lpni_pref_nnids > 1)
1233                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1234         lpni->lpni_pref_nnids = 0;
1235         lnet_net_unlock(LNET_LOCK_EX);
1236
1237         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1238                 list_del_init(&ne->nl_list);
1239                 LIBCFS_FREE(ne, sizeof(*ne));
1240         }
1241 }
1242
1243 lnet_nid_t
1244 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1245 {
1246         struct lnet_peer_ni *lpni;
1247         lnet_nid_t primary_nid = nid;
1248
1249         lpni = lnet_find_peer_ni_locked(nid);
1250         if (lpni) {
1251                 primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1252                 lnet_peer_ni_decref_locked(lpni);
1253         }
1254
1255         return primary_nid;
1256 }
1257
1258 bool
1259 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1260 __must_hold(&lp->lp_lock)
1261 {
1262         if (lnet_peer_discovery_disabled)
1263                 return true;
1264
1265         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1266             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1267                 return true;
1268         }
1269
1270         return false;
1271 }
1272
1273 /*
1274  * Peer Discovery
1275  */
1276 bool
1277 lnet_is_discovery_disabled(struct lnet_peer *lp)
1278 {
1279         bool rc = false;
1280
1281         spin_lock(&lp->lp_lock);
1282         rc = lnet_is_discovery_disabled_locked(lp);
1283         spin_unlock(&lp->lp_lock);
1284
1285         return rc;
1286 }
1287
1288 lnet_nid_t
1289 LNetPrimaryNID(lnet_nid_t nid)
1290 {
1291         struct lnet_peer *lp;
1292         struct lnet_peer_ni *lpni;
1293         lnet_nid_t primary_nid = nid;
1294         int rc = 0;
1295         int cpt;
1296
1297         if (nid == LNET_NID_LO_0)
1298                 return LNET_NID_LO_0;
1299
1300         cpt = lnet_net_lock_current();
1301         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1302         if (IS_ERR(lpni)) {
1303                 rc = PTR_ERR(lpni);
1304                 goto out_unlock;
1305         }
1306         lp = lpni->lpni_peer_net->lpn_peer;
1307
1308         while (!lnet_peer_is_uptodate(lp)) {
1309                 spin_lock(&lp->lp_lock);
1310                 /* force a full discovery cycle */
1311                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1312                 spin_unlock(&lp->lp_lock);
1313
1314                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1315                 if (rc)
1316                         goto out_decref;
1317                 lp = lpni->lpni_peer_net->lpn_peer;
1318
1319                 /* Only try once if discovery is disabled */
1320                 if (lnet_is_discovery_disabled(lp))
1321                         break;
1322         }
1323         primary_nid = lp->lp_primary_nid;
1324 out_decref:
1325         lnet_peer_ni_decref_locked(lpni);
1326 out_unlock:
1327         lnet_net_unlock(cpt);
1328
1329         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1330                libcfs_nid2str(primary_nid), rc);
1331         return primary_nid;
1332 }
1333 EXPORT_SYMBOL(LNetPrimaryNID);
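
/*
 * Illustrative sketch (not part of the original file): an LNet user (for
 * example a connection setup path) can use the exported LNetPrimaryNID()
 * to canonicalize a NID before comparing peers, so that two NIDs of the
 * same multi-rail peer are recognized as the same node. The helper name
 * is an assumption.
 */
static bool
lnet_same_peer_example(lnet_nid_t nid1, lnet_nid_t nid2)
{
        /* Discovery may be triggered here, so this can block. */
        return LNetPrimaryNID(nid1) == LNetPrimaryNID(nid2);
}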
1334
1335 struct lnet_peer_net *
1336 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1337 {
1338         struct lnet_peer_net *peer_net;
1339         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1340                 if (peer_net->lpn_net_id == net_id)
1341                         return peer_net;
1342         }
1343         return NULL;
1344 }
1345
1346 /*
1347  * Attach a peer_ni to a peer_net and peer. This function assumes
1348  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1349  * may be attached to a different peer, in which case it will be
1350  * properly detached first. The whole operation is done atomically.
1351  *
1352  * This function consumes the reference on lpni and always returns 0.
1353  * This is the last function called from functions that do return an
1354  * int, so returning 0 here allows the compiler to do a tail call.
1355  */
1356 static int
1357 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1358                                 struct lnet_peer_net *lpn,
1359                                 struct lnet_peer_ni *lpni,
1360                                 unsigned flags)
1361 {
1362         struct lnet_peer_table *ptable;
1363
1364         /* Install the new peer_ni */
1365         lnet_net_lock(LNET_LOCK_EX);
1366         /* Add peer_ni to global peer table hash, if necessary. */
1367         if (list_empty(&lpni->lpni_hashlist)) {
1368                 int hash = lnet_nid2peerhash(lpni->lpni_nid);
1369
1370                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1371                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1372                 ptable->pt_version++;
1373                 lnet_peer_ni_addref_locked(lpni);
1374         }
1375
1376         /* Detach the peer_ni from an existing peer, if necessary. */
1377         if (lpni->lpni_peer_net) {
1378                 LASSERT(lpni->lpni_peer_net != lpn);
1379                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1380                 lnet_peer_detach_peer_ni_locked(lpni);
1381                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1382                 lpni->lpni_peer_net = NULL;
1383         }
1384
1385         /* Add peer_ni to peer_net */
1386         lpni->lpni_peer_net = lpn;
1387         list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1388         lnet_update_peer_net_healthv(lpni);
1389         lnet_peer_net_addref_locked(lpn);
1390
1391         /* Add peer_net to peer */
1392         if (!lpn->lpn_peer) {
1393                 lpn->lpn_peer = lp;
1394                 list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1395                 lnet_peer_addref_locked(lp);
1396         }
1397
1398         /* Add peer to global peer list, if necessary */
1399         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1400         if (list_empty(&lp->lp_peer_list)) {
1401                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1402                 ptable->pt_peers++;
1403         }
1404
1405
1406         /* Update peer state */
1407         spin_lock(&lp->lp_lock);
1408         if (flags & LNET_PEER_CONFIGURED) {
1409                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1410                         lp->lp_state |= LNET_PEER_CONFIGURED;
1411         }
1412         if (flags & LNET_PEER_MULTI_RAIL) {
1413                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1414                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1415                         lnet_peer_clr_non_mr_pref_nids(lp);
1416                 }
1417         }
1418         spin_unlock(&lp->lp_lock);
1419
1420         lp->lp_nnis++;
1421
1422         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1423                libcfs_nid2str(lp->lp_primary_nid),
1424                libcfs_nid2str(lpni->lpni_nid), flags);
1425         lnet_peer_ni_decref_locked(lpni);
1426         lnet_net_unlock(LNET_LOCK_EX);
1427
1428         return 0;
1429 }
1430
1431 /*
1432  * Create a new peer, with nid as its primary nid.
1433  *
1434  * Call with the lnet_api_mutex held.
1435  */
1436 static int
1437 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1438 {
1439         struct lnet_peer *lp;
1440         struct lnet_peer_net *lpn;
1441         struct lnet_peer_ni *lpni;
1442         int rc = 0;
1443
1444         LASSERT(nid != LNET_NID_ANY);
1445
1446         /*
1447          * No need for the lnet_net_lock here, because the
1448          * lnet_api_mutex is held.
1449          */
1450         lpni = lnet_find_peer_ni_locked(nid);
1451         if (lpni) {
1452                 /* A peer with this NID already exists. */
1453                 lp = lpni->lpni_peer_net->lpn_peer;
1454                 lnet_peer_ni_decref_locked(lpni);
1455                 /*
1456                  * This is an error if the peer was configured and the
1457                  * primary NID differs or an attempt is made to change
1458                  * the Multi-Rail flag. Otherwise the assumption is
1459                  * that an existing peer is being modified.
1460                  */
1461                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1462                         if (lp->lp_primary_nid != nid)
1463                                 rc = -EEXIST;
1464                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1465                                 rc = -EPERM;
1466                         goto out;
1467                 }
1468                 /* Delete and recreate as a configured peer. */
1469                 lnet_peer_del(lp);
1470         }
1471
1472         /* Create peer, peer_net, and peer_ni. */
1473         rc = -ENOMEM;
1474         lp = lnet_peer_alloc(nid);
1475         if (!lp)
1476                 goto out;
1477         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1478         if (!lpn)
1479                 goto out_free_lp;
1480         lpni = lnet_peer_ni_alloc(nid);
1481         if (!lpni)
1482                 goto out_free_lpn;
1483
1484         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1485
1486 out_free_lpn:
1487         LIBCFS_FREE(lpn, sizeof(*lpn));
1488 out_free_lp:
1489         LIBCFS_FREE(lp, sizeof(*lp));
1490 out:
1491         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1492                libcfs_nid2str(nid), flags, rc);
1493         return rc;
1494 }
1495
1496 /*
1497  * Add a NID to a peer. Call with ln_api_mutex held.
1498  *
1499  * Error codes:
1500  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1501  *  -EEXIST:   The NID was configured by DLC for a different peer.
1502  *  -ENOMEM:   Out of memory.
1503  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1504  *             non-multi-rail peer.
1505  */
1506 static int
1507 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1508 {
1509         struct lnet_peer_net *lpn;
1510         struct lnet_peer_ni *lpni;
1511         int rc = 0;
1512
1513         LASSERT(lp);
1514         LASSERT(nid != LNET_NID_ANY);
1515
1516         /* A configured peer can only be updated through configuration. */
1517         if (!(flags & LNET_PEER_CONFIGURED)) {
1518                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1519                         rc = -EPERM;
1520                         goto out;
1521                 }
1522         }
1523
1524         /*
1525          * The MULTI_RAIL flag can be set but not cleared, because
1526          * that would leave the peer struct in an invalid state.
1527          */
1528         if (flags & LNET_PEER_MULTI_RAIL) {
1529                 spin_lock(&lp->lp_lock);
1530                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1531                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1532                         lnet_peer_clr_non_mr_pref_nids(lp);
1533                 }
1534                 spin_unlock(&lp->lp_lock);
1535         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1536                 rc = -EPERM;
1537                 goto out;
1538         }
1539
1540         lpni = lnet_find_peer_ni_locked(nid);
1541         if (lpni) {
1542                 /*
1543                  * A peer_ni already exists. This is only a problem if
1544                  * it is not connected to this peer and was configured
1545                  * by DLC.
1546                  */
1547                 if (lpni->lpni_peer_net->lpn_peer == lp)
1548                         goto out_free_lpni;
1549                 if (lnet_peer_ni_is_configured(lpni)) {
1550                         rc = -EEXIST;
1551                         goto out_free_lpni;
1552                 }
1553                 /* If this is the primary NID, destroy the peer. */
1554                 if (lnet_peer_ni_is_primary(lpni)) {
1555                         struct lnet_peer *rtr_lp =
1556                                 lpni->lpni_peer_net->lpn_peer;
1557                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1558                         /*
1559                          * if we're trying to delete a router it means
1560                          * we're moving this peer NI to a new peer so must
1561                          * we're moving this peer NI to a new peer, so we must
1562                          */
1563                         if (rtr_refcount > 0) {
1564                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1565                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1566                         }
1567                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1568                         lnet_peer_ni_decref_locked(lpni);
1569                         lpni = lnet_peer_ni_alloc(nid);
1570                         if (!lpni) {
1571                                 rc = -ENOMEM;
1572                                 goto out_free_lpni;
1573                         }
1574                 }
1575         } else {
1576                 lpni = lnet_peer_ni_alloc(nid);
1577                 if (!lpni) {
1578                         rc = -ENOMEM;
1579                         goto out_free_lpni;
1580                 }
1581         }
1582
1583         /*
1584          * Get the peer_net. Check that we're not adding a second
1585          * peer_ni on a peer_net of a non-multi-rail peer.
1586          */
1587         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1588         if (!lpn) {
1589                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1590                 if (!lpn) {
1591                         rc = -ENOMEM;
1592                         goto out_free_lpni;
1593                 }
1594         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1595                 rc = -ENOTUNIQ;
1596                 goto out_free_lpni;
1597         }
1598
1599         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1600
1601 out_free_lpni:
1602         lnet_peer_ni_decref_locked(lpni);
1603 out:
1604         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1605                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1606                flags, rc);
1607         return rc;
1608 }
1609
1610 /*
1611  * Update the primary NID of a peer, if possible.
1612  *
1613  * Call with the lnet_api_mutex held.
1614  */
1615 static int
1616 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1617 {
1618         lnet_nid_t old = lp->lp_primary_nid;
1619         int rc = 0;
1620
1621         if (lp->lp_primary_nid == nid)
1622                 goto out;
1623         rc = lnet_peer_add_nid(lp, nid, flags);
1624         if (rc)
1625                 goto out;
1626         lp->lp_primary_nid = nid;
1627 out:
1628         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1629                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1630         return rc;
1631 }
1632
1633 /*
1634  * lpni creation initiated due to traffic either sending or receiving.
1635  */
1636 static int
1637 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1638 {
1639         struct lnet_peer *lp;
1640         struct lnet_peer_net *lpn;
1641         struct lnet_peer_ni *lpni;
1642         unsigned flags = 0;
1643         int rc = 0;
1644
1645         if (nid == LNET_NID_ANY) {
1646                 rc = -EINVAL;
1647                 goto out;
1648         }
1649
1650         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1651         lpni = lnet_find_peer_ni_locked(nid);
1652         if (lpni) {
1653                 /*
1654                  * We must have raced with another thread. Since we
1655                  * know next to nothing about a peer_ni created by
1656                  * traffic, we just assume everything is ok and
1657                  * return.
1658                  */
1659                 lnet_peer_ni_decref_locked(lpni);
1660                 goto out;
1661         }
1662
1663         /* Create peer, peer_net, and peer_ni. */
1664         rc = -ENOMEM;
1665         lp = lnet_peer_alloc(nid);
1666         if (!lp)
1667                 goto out;
1668         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1669         if (!lpn)
1670                 goto out_free_lp;
1671         lpni = lnet_peer_ni_alloc(nid);
1672         if (!lpni)
1673                 goto out_free_lpn;
1674         if (pref != LNET_NID_ANY)
1675                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1676
1677         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1678
1679 out_free_lpn:
1680         LIBCFS_FREE(lpn, sizeof(*lpn));
1681 out_free_lp:
1682         LIBCFS_FREE(lp, sizeof(*lp));
1683 out:
1684         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1685         return rc;
1686 }
1687
1688 /*
1689  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1690  *
1691  * This API handles the following combinations:
1692  *   Create a peer with its primary NID if only the prim_nid is provided.
1693  *   Add a NID to a peer identified by the prim_nid. The peer identified
1694  *   by the prim_nid must already exist.
1695  *   The peer being created may be non-MR.
1696  *
1697  * The caller must hold ln_api_mutex. This prevents the peer from
1698  * being created/modified/deleted by a different thread.
1699  */
1700 int
1701 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1702 {
1703         struct lnet_peer *lp = NULL;
1704         struct lnet_peer_ni *lpni;
1705         unsigned flags;
1706
1707         /* The prim_nid must always be specified */
1708         if (prim_nid == LNET_NID_ANY)
1709                 return -EINVAL;
1710
1711         flags = LNET_PEER_CONFIGURED;
1712         if (mr)
1713                 flags |= LNET_PEER_MULTI_RAIL;
1714
1715         /*
1716          * If nid isn't specified, we must create a new peer with
1717          * prim_nid as its primary nid.
1718          */
1719         if (nid == LNET_NID_ANY)
1720                 return lnet_peer_add(prim_nid, flags);
1721
1722         /* Look up the prim_nid, which must exist. */
1723         lpni = lnet_find_peer_ni_locked(prim_nid);
1724         if (!lpni)
1725                 return -ENOENT;
1726         lnet_peer_ni_decref_locked(lpni);
1727         lp = lpni->lpni_peer_net->lpn_peer;
1728
1729         /* Peer must have been configured. */
1730         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1731                 CDEBUG(D_NET, "peer %s was not configured\n",
1732                        libcfs_nid2str(prim_nid));
1733                 return -ENOENT;
1734         }
1735
1736         /* Primary NID must match */
1737         if (lp->lp_primary_nid != prim_nid) {
1738                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1739                        libcfs_nid2str(prim_nid),
1740                        libcfs_nid2str(lp->lp_primary_nid));
1741                 return -ENODEV;
1742         }
1743
1744         /* Multi-Rail flag must match. */
1745         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1746                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1747                        libcfs_nid2str(prim_nid));
1748                 return -EPERM;
1749         }
1750
1751         return lnet_peer_add_nid(lp, nid, flags);
1752 }
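
/*
 * Illustrative usage sketch for lnet_add_peer_ni() above (not part of
 * the kernel code; nid1 and nid2 are hypothetical NIDs and the caller
 * is assumed to hold ln_api_mutex as required):
 *
 *	rc = lnet_add_peer_ni(nid1, LNET_NID_ANY, true);
 *
 * creates a Multi-Rail peer whose primary NID is nid1, while
 *
 *	rc = lnet_add_peer_ni(nid1, nid2, true);
 *
 * adds nid2 to that peer. The second call fails with -ENOENT if the
 * peer does not exist, -ENODEV if nid1 is not its primary NID, or
 * -EPERM if the Multi-Rail flag does not match.
 */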
1753
1754 /*
1755  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1756  *
1757  * This API handles the following combinations:
1758  *   Delete a NID from a peer if both prim_nid and nid are provided.
1759  *   Delete the whole peer if only prim_nid is provided.
1760  *   Delete the whole peer if nid matches the peer's primary NID.
1761  *
1762  * The caller must hold ln_api_mutex. This prevents the peer from
1763  * being modified/deleted by a different thread.
1764  */
1765 int
1766 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1767 {
1768         struct lnet_peer *lp;
1769         struct lnet_peer_ni *lpni;
1770         unsigned flags;
1771
1772         if (prim_nid == LNET_NID_ANY)
1773                 return -EINVAL;
1774
1775         lpni = lnet_find_peer_ni_locked(prim_nid);
1776         if (!lpni)
1777                 return -ENOENT;
1778         lnet_peer_ni_decref_locked(lpni);
1779         lp = lpni->lpni_peer_net->lpn_peer;
1780
1781         if (prim_nid != lp->lp_primary_nid) {
1782                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1783                        libcfs_nid2str(prim_nid),
1784                        libcfs_nid2str(lp->lp_primary_nid));
1785                 return -ENODEV;
1786         }
1787
1788         lnet_net_lock(LNET_LOCK_EX);
1789         if (lp->lp_rtr_refcount > 0) {
1790                 lnet_net_unlock(LNET_LOCK_EX);
1791                 CERROR("%s is a router. Can not be deleted\n",
1792                        libcfs_nid2str(prim_nid));
1793                 return -EBUSY;
1794         }
1795         lnet_net_unlock(LNET_LOCK_EX);
1796
1797         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1798                 return lnet_peer_del(lp);
1799
1800         flags = LNET_PEER_CONFIGURED;
1801         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1802                 flags |= LNET_PEER_MULTI_RAIL;
1803
1804         return lnet_peer_del_nid(lp, nid, flags);
1805 }
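
/*
 * Illustrative usage sketch for lnet_del_peer_ni() above (not part of
 * the kernel code; prim and nid2 are hypothetical NIDs and the caller
 * is assumed to hold ln_api_mutex):
 *
 *	rc = lnet_del_peer_ni(prim, nid2);		remove one NID
 *	rc = lnet_del_peer_ni(prim, LNET_NID_ANY);	delete the peer
 *
 * Either call returns -ENODEV if prim is not the peer's primary NID,
 * and -EBUSY if the peer is currently in use as a router.
 */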
1806
1807 void
1808 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
1809 {
1810         struct lnet_peer_table *ptable;
1811         struct lnet_peer_net *lpn;
1812
1813         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1814
1815         LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
1816         LASSERT(list_empty(&lpni->lpni_txq));
1817         LASSERT(lpni->lpni_txqnob == 0);
1818         LASSERT(list_empty(&lpni->lpni_peer_nis));
1819         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1820
1821         lpn = lpni->lpni_peer_net;
1822         lpni->lpni_peer_net = NULL;
1823         lpni->lpni_net = NULL;
1824
1825         if (!list_empty(&lpni->lpni_hashlist)) {
1826                 /* remove the peer ni from the zombie list */
1827                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1828                 spin_lock(&ptable->pt_zombie_lock);
1829                 list_del_init(&lpni->lpni_hashlist);
1830                 ptable->pt_zombies--;
1831                 spin_unlock(&ptable->pt_zombie_lock);
1832         }
1833
1834         if (lpni->lpni_pref_nnids > 1) {
1835                 struct lnet_nid_list *ne, *tmp;
1836
1837                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
1838                                          nl_list) {
1839                         list_del_init(&ne->nl_list);
1840                         LIBCFS_FREE(ne, sizeof(*ne));
1841                 }
1842         }
1843         LIBCFS_FREE(lpni, sizeof(*lpni));
1844
1845         if (lpn)
1846                 lnet_peer_net_decref_locked(lpn);
1847 }
1848
1849 struct lnet_peer_ni *
1850 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1851 {
1852         struct lnet_peer_ni *lpni = NULL;
1853         int rc;
1854
1855         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1856                 return ERR_PTR(-ESHUTDOWN);
1857
1858         /*
1859          * find if a peer_ni already exists.
1860          * If so then just return that.
1861          */
1862         lpni = lnet_find_peer_ni_locked(nid);
1863         if (lpni)
1864                 return lpni;
1865
1866         lnet_net_unlock(cpt);
1867
1868         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1869         if (rc) {
1870                 lpni = ERR_PTR(rc);
1871                 goto out_net_relock;
1872         }
1873
1874         lpni = lnet_find_peer_ni_locked(nid);
1875         LASSERT(lpni);
1876
1877 out_net_relock:
1878         lnet_net_lock(cpt);
1879
1880         return lpni;
1881 }
1882
1883 /*
1884  * Get a peer_ni for the given nid, create it if necessary. Takes a
1885  * hold on the peer_ni.
1886  */
1887 struct lnet_peer_ni *
1888 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1889 {
1890         struct lnet_peer_ni *lpni = NULL;
1891         int rc;
1892
1893         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1894                 return ERR_PTR(-ESHUTDOWN);
1895
1896         /*
1897          * find if a peer_ni already exists.
1898          * If so then just return that.
1899          */
1900         lpni = lnet_find_peer_ni_locked(nid);
1901         if (lpni)
1902                 return lpni;
1903
1904         /*
1905          * Slow path:
1906          * use the lnet_api_mutex to serialize the creation of the peer_ni
1907          * and the creation/deletion of the local ni/net. When a local ni is
1908          * created, if there exists a set of peer_nis on that network,
1909          * they need to be traversed and updated. When a local NI is
1910          * deleted, which could result in a network being deleted, then
1911          * all peer nis on that network need to be removed as well.
1912          *
1913          * Creation through traffic should also be serialized with
1914          * creation through DLC.
1915          */
1916         lnet_net_unlock(cpt);
1917         mutex_lock(&the_lnet.ln_api_mutex);
1918         /*
1919          * Shutdown is only set under the ln_api_lock, so a single
1920          * Shutdown is only set under the ln_api_mutex, so a single
1921          * check here is sufficient.
1922         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1923                 lpni = ERR_PTR(-ESHUTDOWN);
1924                 goto out_mutex_unlock;
1925         }
1926
1927         rc = lnet_peer_ni_traffic_add(nid, pref);
1928         if (rc) {
1929                 lpni = ERR_PTR(rc);
1930                 goto out_mutex_unlock;
1931         }
1932
1933         lpni = lnet_find_peer_ni_locked(nid);
1934         LASSERT(lpni);
1935
1936 out_mutex_unlock:
1937         mutex_unlock(&the_lnet.ln_api_mutex);
1938         lnet_net_lock(cpt);
1939
1940         /* Lock has been dropped, check again for shutdown. */
1941         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1942                 if (!IS_ERR(lpni))
1943                         lnet_peer_ni_decref_locked(lpni);
1944                 lpni = ERR_PTR(-ESHUTDOWN);
1945         }
1946
1947         return lpni;
1948 }
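
/*
 * Minimal caller sketch for lnet_nid2peerni_locked() above (for
 * illustration only; the caller is assumed to hold lnet_net_lock(cpt)
 * and is responsible for dropping the hold taken by this function):
 *
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);
 *	... use lpni under the net lock ...
 *	lnet_peer_ni_decref_locked(lpni);
 */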
1949
1950 bool
1951 lnet_peer_gw_discovery(struct lnet_peer *lp)
1952 {
1953         bool rc = false;
1954
1955         spin_lock(&lp->lp_lock);
1956         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
1957                 rc = true;
1958         spin_unlock(&lp->lp_lock);
1959
1960         return rc;
1961 }
1962
1963 bool
1964 lnet_peer_is_uptodate(struct lnet_peer *lp)
1965 {
1966         bool rc;
1967
1968         spin_lock(&lp->lp_lock);
1969         rc = lnet_peer_is_uptodate_locked(lp);
1970         spin_unlock(&lp->lp_lock);
1971         return rc;
1972 }
1973
1974 /*
1975  * Is a peer uptodate from the point of view of discovery?
1976  *
1977  * If it is currently being processed, obviously not.
1978  * A forced Ping or Push is also handled by the discovery thread.
1979  *
1980  * Otherwise look at whether the peer needs rediscovering.
1981  */
1982 bool
1983 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
1984 __must_hold(&lp->lp_lock)
1985 {
1986         bool rc;
1987
1988         if (lp->lp_state & (LNET_PEER_DISCOVERING |
1989                             LNET_PEER_FORCE_PING |
1990                             LNET_PEER_FORCE_PUSH)) {
1991                 rc = false;
1992         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
1993                 rc = false;
1994         } else if (lnet_peer_needs_push(lp)) {
1995                 rc = false;
1996         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
1997                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
1998                         rc = true;
1999                 else
2000                         rc = false;
2001         } else {
2002                 rc = false;
2003         }
2004
2005         return rc;
2006 }
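
/*
 * Condensed restatement of the checks above (an illustrative comment,
 * not a separate implementation):
 *
 *	rc = !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *			       LNET_PEER_FORCE_PING |
 *			       LNET_PEER_FORCE_PUSH |
 *			       LNET_PEER_REDISCOVER)) &&
 *	     !lnet_peer_needs_push(lp) &&
 *	     (lp->lp_state & LNET_PEER_DISCOVERED) &&
 *	     (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
 */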
2007
2008 /*
2009  * Queue a peer for the attention of the discovery thread.  Call with
2010  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2011  * -EALREADY if the peer was already queued.
2012  */
2013 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2014 {
2015         int rc;
2016
2017         spin_lock(&lp->lp_lock);
2018         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2019                 lp->lp_state |= LNET_PEER_DISCOVERING;
2020         spin_unlock(&lp->lp_lock);
2021         if (list_empty(&lp->lp_dc_list)) {
2022                 lnet_peer_addref_locked(lp);
2023                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2024                 wake_up(&the_lnet.ln_dc_waitq);
2025                 rc = 0;
2026         } else {
2027                 rc = -EALREADY;
2028         }
2029
2030         CDEBUG(D_NET, "Queue peer %s: %d\n",
2031                libcfs_nid2str(lp->lp_primary_nid), rc);
2032
2033         return rc;
2034 }
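
/*
 * Typical caller idiom, mirroring lnet_peer_push_event() below (shown
 * here for illustration; lnet_net_lock/EX must be held): a -EALREADY
 * return means the peer was already queued, in which case the caller
 * moves it to the head of the request queue and wakes the discovery
 * thread itself.
 *
 *	if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
 *		list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
 *		wake_up(&the_lnet.ln_dc_waitq);
 *	}
 */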
2035
2036 /*
2037  * Discovery of a peer is complete. Wake all waiters on the peer.
2038  * Call with lnet_net_lock/EX held.
2039  */
2040 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2041 {
2042         struct lnet_msg *msg, *tmp;
2043         int rc = 0;
2044         LIST_HEAD(pending_msgs);
2045
2046         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2047                libcfs_nid2str(lp->lp_primary_nid));
2048
2049         list_del_init(&lp->lp_dc_list);
2050         spin_lock(&lp->lp_lock);
2051         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2052         spin_unlock(&lp->lp_lock);
2053         wake_up_all(&lp->lp_dc_waitq);
2054
2055         if (lp->lp_rtr_refcount > 0)
2056                 lnet_router_discovery_complete(lp);
2057
2058         lnet_net_unlock(LNET_LOCK_EX);
2059
2060         /* iterate through all pending messages and send them again */
2061         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2062                 list_del_init(&msg->msg_list);
2063                 if (lp->lp_dc_error) {
2064                         lnet_finalize(msg, lp->lp_dc_error);
2065                         continue;
2066                 }
2067
2068                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2069                        lnet_msgtyp2str(msg->msg_type),
2070                        libcfs_id2str(msg->msg_target));
2071                 rc = lnet_send(msg->msg_src_nid_param, msg,
2072                                msg->msg_rtr_nid_param);
2073                 if (rc < 0) {
2074                         CNETERR("Error sending %s to %s: %d\n",
2075                                lnet_msgtyp2str(msg->msg_type),
2076                                libcfs_id2str(msg->msg_target), rc);
2077                         lnet_finalize(msg, rc);
2078                 }
2079         }
2080         lnet_net_lock(LNET_LOCK_EX);
2081         lnet_peer_decref_locked(lp);
2082 }
2083
2084 /*
2085  * Handle inbound push.
2086  * Like any event handler, called with lnet_res_lock/CPT held.
2087  */
2088 void lnet_peer_push_event(struct lnet_event *ev)
2089 {
2090         struct lnet_ping_buffer *pbuf;
2091         struct lnet_peer *lp;
2092
2093         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2094
2095         /* lnet_find_peer() adds a refcount */
2096         lp = lnet_find_peer(ev->source.nid);
2097         if (!lp) {
2098                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2099                        libcfs_nid2str(ev->initiator.nid),
2100                        libcfs_nid2str(ev->source.nid));
2101                 pbuf->pb_needs_post = true;
2102                 return;
2103         }
2104
2105         /* Ensure peer state remains consistent while we modify it. */
2106         spin_lock(&lp->lp_lock);
2107
2108         /*
2109          * If some kind of error happened the contents of the message
2110          * cannot be used. Clear the NIDS_UPTODATE and set the
2111          * FORCE_PING flag to trigger a ping.
2112          */
2113         if (ev->status) {
2114                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2115                 lp->lp_state |= LNET_PEER_FORCE_PING;
2116                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2117                        ev->status,
2118                        libcfs_nid2str(lp->lp_primary_nid),
2119                        libcfs_nid2str(ev->source.nid));
2120                 goto out;
2121         }
2122
2123         /*
2124          * A push with invalid or corrupted info. Clear the UPTODATE
2125          * flag to trigger a ping.
2126          */
2127         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2128                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2129                 lp->lp_state |= LNET_PEER_FORCE_PING;
2130                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2131                        libcfs_nid2str(lp->lp_primary_nid));
2132                 goto out;
2133         }
2134
2135         /*
2136          * Make sure we'll allocate the correct size ping buffer when
2137          * pinging the peer.
2138          */
2139         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2140                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2141
2142         /*
2143          * A non-Multi-Rail peer is not supposed to be capable of
2144          * sending a push.
2145          */
2146         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2147                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2148                        libcfs_nid2str(lp->lp_primary_nid));
2149                 goto out;
2150         }
2151
2152         /*
2153          * The peer may have discovery disabled at its end. Set
2154          * NO_DISCOVERY as appropriate.
2155          */
2156         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2157                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2158                        libcfs_nid2str(lp->lp_primary_nid));
2159                 /*
2160                  * Mark the peer for deletion if we already know about it
2161                  * and it is going from discovery enabled to discovery disabled
2162                  */
2163                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2164                                       LNET_PEER_DISCOVERING)) &&
2165                      lp->lp_state & LNET_PEER_DISCOVERED) {
2166                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2167                                libcfs_nid2str(lp->lp_primary_nid),
2168                                lp->lp_state);
2169                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2170                 }
2171                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2172         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2173                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2174                        libcfs_nid2str(lp->lp_primary_nid));
2175                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2176         }
2177
2178         /*
2179          * Update the MULTI_RAIL flag based on the push. If the peer
2180          * was configured with DLC then the setting should match what
2181          * DLC put in.
2182          * NB: We verified above that the MR feature bit is set in pi_features
2183          */
2184         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2185                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2186                        libcfs_nid2str(lp->lp_primary_nid), lp);
2187         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2188                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2189                       libcfs_nid2str(lp->lp_primary_nid));
2190         } else if (lnet_peer_discovery_disabled) {
2191                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2192                        libcfs_nid2str(lp->lp_primary_nid), lp);
2193         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2194                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2195                        libcfs_nid2str(lp->lp_primary_nid), lp);
2196         } else {
2197                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2198                        libcfs_nid2str(lp->lp_primary_nid), lp);
2199                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2200                 lnet_peer_clr_non_mr_pref_nids(lp);
2201         }
2202
2203         /*
2204          * Check for truncation of the Put message. Clear the
2205          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2206          * and tell discovery to allocate a bigger buffer.
2207          */
2208         if (ev->mlength < ev->rlength) {
2209                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2210                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2211                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2212                 lp->lp_state |= LNET_PEER_FORCE_PING;
2213                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2214                        libcfs_nid2str(lp->lp_primary_nid),
2215                        pbuf->pb_info.pi_nnis);
2216                 goto out;
2217         }
2218
2219         /* always assume new data */
2220         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2221         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2222
2223         /*
2224          * If there is data present that hasn't been processed yet,
2225          * we'll replace it if the Put contained newer data and it
2226          * fits. We're racing with a Ping or earlier Push in this
2227          * case.
2228          */
2229         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2230                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2231                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2232                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2233                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2234                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2235                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2236                               libcfs_nid2str(lp->lp_primary_nid),
2237                               LNET_PING_BUFFER_SEQNO(pbuf),
2238                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2239                 }
2240                 goto out;
2241         }
2242
2243         /*
2244          * Allocate a buffer to copy the data. On a failure we drop
2245          * the Push and set FORCE_PING to force the discovery
2246          * thread to fix the problem by pinging the peer.
2247          */
2248         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2249         if (!lp->lp_data) {
2250                 lp->lp_state |= LNET_PEER_FORCE_PING;
2251                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2252                        libcfs_nid2str(lp->lp_primary_nid),
2253                        LNET_PING_BUFFER_SEQNO(pbuf));
2254                 goto out;
2255         }
2256
2257         /* Success */
2258         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2259                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2260         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2261         CDEBUG(D_NET, "Received Push %s %u\n",
2262                libcfs_nid2str(lp->lp_primary_nid),
2263                LNET_PING_BUFFER_SEQNO(pbuf));
2264
2265 out:
2266         /* We've processed this buffer. It can be reposted */
2267         pbuf->pb_needs_post = true;
2268
2269         /*
2270          * Queue the peer for discovery if it is not up to date. If it was
2271          * already queued, force it onto the request queue and wake the
2272          * discovery thread, because its status has changed.
2273          */
2274         spin_unlock(&lp->lp_lock);
2275         lnet_net_lock(LNET_LOCK_EX);
2276         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2277                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2278                 wake_up(&the_lnet.ln_dc_waitq);
2279         }
2280         /* Drop refcount from lookup */
2281         lnet_peer_decref_locked(lp);
2282         lnet_net_unlock(LNET_LOCK_EX);
2283 }
2284
2285 /*
2286  * Clear the discovery error state, unless we're already discovering
2287  * this peer, in which case the error is current.
2288  */
2289 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2290 {
2291         spin_lock(&lp->lp_lock);
2292         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2293                 lp->lp_dc_error = 0;
2294         spin_unlock(&lp->lp_lock);
2295 }
2296
2297 /*
2298  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2299  * dropped/retaken within this function. An lnet_peer_ni is passed in
2300  * because discovery could tear down an lnet_peer.
2301  */
2302 int
2303 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2304 {
2305         DEFINE_WAIT(wait);
2306         struct lnet_peer *lp;
2307         int rc = 0;
2308         int count = 0;
2309
2310 again:
2311         lnet_net_unlock(cpt);
2312         lnet_net_lock(LNET_LOCK_EX);
2313         lp = lpni->lpni_peer_net->lpn_peer;
2314         lnet_peer_clear_discovery_error(lp);
2315
2316         /*
2317          * We're willing to be interrupted. The lpni can become a
2318          * zombie if we race with DLC, so we must check for that.
2319          */
2320         for (;;) {
2321                 /* Keep lp alive when the lnet_net_lock is unlocked */
2322                 lnet_peer_addref_locked(lp);
2323                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2324                 if (signal_pending(current))
2325                         break;
2326                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2327                         break;
2328                 /*
2329                  * Don't repeat discovery if discovery is disabled. This is
2330                  * done to ensure we can use discovery as a standard ping as
2331                  * well for backwards compatibility with routers which do not
2332                  * have discovery or have discovery disabled
2333                  */
2334                 if (lnet_is_discovery_disabled(lp) && count > 0)
2335                         break;
2336                 if (lp->lp_dc_error)
2337                         break;
2338                 if (lnet_peer_is_uptodate(lp))
2339                         break;
2340                 lnet_peer_queue_for_discovery(lp);
2341                 count++;
2342                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2343
2344                 /*
2345                  * If caller requested a non-blocking operation then
2346                  * return immediately. Once discovery is complete any
2347                  * pending messages that were stopped due to discovery
2348                  * will be transmitted.
2349                  */
2350                 if (!block)
2351                         break;
2352
2353                 lnet_net_unlock(LNET_LOCK_EX);
2354                 schedule();
2355                 finish_wait(&lp->lp_dc_waitq, &wait);
2356                 lnet_net_lock(LNET_LOCK_EX);
2357                 lnet_peer_decref_locked(lp);
2358                 /* Peer may have changed */
2359                 lp = lpni->lpni_peer_net->lpn_peer;
2360         }
2361         finish_wait(&lp->lp_dc_waitq, &wait);
2362
2363         lnet_net_unlock(LNET_LOCK_EX);
2364         lnet_net_lock(cpt);
2365         lnet_peer_decref_locked(lp);
2366         /*
2367          * The peer may have changed, so re-check and rediscover if that turns
2368          * out to have been the case. The reference count on lp ensured that
2369          * even if it was unlinked from lpni the memory could not be recycled.
2370          * Thus the check below is sufficient to determine whether the peer
2371          * changed. If the peer changed, then lp must not be dereferenced.
2372          */
2373         if (lp != lpni->lpni_peer_net->lpn_peer)
2374                 goto again;
2375
2376         if (signal_pending(current))
2377                 rc = -EINTR;
2378         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2379                 rc = -ESHUTDOWN;
2380         else if (lp->lp_dc_error)
2381                 rc = lp->lp_dc_error;
2382         else if (!block)
2383                 CDEBUG(D_NET, "non-blocking discovery\n");
2384         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2385                 goto again;
2386
2387         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2388                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2389                libcfs_nid2str(lpni->lpni_nid), rc,
2390                (!block) ? "pending discovery" : "discovery complete");
2391
2392         return rc;
2393 }
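
/*
 * Illustrative caller sketch for lnet_discover_peer_locked() above
 * (not part of the kernel code; assumes the locking described in the
 * comment above, with lnet_net_lock(cpt) held on entry):
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	if (rc)
 *		... discovery failed, was interrupted, or LNet is
 *		    shutting down ...
 */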
2394
2395 /* Handle an incoming ack for a push. */
2396 static void
2397 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2398 {
2399         struct lnet_ping_buffer *pbuf;
2400
2401         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2402         spin_lock(&lp->lp_lock);
2403         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2404         lp->lp_push_error = ev->status;
2405         if (ev->status)
2406                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2407         else
2408                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2409         spin_unlock(&lp->lp_lock);
2410
2411         CDEBUG(D_NET, "peer %s ev->status %d\n",
2412                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2413 }
2414
2415 /* Handle a Reply message. This is the reply to a Ping message. */
2416 static void
2417 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2418 {
2419         struct lnet_ping_buffer *pbuf;
2420         int rc;
2421
2422         spin_lock(&lp->lp_lock);
2423
2424         lp->lp_disc_src_nid = ev->target.nid;
2425
2426         /*
2427          * If some kind of error happened the contents of the message
2428          * cannot be used. Set PING_FAILED to trigger a retry.
2429          */
2430         if (ev->status) {
2431                 lp->lp_state |= LNET_PEER_PING_FAILED;
2432                 lp->lp_ping_error = ev->status;
2433                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2434                        ev->status,
2435                        libcfs_nid2str(lp->lp_primary_nid),
2436                        libcfs_nid2str(ev->source.nid));
2437                 goto out;
2438         }
2439
2440         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2441         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2442                 lnet_swap_pinginfo(pbuf);
2443
2444         /*
2445          * A reply with invalid or corrupted info. Set PING_FAILED to
2446          * trigger a retry.
2447          */
2448         rc = lnet_ping_info_validate(&pbuf->pb_info);
2449         if (rc) {
2450                 lp->lp_state |= LNET_PEER_PING_FAILED;
2451                 lp->lp_ping_error = 0;
2452                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2453                        libcfs_nid2str(lp->lp_primary_nid), rc);
2454                 goto out;
2455         }
2456
2457
2458         /*
2459          * The peer may have discovery disabled at its end. Set
2460          * NO_DISCOVERY as appropriate.
2461          */
2462         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2463             !lnet_peer_discovery_disabled) {
2464                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2465                        libcfs_nid2str(lp->lp_primary_nid));
2466                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2467         } else {
2468                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2469                        libcfs_nid2str(lp->lp_primary_nid));
2470                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2471         }
2472
2473         /*
2474          * Update the MULTI_RAIL flag based on the reply. If the peer
2475          * was configured with DLC then the setting should match what
2476          * DLC put in.
2477          */
2478         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2479                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2480                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2481                                libcfs_nid2str(lp->lp_primary_nid), lp);
2482                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2483                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2484                               libcfs_nid2str(lp->lp_primary_nid));
2485                 } else if (lnet_peer_discovery_disabled) {
2486                         CDEBUG(D_NET,
2487                                "peer %s(%p) not MR: DD disabled locally\n",
2488                                libcfs_nid2str(lp->lp_primary_nid), lp);
2489                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2490                         CDEBUG(D_NET,
2491                                "peer %s(%p) not MR: DD disabled remotely\n",
2492                                libcfs_nid2str(lp->lp_primary_nid), lp);
2493                 } else {
2494                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2495                                libcfs_nid2str(lp->lp_primary_nid), lp);
2496                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2497                         lnet_peer_clr_non_mr_pref_nids(lp);
2498                 }
2499         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2500                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2501                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2502                               libcfs_nid2str(lp->lp_primary_nid));
2503                 } else {
2504                         CERROR("Multi-Rail state vanished from %s\n",
2505                                libcfs_nid2str(lp->lp_primary_nid));
2506                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2507                 }
2508         }
2509
2510         /*
2511          * Make sure we'll allocate the correct size ping buffer when
2512          * pinging the peer.
2513          */
2514         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2515                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2516
2517         /*
2518          * Check for truncation of the Reply. Clear PING_SENT and set
2519          * PING_FAILED to trigger a retry.
2520          */
2521         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2522                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2523                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2524                 lp->lp_state |= LNET_PEER_PING_FAILED;
2525                 lp->lp_ping_error = 0;
2526                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2527                        libcfs_nid2str(lp->lp_primary_nid),
2528                        pbuf->pb_info.pi_nnis);
2529                 goto out;
2530         }
2531
2532         /*
2533          * Check the sequence numbers in the reply. These are only
2534          * available if the reply came from a Multi-Rail peer.
2535          */
2536         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2537             pbuf->pb_info.pi_nnis > 1 &&
2538             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2539                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2540                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2541                                 libcfs_nid2str(lp->lp_primary_nid),
2542                                 LNET_PING_BUFFER_SEQNO(pbuf),
2543                                 lp->lp_peer_seqno);
2544
2545                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2546         }
2547
2548         /* We're happy with the state of the data in the buffer. */
2549         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2550                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2551         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2552                 lnet_ping_buffer_decref(lp->lp_data);
2553         else
2554                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2555         lnet_ping_buffer_addref(pbuf);
2556         lp->lp_data = pbuf;
2557 out:
2558         lp->lp_state &= ~LNET_PEER_PING_SENT;
2559         spin_unlock(&lp->lp_lock);
2560
2561         lnet_net_lock(LNET_LOCK_EX);
2562         /*
2563          * If this peer is a gateway, call the routing callback to
2564          * handle the ping reply
2565          */
2566         if (lp->lp_rtr_refcount > 0)
2567                 lnet_router_discovery_ping_reply(lp);
2568         lnet_net_unlock(LNET_LOCK_EX);
2569 }
2570
2571 /*
2572  * Send event handling. Only matters for error cases, where we clean
2573  * up state on the peer and peer_ni that would otherwise be updated in
2574  * the REPLY event handler for a successful Ping, and the ACK event
2575  * handler for a successful Push.
2576  */
2577 static int
2578 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2579 {
2580         int rc = 0;
2581
2582         if (!ev->status)
2583                 goto out;
2584
2585         spin_lock(&lp->lp_lock);
2586         if (ev->msg_type == LNET_MSG_GET) {
2587                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2588                 lp->lp_state |= LNET_PEER_PING_FAILED;
2589                 lp->lp_ping_error = ev->status;
2590         } else { /* ev->msg_type == LNET_MSG_PUT */
2591                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2592                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2593                 lp->lp_push_error = ev->status;
2594         }
2595         spin_unlock(&lp->lp_lock);
2596         rc = LNET_REDISCOVER_PEER;
2597 out:
2598         CDEBUG(D_NET, "%s Send to %s: %d\n",
2599                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2600                 libcfs_nid2str(ev->target.nid), rc);
2601         return rc;
2602 }
2603
2604 /*
2605  * Unlink event handling. This event is only seen if a call to
2606  * LNetMDUnlink() caused the event to be unlinked. If this call was
2607  * made after the event was set up in LNetGet() or LNetPut() then we
2608  * assume the Ping or Push timed out.
2609  */
2610 static void
2611 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2612 {
2613         spin_lock(&lp->lp_lock);
2614         /* We've passed through LNetGet() */
2615         if (lp->lp_state & LNET_PEER_PING_SENT) {
2616                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2617                 lp->lp_state |= LNET_PEER_PING_FAILED;
2618                 lp->lp_ping_error = -ETIMEDOUT;
2619                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2620                         libcfs_nid2str(lp->lp_primary_nid));
2621         }
2622         /* We've passed through LNetPut() */
2623         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2624                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2625                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2626                 lp->lp_push_error = -ETIMEDOUT;
2627                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2628                         libcfs_nid2str(lp->lp_primary_nid));
2629         }
2630         spin_unlock(&lp->lp_lock);
2631 }
2632
2633 /*
2634  * Event handler for the discovery EQ.
2635  *
2636  * Called with lnet_res_lock(cpt) held. The cpt is the
2637  * lnet_cpt_of_cookie() of the md handle cookie.
2638  */
2639 static void lnet_discovery_event_handler(struct lnet_event *event)
2640 {
2641         struct lnet_peer *lp = event->md_user_ptr;
2642         struct lnet_ping_buffer *pbuf;
2643         int rc;
2644
2645         /* discovery needs to take another look */
2646         rc = LNET_REDISCOVER_PEER;
2647
2648         CDEBUG(D_NET, "Received event: %d\n", event->type);
2649
2650         switch (event->type) {
2651         case LNET_EVENT_ACK:
2652                 lnet_discovery_event_ack(lp, event);
2653                 break;
2654         case LNET_EVENT_REPLY:
2655                 lnet_discovery_event_reply(lp, event);
2656                 break;
2657         case LNET_EVENT_SEND:
2658                 /* Only send failure triggers a retry. */
2659                 rc = lnet_discovery_event_send(lp, event);
2660                 break;
2661         case LNET_EVENT_UNLINK:
2662                 /* LNetMDUnlink() was called */
2663                 lnet_discovery_event_unlink(lp, event);
2664                 break;
2665         default:
2666                 /* Invalid events. */
2667                 LBUG();
2668         }
2669         lnet_net_lock(LNET_LOCK_EX);
2670         if (event->unlinked) {
2671                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2672                 lnet_ping_buffer_decref(pbuf);
2673                 lnet_peer_decref_locked(lp);
2674         }
2675
2676         /* put peer back at end of request queue, if discovery not already
2677          * done */
2678         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2679                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2680                 wake_up(&the_lnet.ln_dc_waitq);
2681         }
2682         lnet_net_unlock(LNET_LOCK_EX);
2683 }
2684
2685 /*
2686  * Build a peer from incoming data.
2687  *
2688  * The NIDs in the incoming data are supposed to be structured as follows:
2689  *  - loopback
2690  *  - primary NID
2691  *  - other NIDs in same net
2692  *  - NIDs in second net
2693  *  - NIDs in third net
2694  *  - ...
2695  * This is due to the way the list of NIDs in the data is created.
2696  *
2697  * Note that this function will mark the peer uptodate unless an
2698  * ENOMEM is encountered. All other errors are due to a conflict
2699  * between the DLC configuration and what discovery sees. We treat DLC
2700  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2701  * peer from becoming stuck in discovery.
2702  */
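/*
 * For example (hypothetical addresses, shown for illustration only), a
 * peer with two NIDs on tcp and one NID on o2ib would present its ping
 * data as:
 *
 *	pi_ni[0]	0@lo			loopback
 *	pi_ni[1]	192.168.1.2@tcp		primary NID
 *	pi_ni[2]	192.168.1.3@tcp		other NID in same net
 *	pi_ni[3]	10.10.0.2@o2ib		NID in second net
 */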
2703 static int lnet_peer_merge_data(struct lnet_peer *lp,
2704                                 struct lnet_ping_buffer *pbuf)
2705 {
2706         struct lnet_peer_ni *lpni;
2707         lnet_nid_t *curnis = NULL;
2708         struct lnet_ni_status *addnis = NULL;
2709         lnet_nid_t *delnis = NULL;
2710         unsigned flags;
2711         int ncurnis;
2712         int naddnis;
2713         int ndelnis;
2714         int nnis = 0;
2715         int i;
2716         int j;
2717         int rc;
2718
2719         flags = LNET_PEER_DISCOVERED;
2720         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2721                 flags |= LNET_PEER_MULTI_RAIL;
2722
2723         /*
2724          * Cache the routing feature for the peer; whether it is enabled
2725          * or disabled as reported by the remote peer.
2726          */
2727         spin_lock(&lp->lp_lock);
2728         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2729                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2730         else
2731                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2732         spin_unlock(&lp->lp_lock);
2733
2734         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2735         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2736         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2737         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2738         if (!curnis || !addnis || !delnis) {
2739                 rc = -ENOMEM;
2740                 goto out;
2741         }
2742         ncurnis = 0;
2743         naddnis = 0;
2744         ndelnis = 0;
2745
2746         /* Construct the list of NIDs present in peer. */
2747         lpni = NULL;
2748         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2749                 curnis[ncurnis++] = lpni->lpni_nid;
2750
2751         /*
2752          * Check for NIDs in pbuf not present in curnis[].
2753          * The loop starts at 1 to skip the loopback NID.
2754          */
2755         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2756                 for (j = 0; j < ncurnis; j++)
2757                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2758                                 break;
2759                 if (j == ncurnis)
2760                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2761         }
2762         /*
2763          * Check for NIDs in curnis[] not present in pbuf.
2764          * The nested loop starts at 1 to skip the loopback NID.
2765          *
2766          * But never add the loopback NID to delnis[]: if it is
2767          * present in curnis[] then this peer is for this node.
2768          */
2769         for (i = 0; i < ncurnis; i++) {
2770                 if (curnis[i] == LNET_NID_LO_0)
2771                         continue;
2772                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2773                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2774                                 /*
2775                                  * update the information we cache for the
2776                                  * peer with the latest information we
2777                                  * received
2778                                  */
2779                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2780                                 if (lpni) {
2781                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2782                                         lnet_peer_ni_decref_locked(lpni);
2783                                 }
2784                                 break;
2785                         }
2786                 }
2787                 if (j == pbuf->pb_info.pi_nnis)
2788                         delnis[ndelnis++] = curnis[i];
2789         }
2790
2791         /*
2792          * If we get here and the discovery is disabled then we don't want
2793          * to add or delete any NIs. We just update the ones we have some
2794          * information on, and call it a day.
2795          */
2796         rc = 0;
2797         if (lnet_is_discovery_disabled(lp))
2798                 goto out;
2799
2800         for (i = 0; i < naddnis; i++) {
2801                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2802                 if (rc) {
2803                         CERROR("Error adding NID %s to peer %s: %d\n",
2804                                libcfs_nid2str(addnis[i].ns_nid),
2805                                libcfs_nid2str(lp->lp_primary_nid), rc);
2806                         if (rc == -ENOMEM)
2807                                 goto out;
2808                 }
2809                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2810                 if (lpni) {
2811                         lpni->lpni_ns_status = addnis[i].ns_status;
2812                         lnet_peer_ni_decref_locked(lpni);
2813                 }
2814         }
2815
2816         for (i = 0; i < ndelnis; i++) {
2817                 /*
2818                  * for routers it's okay to delete the primary_nid because
2819                  * the upper layers don't really rely on it. So if we're
2820                  * being told that the router changed its primary_nid
2821                  * then it's okay to delete it.
2822                  */
2823                 if (lp->lp_rtr_refcount > 0)
2824                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2825                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2826                 if (rc) {
2827                         CERROR("Error deleting NID %s from peer %s: %d\n",
2828                                libcfs_nid2str(delnis[i]),
2829                                libcfs_nid2str(lp->lp_primary_nid), rc);
2830                         if (rc == -ENOMEM)
2831                                 goto out;
2832                 }
2833         }
2834         /*
2835          * Errors other than -ENOMEM are due to peers having been
2836          * configured with DLC. Ignore these because DLC overrides
2837          * Discovery.
2838          */
2839         rc = 0;
2840 out:
2841         CFS_FREE_PTR_ARRAY(curnis, nnis);
2842         CFS_FREE_PTR_ARRAY(addnis, nnis);
2843         CFS_FREE_PTR_ARRAY(delnis, nnis);
2844         lnet_ping_buffer_decref(pbuf);
2845         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2846
2847         if (rc) {
2848                 spin_lock(&lp->lp_lock);
2849                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2850                 lp->lp_state |= LNET_PEER_FORCE_PING;
2851                 spin_unlock(&lp->lp_lock);
2852         }
2853         return rc;
2854 }
2855
2856 /*
2857  * The data in pbuf says lp is its primary peer, but the data was
2858  * received by a different peer. Try to update lp with the data.
2859  */
2860 static int
2861 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2862 {
2863         struct lnet_handle_md mdh;
2864
2865         /* Queue lp for discovery, and force it on the request queue. */
2866         lnet_net_lock(LNET_LOCK_EX);
2867         if (lnet_peer_queue_for_discovery(lp))
2868                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2869         lnet_net_unlock(LNET_LOCK_EX);
2870
2871         LNetInvalidateMDHandle(&mdh);
2872
2873         /*
2874          * Decide whether we can move the peer to the DATA_PRESENT state.
2875          *
2876          * We replace stale data for a multi-rail peer, repair PING_FAILED
2877          * status, and preempt FORCE_PING.
2878          *
2879          * If after that we have DATA_PRESENT, we merge it into this peer.
2880          */
2881         spin_lock(&lp->lp_lock);
2882         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2883                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2884                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2885                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2886                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2887                         lnet_ping_buffer_decref(pbuf);
2888                         pbuf = lp->lp_data;
2889                         lp->lp_data = NULL;
2890                 }
2891         }
2892         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2893                 lnet_ping_buffer_decref(lp->lp_data);
2894                 lp->lp_data = NULL;
2895                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2896         }
2897         if (lp->lp_state & LNET_PEER_PING_FAILED) {
2898                 mdh = lp->lp_ping_mdh;
2899                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2900                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2901                 lp->lp_ping_error = 0;
2902         }
2903         if (lp->lp_state & LNET_PEER_FORCE_PING)
2904                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2905         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2906         spin_unlock(&lp->lp_lock);
2907
2908         if (!LNetMDHandleIsInvalid(mdh))
2909                 LNetMDUnlink(mdh);
2910
2911         if (pbuf)
2912                 return lnet_peer_merge_data(lp, pbuf);
2913
2914         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2915         return 0;
2916 }
2917
2918 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2919 {
2920         int i;
2921
2922         for (i = 0; i < pinfo->pi_nnis; i++) {
2923                 if (pinfo->pi_ni[i].ns_nid == nid)
2924                         return true;
2925         }
2926
2927         return false;
2928 }
2929
2930 /*
2931  * Update a peer using the data received.
2932  */
2933 static int lnet_peer_data_present(struct lnet_peer *lp)
2934 __must_hold(&lp->lp_lock)
2935 {
2936         struct lnet_ping_buffer *pbuf;
2937         struct lnet_peer_ni *lpni;
2938         lnet_nid_t nid = LNET_NID_ANY;
2939         unsigned flags;
2940         int rc = 0;
2941
2942         pbuf = lp->lp_data;
2943         lp->lp_data = NULL;
2944         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2945         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2946         spin_unlock(&lp->lp_lock);
2947
2948         /*
2949          * Modifications of peer structures are done while holding the
2950          * ln_api_mutex. A global lock is required because we may be
2951          * modifying multiple peer structures, and a mutex greatly
2952          * simplifies memory management.
2953          *
2954          * The actual changes to the data structures must also protect
2955          * against concurrent lookups, for which the lnet_net_lock in
2956          * LNET_LOCK_EX mode is used.
2957          */
2958         mutex_lock(&the_lnet.ln_api_mutex);
2959         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2960                 rc = -ESHUTDOWN;
2961                 goto out;
2962         }
2963
2964         /*
2965          * If this peer is not on the peer list then it is being torn
2966          * down, and our reference count may be all that is keeping it
2967          * alive. Don't do any work on it.
2968          */
2969         if (list_empty(&lp->lp_peer_list))
2970                 goto out;
2971
2972         flags = LNET_PEER_DISCOVERED;
2973         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2974                 flags |= LNET_PEER_MULTI_RAIL;
2975
2976         /*
2977          * Check whether the primary NID in the message matches the
2978          * primary NID of the peer. If it does, update the peer, if
2979          * it does not, check whether there is already a peer with
2980          * that primary NID. If no such peer exists, try to update
2981          * the primary NID of the current peer (allowed if it was
2982          * created due to message traffic) and complete the update.
2983          * If the peer did exist, hand off the data to it.
2984          *
2985          * The peer for the loopback interface is a special case: this
2986          * is the peer for the local node, and we want to set its
2987          * primary NID to the correct value here. Moreover, this peer
2988          * can show up with only the loopback NID in the ping buffer.
2989          */
2990         if (pbuf->pb_info.pi_nnis <= 1)
2991                 goto out;
2992         nid = pbuf->pb_info.pi_ni[1].ns_nid;
2993         if (lp->lp_primary_nid == LNET_NID_LO_0) {
2994                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2995                 if (!rc)
2996                         rc = lnet_peer_merge_data(lp, pbuf);
2997         /*
2998          * If the primary NID we have cached for the peer is present in the
2999          * ping info returned from the peer, but it is not the primary NID
3000          * that the peer reports, and discovery is disabled, then we don't
3001          * want to update our local peer info by adding or removing NIDs;
3002          * we just want to update the status of the NIDs that we currently
3003          * have recorded in that peer.
3004          */
3005         } else if (lp->lp_primary_nid == nid ||
3006                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3007                     lnet_is_discovery_disabled(lp))) {
3008                 rc = lnet_peer_merge_data(lp, pbuf);
3009         } else {
3010                 lpni = lnet_find_peer_ni_locked(nid);
3011                 if (!lpni) {
3012                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3013                         if (rc) {
3014                                 CERROR("Primary NID error %s versus %s: %d\n",
3015                                        libcfs_nid2str(lp->lp_primary_nid),
3016                                        libcfs_nid2str(nid), rc);
3017                         } else {
3018                                 rc = lnet_peer_merge_data(lp, pbuf);
3019                         }
3020                 } else {
3021                         struct lnet_peer *new_lp;
3022                         new_lp = lpni->lpni_peer_net->lpn_peer;
3023                         /*
3024                          * if lp has discovery/MR enabled that means new_lp
3025                          * should have discovery/MR enabled as well, since
3026                          * it's the same peer, which we're about to merge
3027                          */
3028                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3029                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3030                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3031                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3032
3033                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3034                         lnet_consolidate_routes_locked(lp, new_lp);
3035                         lnet_peer_ni_decref_locked(lpni);
3036                 }
3037         }
3038 out:
3039         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3040                lp->lp_state);
3041         mutex_unlock(&the_lnet.ln_api_mutex);
3042
3043         spin_lock(&lp->lp_lock);
3044         /* Tell discovery to re-check the peer immediately. */
3045         if (!rc)
3046                 rc = LNET_REDISCOVER_PEER;
3047         return rc;
3048 }
3049
3050 /*
3051  * A ping failed. Clear the PING_FAILED state and set the
3052  * FORCE_PING state, to ensure a retry even if discovery is
3053  * disabled. This avoids being left with incorrect state.
3054  */
3055 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3056 __must_hold(&lp->lp_lock)
3057 {
3058         struct lnet_handle_md mdh;
3059         int rc;
3060
3061         mdh = lp->lp_ping_mdh;
3062         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3063         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3064         lp->lp_state |= LNET_PEER_FORCE_PING;
3065         rc = lp->lp_ping_error;
3066         lp->lp_ping_error = 0;
3067         spin_unlock(&lp->lp_lock);
3068
3069         if (!LNetMDHandleIsInvalid(mdh))
3070                 LNetMDUnlink(mdh);
3071
3072         CDEBUG(D_NET, "peer %s:%d\n",
3073                libcfs_nid2str(lp->lp_primary_nid), rc);
3074
3075         spin_lock(&lp->lp_lock);
3076         return rc ? rc : LNET_REDISCOVER_PEER;
3077 }
3078
3079 /*
3080  * Select NID to send a Ping or Push to.
3081  */
3082 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
3083 {
3084         struct lnet_peer_ni *lpni;
3085
3086         /* Look for a direct-connected NID for this peer. */
3087         lpni = NULL;
3088         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3089                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
3090                         continue;
3091                 break;
3092         }
3093         if (lpni)
3094                 return lpni->lpni_nid;
3095
3096         /* Look for a routed-connected NID for this peer. */
3097         lpni = NULL;
3098         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3099                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
3100                         continue;
3101                 break;
3102         }
3103         if (lpni)
3104                 return lpni->lpni_nid;
3105
3106         return LNET_NID_ANY;
3107 }
3108
3109 /* Active side of ping. */
3110 static int lnet_peer_send_ping(struct lnet_peer *lp)
3111 __must_hold(&lp->lp_lock)
3112 {
3113         lnet_nid_t pnid;
3114         int nnis;
3115         int rc;
3116         int cpt;
3117
3118         lp->lp_state |= LNET_PEER_PING_SENT;
3119         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3120         spin_unlock(&lp->lp_lock);
3121
3122         cpt = lnet_net_lock_current();
3123         /* Refcount for MD. */
3124         lnet_peer_addref_locked(lp);
3125         pnid = lnet_peer_select_nid(lp);
3126         lnet_net_unlock(cpt);
3127
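        /* Size the ping reply buffer for at least the minimum interface
         * count, or the NI count from the peer's previous data if larger.
         */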
3128         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3129
3130         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
3131                             the_lnet.ln_dc_handler, false);
3132
3133         /*
3134          * If LNetMDBind in lnet_send_ping fails we need to decrement the
3135          * refcount on the peer ourselves; otherwise LNetMDUnlink will be
3136          * called, which will eventually do that.
3137          */
3138         if (rc > 0) {
3139                 lnet_net_lock(cpt);
3140                 lnet_peer_decref_locked(lp);
3141                 lnet_net_unlock(cpt);
3142                 rc = -rc; /* change the rc to negative value */
3143                 goto fail_error;
3144         } else if (rc < 0) {
3145                 goto fail_error;
3146         }
3147
3148         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3149
3150         spin_lock(&lp->lp_lock);
3151         return 0;
3152
3153 fail_error:
3154         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3155         /*
3156          * The errors that get us here are considered hard errors and
3157          * cause Discovery to terminate. So we clear PING_SENT, but do
3158          * not set either PING_FAILED or FORCE_PING. In fact we need
3159          * to clear PING_FAILED, because the unlink event handler will
3160          * have set it if we called LNetMDUnlink() above.
3161          */
3162         spin_lock(&lp->lp_lock);
3163         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3164         return rc;
3165 }
3166
3167 /*
3168  * This function exists because you cannot call LNetMDUnlink() from an
3169  * event handler.
3170  */
3171 static int lnet_peer_push_failed(struct lnet_peer *lp)
3172 __must_hold(&lp->lp_lock)
3173 {
3174         struct lnet_handle_md mdh;
3175         int rc;
3176
3177         mdh = lp->lp_push_mdh;
3178         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3179         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3180         rc = lp->lp_push_error;
3181         lp->lp_push_error = 0;
3182         spin_unlock(&lp->lp_lock);
3183
3184         if (!LNetMDHandleIsInvalid(mdh))
3185                 LNetMDUnlink(mdh);
3186
3187         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3188         spin_lock(&lp->lp_lock);
3189         return rc ? rc : LNET_REDISCOVER_PEER;
3190 }
3191
3192 /*
3193  * Mark the peer as discovered.
3194  */
3195 static int lnet_peer_discovered(struct lnet_peer *lp)
3196 __must_hold(&lp->lp_lock)
3197 {
3198         lp->lp_state |= LNET_PEER_DISCOVERED;
3199         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3200                           LNET_PEER_REDISCOVER);
3201
3202         lp->lp_dc_error = 0;
3203
3204         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3205
3206         return 0;
3207 }
3208
3209 /* Active side of push. */
3210 static int lnet_peer_send_push(struct lnet_peer *lp)
3211 __must_hold(&lp->lp_lock)
3212 {
3213         struct lnet_ping_buffer *pbuf;
3214         struct lnet_process_id id;
3215         struct lnet_md md;
3216         int cpt;
3217         int rc;
3218
3219         /* Don't push to a non-multi-rail peer. */
3220         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3221                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3222                 /* if peer's NIDs are uptodate then peer is discovered */
3223                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3224                         rc = lnet_peer_discovered(lp);
3225                         return rc;
3226                 }
3227
3228                 return 0;
3229         }
3230
3231         lp->lp_state |= LNET_PEER_PUSH_SENT;
3232         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3233         spin_unlock(&lp->lp_lock);
3234
3235         cpt = lnet_net_lock_current();
3236         pbuf = the_lnet.ln_ping_target;
3237         lnet_ping_buffer_addref(pbuf);
3238         lnet_net_unlock(cpt);
3239
3240         /* Push source MD */
3241         md.start     = &pbuf->pb_info;
3242         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3243         md.threshold = 2; /* Put/Ack */
3244         md.max_size  = 0;
3245         md.options   = LNET_MD_TRACK_RESPONSE;
3246         md.handler   = the_lnet.ln_dc_handler;
3247         md.user_ptr  = lp;
3248
3249         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3250         if (rc) {
3251                 lnet_ping_buffer_decref(pbuf);
3252                 CERROR("Can't bind push source MD: %d\n", rc);
3253                 goto fail_error;
3254         }
3255         cpt = lnet_net_lock_current();
3256         /* Refcount for MD. */
3257         lnet_peer_addref_locked(lp);
3258         id.pid = LNET_PID_LUSTRE;
3259         id.nid = lnet_peer_select_nid(lp);
3260         lnet_net_unlock(cpt);
3261
3262         if (id.nid == LNET_NID_ANY) {
3263                 rc = -EHOSTUNREACH;
3264                 goto fail_unlink;
3265         }
3266
3267         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3268                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3269                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3270
3271         /*
3272          * Reset the discovery source NID. There is no need to restrict
3273          * sending from that source if we call lnet_push_update_to_peers().
3274          * It will be set to a specific NID again if we initiate discovery
3275          * from scratch.
3276          */
3277         lp->lp_disc_src_nid = LNET_NID_ANY;
3278
3279         if (rc)
3280                 goto fail_unlink;
3281
3282         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3283
3284         spin_lock(&lp->lp_lock);
3285         return 0;
3286
3287 fail_unlink:
3288         LNetMDUnlink(lp->lp_push_mdh);
3289         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3290 fail_error:
3291         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3292         /*
3293          * The errors that get us here are considered hard errors and
3294          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3295          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3296          * because the unlink event handler will have set it if we
3297          * called LNetMDUnlink() above.
3298          */
3299         spin_lock(&lp->lp_lock);
3300         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3301         return rc;
3302 }
3303
3304 /*
3305  * An unrecoverable error was encountered during discovery.
3306  * Set error status in peer and abort discovery.
3307  */
3308 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3309 {
3310         CDEBUG(D_NET, "Discovery error %s: %d\n",
3311                libcfs_nid2str(lp->lp_primary_nid), error);
3312
3313         spin_lock(&lp->lp_lock);
3314         lp->lp_dc_error = error;
3315         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3316         lp->lp_state |= LNET_PEER_REDISCOVER;
3317         spin_unlock(&lp->lp_lock);
3318 }
3319
3320 /*
3321  * Discovering this peer is taking too long. Cancel any Ping or Push
3322  * that discovery is waiting on by unlinking the relevant MDs. The
3323  * lnet_discovery_event_handler() will proceed from here and complete
3324  * the cleanup.
3325  */
3326 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3327 {
3328         struct lnet_handle_md ping_mdh;
3329         struct lnet_handle_md push_mdh;
3330
3331         LNetInvalidateMDHandle(&ping_mdh);
3332         LNetInvalidateMDHandle(&push_mdh);
3333
3334         spin_lock(&lp->lp_lock);
3335         if (lp->lp_state & LNET_PEER_PING_SENT) {
3336                 ping_mdh = lp->lp_ping_mdh;
3337                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3338         }
3339         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3340                 push_mdh = lp->lp_push_mdh;
3341                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3342         }
3343         spin_unlock(&lp->lp_lock);
3344
3345         if (!LNetMDHandleIsInvalid(ping_mdh))
3346                 LNetMDUnlink(ping_mdh);
3347         if (!LNetMDHandleIsInvalid(push_mdh))
3348                 LNetMDUnlink(push_mdh);
3349 }
3350
3351 /*
3352  * Wait for work to be queued or some other change that must be
3353  * attended to. Returns non-zero if the discovery thread should shut
3354  * down.
3355  */
3356 static int lnet_peer_discovery_wait_for_work(void)
3357 {
3358         int cpt;
3359         int rc = 0;
3360
3361         DEFINE_WAIT(wait);
3362
3363         cpt = lnet_net_lock_current();
3364         for (;;) {
3365                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3366                                 TASK_INTERRUPTIBLE);
3367                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3368                         break;
3369                 if (lnet_push_target_resize_needed() ||
3370                     the_lnet.ln_push_target->pb_needs_post)
3371                         break;
3372                 if (!list_empty(&the_lnet.ln_dc_request))
3373                         break;
3374                 if (!list_empty(&the_lnet.ln_msg_resend))
3375                         break;
3376                 lnet_net_unlock(cpt);
3377
3378                 /*
3379                  * Wake up at most once per second to check for peers that
3380                  * have been stuck on the working queue for longer than
3381                  * the peer timeout.
3382                  */
3383                 schedule_timeout(cfs_time_seconds(1));
3384                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3385                 cpt = lnet_net_lock_current();
3386         }
3387         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3388
3389         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3390                 rc = -ESHUTDOWN;
3391
3392         lnet_net_unlock(cpt);
3393
3394         CDEBUG(D_NET, "woken: %d\n", rc);
3395
3396         return rc;
3397 }
3398
3399 /*
3400  * Messages that were pending on a destroyed peer are put on a global
3401  * resend list. The discovery thread checks this list when it wakes up
3402  * and resends the messages it finds there. These messages can still be
3403  * sendable if the lpni that originally caused the re-queue has since
3404  * been transferred to another peer.
3405  *
3406  * It is possible that LNet is shut down while we're iterating through
3407  * the list. lnet_shutdown_lndnets() will attempt to access the resend
3408  * list, but will have to wait until the spinlock is released, by which
3409  * time there shouldn't be any more messages on the resend list. During
3410  * shutdown lnet_send() will fail and lnet_finalize() will be called for
3411  * the messages so they can be released. The other case is that
3412  * lnet_shutdown_lndnets() finalizes all the messages before this
3413  * function can visit the resend list, in which case this function is
3414  * a no-op.
3415  */
3416 static void lnet_resend_msgs(void)
3417 {
3418         struct lnet_msg *msg, *tmp;
3419         LIST_HEAD(resend);
3420         int rc;
3421
3422         spin_lock(&the_lnet.ln_msg_resend_lock);
3423         list_splice(&the_lnet.ln_msg_resend, &resend);
3424         spin_unlock(&the_lnet.ln_msg_resend_lock);
3425
3426         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3427                 list_del_init(&msg->msg_list);
3428                 rc = lnet_send(msg->msg_src_nid_param, msg,
3429                                msg->msg_rtr_nid_param);
3430                 if (rc < 0) {
3431                         CNETERR("Error sending %s to %s: %d\n",
3432                                lnet_msgtyp2str(msg->msg_type),
3433                                libcfs_id2str(msg->msg_target), rc);
3434                         lnet_finalize(msg, rc);
3435                 }
3436         }
3437 }
3438
3439 /* The discovery thread. */
3440 static int lnet_peer_discovery(void *arg)
3441 {
3442         struct lnet_peer *lp;
3443         int rc;
3444
3445         wait_for_completion(&the_lnet.ln_started);
3446
3447         CDEBUG(D_NET, "started\n");
3448
3449         for (;;) {
3450                 if (lnet_peer_discovery_wait_for_work())
3451                         break;
3452
3453                 if (lnet_push_target_resize_needed())
3454                         lnet_push_target_resize();
3455                 else if (the_lnet.ln_push_target->pb_needs_post)
3456                         lnet_push_target_post(the_lnet.ln_push_target,
3457                                               &the_lnet.ln_push_target_md);
3458
3459                 lnet_resend_msgs();
3460
3461                 lnet_net_lock(LNET_LOCK_EX);
3462                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3463                         lnet_net_unlock(LNET_LOCK_EX);
3464                         break;
3465                 }
3466
3467                 /*
3468                  * Process all incoming discovery work requests.  When
3469                  * discovery must wait on a peer to change state, it
3470                  * is added to the tail of the ln_dc_working queue. A
3471                  * timestamp keeps track of when the peer was added,
3472                  * so we can time out discovery requests that take too
3473                  * long.
3474                  */
3475                 while (!list_empty(&the_lnet.ln_dc_request)) {
3476                         lp = list_first_entry(&the_lnet.ln_dc_request,
3477                                               struct lnet_peer, lp_dc_list);
3478                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3479                         /*
3480                          * set the time the peer was put on the dc_working
3481                          * queue. It shouldn't remain on the queue
3482                          * forever, in case the GET message (for ping)
3483                          * doesn't get a REPLY or the PUT message (for
3484                          * push) doesn't get an ACK.
3485                          */
3486                         lp->lp_last_queued = ktime_get_real_seconds();
3487                         lnet_net_unlock(LNET_LOCK_EX);
3488
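                        /* Keep the push target serviced while we drain the
                         * discovery request queue.
                         */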
3489                         if (lnet_push_target_resize_needed())
3490                                 lnet_push_target_resize();
3491                         else if (the_lnet.ln_push_target->pb_needs_post)
3492                                 lnet_push_target_post(the_lnet.ln_push_target,
3493                                                       &the_lnet.ln_push_target_md);
3494
3495                         /*
3496                          * Select an action depending on the state of
3497                          * the peer and whether discovery is disabled.
3498                          * The check for whether discovery is disabled is
3499                          * done after the code that handles processing
3500                          * for arrived data, cleanup for failures, and
3501                          * forcing a Ping or Push.
3502                          */
3503                         spin_lock(&lp->lp_lock);
3504                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3505                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3506                                 lp->lp_state);
3507                         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3508                                 rc = lnet_peer_data_present(lp);
3509                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3510                                 rc = lnet_peer_ping_failed(lp);
3511                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3512                                 rc = lnet_peer_push_failed(lp);
3513                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3514                                 rc = lnet_peer_send_ping(lp);
3515                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3516                                 rc = lnet_peer_send_push(lp);
3517                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3518                                 rc = lnet_peer_send_ping(lp);
3519                         else if (lnet_peer_needs_push(lp))
3520                                 rc = lnet_peer_send_push(lp);
3521                         else
3522                                 rc = lnet_peer_discovered(lp);
3523                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3524                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3525                                 lp->lp_state, rc);
3526                         spin_unlock(&lp->lp_lock);
3527
3528                         lnet_net_lock(LNET_LOCK_EX);
3529                         if (rc == LNET_REDISCOVER_PEER) {
3530                                 list_move(&lp->lp_dc_list,
3531                                           &the_lnet.ln_dc_request);
3532                         } else if (rc) {
3533                                 lnet_peer_discovery_error(lp, rc);
3534                         }
3535                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3536                                 lnet_peer_discovery_complete(lp);
3537                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3538                                 break;
3539
3540                         if (lp->lp_state & LNET_PEER_MARK_DELETION) {
3541                                 struct list_head rlist;
3542                                 struct lnet_route *route, *tmp;
3543                                 int sensitivity = lp->lp_health_sensitivity;
3544
3545                                 INIT_LIST_HEAD(&rlist);
3546
3547                                 /*
3548                                  * remove the peer from the discovery work
3549                                  * queue if it's on there in preparation
3550                                  * for deleting it.
3551                                  */
3552                                 if (!list_empty(&lp->lp_dc_list))
3553                                         list_del(&lp->lp_dc_list);
3554
3555                                 lnet_net_unlock(LNET_LOCK_EX);
3556
3557                                 mutex_lock(&the_lnet.ln_api_mutex);
3558
3559                                 lnet_net_lock(LNET_LOCK_EX);
3560                                 list_for_each_entry_safe(route, tmp,
3561                                                          &lp->lp_routes,
3562                                                          lr_gwlist)
3563                                         lnet_move_route(route, NULL, &rlist);
3564                                 lnet_net_unlock(LNET_LOCK_EX);
3565
3566                                 /* delete the peer */
3567                                 lnet_peer_del(lp);
3568
3569                                 list_for_each_entry_safe(route, tmp,
3570                                                          &rlist, lr_list) {
3571                                         /* re-add these routes */
3572                                         lnet_add_route(route->lr_net,
3573                                                        route->lr_hops,
3574                                                        route->lr_nid,
3575                                                        route->lr_priority,
3576                                                        sensitivity);
3577                                         LIBCFS_FREE(route, sizeof(*route));
3578                                 }
3579                                 mutex_unlock(&the_lnet.ln_api_mutex);
3580
3581                                 lnet_net_lock(LNET_LOCK_EX);
3582                         }
3583                 }
3584
3585                 lnet_net_unlock(LNET_LOCK_EX);
3586         }
3587
3588         CDEBUG(D_NET, "stopping\n");
3589         /*
3590          * Clean up before telling lnet_peer_discovery_stop() that
3591          * we're done. Use wake_up() below to somewhat reduce the
3592          * size of the thundering herd if there are multiple threads
3593          * waiting on discovery of a single peer.
3594          */
3595
3596         /* Queue cleanup 1: stop all pending pings and pushes. */
3597         lnet_net_lock(LNET_LOCK_EX);
3598         while (!list_empty(&the_lnet.ln_dc_working)) {
3599                 lp = list_first_entry(&the_lnet.ln_dc_working,
3600                                       struct lnet_peer, lp_dc_list);
3601                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3602                 lnet_net_unlock(LNET_LOCK_EX);
3603                 lnet_peer_cancel_discovery(lp);
3604                 lnet_net_lock(LNET_LOCK_EX);
3605         }
3606         lnet_net_unlock(LNET_LOCK_EX);
3607
3608         /* Queue cleanup 2: wait for the expired queue to clear. */
3609         while (!list_empty(&the_lnet.ln_dc_expired))
3610                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3611
3612         /* Queue cleanup 3: clear the request queue. */
3613         lnet_net_lock(LNET_LOCK_EX);
3614         while (!list_empty(&the_lnet.ln_dc_request)) {
3615                 lp = list_first_entry(&the_lnet.ln_dc_request,
3616                                       struct lnet_peer, lp_dc_list);
3617                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3618                 lnet_peer_discovery_complete(lp);
3619         }
3620         lnet_net_unlock(LNET_LOCK_EX);
3621
3622         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3623         the_lnet.ln_dc_handler = NULL;
3624
3625         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3626         wake_up(&the_lnet.ln_dc_waitq);
3627
3628         CDEBUG(D_NET, "stopped\n");
3629
3630         return 0;
3631 }
3632
3633 /* ln_api_mutex is held on entry. */
3634 int lnet_peer_discovery_start(void)
3635 {
3636         struct task_struct *task;
3637         int rc = 0;
3638
3639         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3640                 return -EALREADY;
3641
3642         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3643         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3644         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3645         if (IS_ERR(task)) {
3646                 rc = PTR_ERR(task);
3647                 CERROR("Can't start peer discovery thread: %d\n", rc);
3648
3649                 the_lnet.ln_dc_handler = NULL;
3650
3651                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3652         }
3653
3654         CDEBUG(D_NET, "discovery start: %d\n", rc);
3655
3656         return rc;
3657 }
3658
3659 /* ln_api_mutex is held on entry. */
3660 void lnet_peer_discovery_stop(void)
3661 {
3662         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3663                 return;
3664
3665         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3666         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3667
3668         /* In the LNetNIInit() path we may be stopping discovery before it
3669          * entered its work loop
3670          */
3671         if (!completion_done(&the_lnet.ln_started))
3672                 complete(&the_lnet.ln_started);
3673         else
3674                 wake_up(&the_lnet.ln_dc_waitq);
3675
3676         wait_event(the_lnet.ln_dc_waitq,
3677                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3678
3679         LASSERT(list_empty(&the_lnet.ln_dc_request));
3680         LASSERT(list_empty(&the_lnet.ln_dc_working));
3681         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3682
3683         CDEBUG(D_NET, "discovery stopped\n");
3684 }
3685
3686 /* Debugging */
3687
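/*
 * Log refcount, aliveness and credit information for the peer NI with
 * NID @nid.
 */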
3688 void
3689 lnet_debug_peer(lnet_nid_t nid)
3690 {
3691         char                    *aliveness = "NA";
3692         struct lnet_peer_ni     *lp;
3693         int                     cpt;
3694
3695         cpt = lnet_cpt_of_nid(nid, NULL);
3696         lnet_net_lock(cpt);
3697
3698         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3699         if (IS_ERR(lp)) {
3700                 lnet_net_unlock(cpt);
3701                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3702                 return;
3703         }
3704
3705         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3706                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3707
3708         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3709                libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3710                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3711                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3712                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3713
3714         lnet_peer_ni_decref_locked(lp);
3715
3716         lnet_net_unlock(cpt);
3717 }
3718
3719 /* Gathering information for userspace. */
3720
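/*
 * Report the NID, refcount, aliveness and credit counters of the
 * peer_index'th peer NI found in the peer table for CPT *cpt_iter.
 * Returns -ENOENT when there are no more entries to report.
 */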
3721 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3722                           char aliveness[LNET_MAX_STR_LEN],
3723                           __u32 *cpt_iter, __u32 *refcount,
3724                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3725                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3726                           __u32 *peer_tx_qnob)
3727 {
3728         struct lnet_peer_table          *peer_table;
3729         struct lnet_peer_ni             *lp;
3730         int                             j;
3731         int                             lncpt;
3732         bool                            found = false;
3733
3734         /* get the number of CPTs */
3735         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3736
3737         /* if the cpt number to be examined is >= the number of cpts in
3738          * the system then indicate that there are no more cpts to examine
3739          */
3740         if (*cpt_iter >= lncpt)
3741                 return -ENOENT;
3742
3743         /* get the current table */
3744         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3745         /* if the ptable is NULL then there are no more cpts to examine */
3746         if (peer_table == NULL)
3747                 return -ENOENT;
3748
3749         lnet_net_lock(*cpt_iter);
3750
3751         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3752                 struct list_head *peers = &peer_table->pt_hash[j];
3753
3754                 list_for_each_entry(lp, peers, lpni_hashlist) {
3755                         if (peer_index-- > 0)
3756                                 continue;
3757
3758                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3759                         if (lnet_isrouter(lp) ||
3760                                 lnet_peer_aliveness_enabled(lp))
3761                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3762                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3763
3764                         *nid = lp->lpni_nid;
3765                         *refcount = atomic_read(&lp->lpni_refcount);
3766                         *ni_peer_tx_credits =
3767                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3768                         *peer_tx_credits = lp->lpni_txcredits;
3769                         *peer_rtr_credits = lp->lpni_rtrcredits;
3770                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
3771                         *peer_tx_qnob = lp->lpni_txqnob;
3772
3773                         found = true;
3774                 }
3775
3776         }
3777         lnet_net_unlock(*cpt_iter);
3778
3779         *cpt_iter = lncpt;
3780
3781         return found ? 0 : -ENOENT;
3782 }
3783
3784 /* ln_api_mutex is held, which keeps the peer list stable */
3785 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3786 {
3787         struct lnet_ioctl_element_stats *lpni_stats;
3788         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3789         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3790         struct lnet_peer_ni_credit_info *lpni_info;
3791         struct lnet_peer_ni *lpni;
3792         struct lnet_peer *lp;
3793         lnet_nid_t nid;
3794         __u32 size;
3795         int rc;
3796
3797         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3798
3799         if (!lp) {
3800                 rc = -ENOENT;
3801                 goto out;
3802         }
3803
3804         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3805                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3806         size *= lp->lp_nnis;
3807         if (size > cfg->prcfg_size) {
3808                 cfg->prcfg_size = size;
3809                 rc = -E2BIG;
3810                 goto out_lp_decref;
3811         }
3812
3813         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3814         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3815         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3816         cfg->prcfg_count = lp->lp_nnis;
3817         cfg->prcfg_size = size;
3818         cfg->prcfg_state = lp->lp_state;
3819
3820         /* Allocate helper buffers. */
3821         rc = -ENOMEM;
3822         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3823         if (!lpni_info)
3824                 goto out_lp_decref;
3825         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3826         if (!lpni_stats)
3827                 goto out_free_info;
3828         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3829         if (!lpni_msg_stats)
3830                 goto out_free_stats;
3831         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3832         if (!lpni_hstats)
3833                 goto out_free_msg_stats;
3834
3835
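        /* Stream one record per peer NI into the user buffer: NID, credit
         * info, send/recv/drop counts, message stats, then health stats.
         */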
3836         lpni = NULL;
3837         rc = -EFAULT;
3838         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3839                 nid = lpni->lpni_nid;
3840                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3841                         goto out_free_hstats;
3842                 bulk += sizeof(nid);
3843
3844                 memset(lpni_info, 0, sizeof(*lpni_info));
3845                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3846                 if (lnet_isrouter(lpni) ||
3847                         lnet_peer_aliveness_enabled(lpni))
3848                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3849                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3850
3851                 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3852                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3853                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3854                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3855                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3856                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3857                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3858                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3859                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3860                         goto out_free_hstats;
3861                 bulk += sizeof(*lpni_info);
3862
3863                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3864                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3865                                                             LNET_STATS_TYPE_SEND);
3866                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3867                                                             LNET_STATS_TYPE_RECV);
3868                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3869                                                             LNET_STATS_TYPE_DROP);
3870                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3871                         goto out_free_hstats;
3872                 bulk += sizeof(*lpni_stats);
3873                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3874                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3875                         goto out_free_hstats;
3876                 bulk += sizeof(*lpni_msg_stats);
3877                 lpni_hstats->hlpni_network_timeout =
3878                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3879                 lpni_hstats->hlpni_remote_dropped =
3880                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3881                 lpni_hstats->hlpni_remote_timeout =
3882                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3883                 lpni_hstats->hlpni_remote_error =
3884                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3885                 lpni_hstats->hlpni_health_value =
3886                   atomic_read(&lpni->lpni_healthv);
3887                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3888                         goto out_free_hstats;
3889                 bulk += sizeof(*lpni_hstats);
3890         }
3891         rc = 0;
3892
3893 out_free_hstats:
3894         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3895 out_free_msg_stats:
3896         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3897 out_free_stats:
3898         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3899 out_free_info:
3900         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3901 out_lp_decref:
3902         lnet_peer_decref_locked(lp);
3903 out:
3904         return rc;
3905 }
3906
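/*
 * Queue a peer NI for health recovery if its health value is below the
 * maximum and it is not already queued. Per the _locked naming
 * convention, the caller is assumed to hold the lnet_net_lock.
 */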
3907 void
3908 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3909 {
3910         /* the monitor thread could have shut down and cleaned up the queues */
3911         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3912                 return;
3913
3914         if (list_empty(&lpni->lpni_recovery) &&
3915             atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3916                 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3917                         libcfs_nid2str(lpni->lpni_nid),
3918                         atomic_read(&lpni->lpni_healthv));
3919                 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3920                 lnet_peer_ni_addref_locked(lpni);
3921         }
3922 }
3923
3924 /* Call with the ln_api_mutex held */
3925 void
3926 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3927 {
3928         struct lnet_peer_table *ptable;
3929         struct lnet_peer *lp;
3930         struct lnet_peer_net *lpn;
3931         struct lnet_peer_ni *lpni;
3932         int lncpt;
3933         int cpt;
3934
3935         if (the_lnet.ln_state != LNET_STATE_RUNNING)
3936                 return;
3937