1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(struct lnet_nid *nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         kref_init(&lpni->lpni_kref);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = *nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NID_NET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
246 static struct lnet_peer *
247 lnet_peer_alloc(struct lnet_nid *nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = *nid;
265         lp->lp_disc_src_nid = LNET_ANY_NID;
266         lp->lp_disc_dst_nid = LNET_ANY_NID;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * all peers created on a router should have health on
274          * if it's not already on.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for loopback peer. If you're creating a peer
281          * for the loopback interface then that was initiated when we
282          * attempted to send a message over the loopback. There is no need
283          * to ever use a different interface when sending messages to
284          * myself.
285          */
286         if (nid_is_lo0(nid))
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
291
292         return lp;
293 }
294
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected that lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318          * We cannot resend them now because we're holding the cpt lock.
319          * Releasing the lock can cause an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nidstr(&lp->lp_primary_nid),
382                 libcfs_nidstr(&lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nidstr(&lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
437
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
461
462         spin_lock(&peer->lp_lock);
463         peer->lp_state |= LNET_PEER_MARK_DELETED;
464         spin_unlock(&peer->lp_lock);
465
466         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
467         while (lpni != NULL) {
468                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469                 rc = lnet_peer_ni_del_locked(lpni, false);
470                 if (rc != 0)
471                         rc2 = rc;
472                 lpni = lpni2;
473         }
474
475         return rc2;
476 }
477
478 /*
479  * Discovering this peer is taking too long. Cancel any Ping or Push
480  * that discovery is waiting on by unlinking the relevant MDs. The
481  * lnet_discovery_event_handler() will proceed from here and complete
482  * the cleanup.
483  */
484 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
485 {
486         struct lnet_handle_md ping_mdh;
487         struct lnet_handle_md push_mdh;
488
489         LNetInvalidateMDHandle(&ping_mdh);
490         LNetInvalidateMDHandle(&push_mdh);
491
492         spin_lock(&lp->lp_lock);
493         if (lp->lp_state & LNET_PEER_PING_SENT) {
494                 ping_mdh = lp->lp_ping_mdh;
495                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
496         }
497         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
498                 push_mdh = lp->lp_push_mdh;
499                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
500         }
501         spin_unlock(&lp->lp_lock);
502
503         if (!LNetMDHandleIsInvalid(ping_mdh))
504                 LNetMDUnlink(ping_mdh);
505         if (!LNetMDHandleIsInvalid(push_mdh))
506                 LNetMDUnlink(push_mdh);
507 }
508
509 static int
510 lnet_peer_del(struct lnet_peer *peer)
511 {
512         int rc;
513
514         lnet_peer_cancel_discovery(peer);
515         lnet_net_lock(LNET_LOCK_EX);
516         rc = lnet_peer_del_locked(peer);
517         lnet_net_unlock(LNET_LOCK_EX);
518
519         return rc;
520 }
521
522 /*
523  * Delete a NID from a peer. Call with ln_api_mutex held.
524  *
525  * Error codes:
526  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
527  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
528  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
529  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
530  */
531 static int
532 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
533 {
534         struct lnet_peer_ni *lpni;
535         struct lnet_nid primary_nid = lp->lp_primary_nid;
536         struct lnet_nid nid;
537         int rc = 0;
538         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
539
540         lnet_nid4_to_nid(nid4, &nid);
541         if (!(flags & LNET_PEER_CONFIGURED)) {
542                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
543                         rc = -EPERM;
544                         goto out;
545                 }
546         }
547
548         lpni = lnet_peer_ni_find_locked(&nid);
549         if (!lpni) {
550                 rc = -ENOENT;
551                 goto out;
552         }
553         lnet_peer_ni_decref_locked(lpni);
554         if (lp != lpni->lpni_peer_net->lpn_peer) {
555                 rc = -ECHILD;
556                 goto out;
557         }
558
559         /*
560          * This function only allows deletion of the primary NID if it
561          * is the only NID.
562          */
563         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
564                 rc = -EBUSY;
565                 goto out;
566         }
567
568         lnet_net_lock(LNET_LOCK_EX);
569
570         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
571                 struct lnet_peer_ni *lpni2;
572                 /* assign the next peer_ni to be the primary */
573                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
574                 LASSERT(lpni2);
575                 lp->lp_primary_nid = lpni2->lpni_nid;
576         }
577         rc = lnet_peer_ni_del_locked(lpni, force);
578
579         lnet_net_unlock(LNET_LOCK_EX);
580
581 out:
582         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
583                libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid),
584                flags, rc);
585
586         return rc;
587 }
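
/*
 * Example (minimal sketch, not a caller in this file): removing one NID
 * from a peer via DLC and reacting to the error codes documented above.
 * 'lp' and 'nid4' are assumed to come from a DLC request; passing
 * LNET_PEER_CONFIGURED marks the request as a DLC operation.
 *
 *	rc = lnet_peer_del_nid(lp, nid4, LNET_PEER_CONFIGURED);
 *	if (rc == -EBUSY)
 *		CERROR("cannot delete primary NID of a multi-NID peer\n");
 *	else if (rc == -ENOENT)
 *		CERROR("no peer NI for %s\n", libcfs_nid2str(nid4));
 */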
588
589 static void
590 lnet_peer_table_cleanup_locked(struct lnet_net *net,
591                                struct lnet_peer_table *ptable)
592 {
593         int                      i;
594         struct lnet_peer_ni     *next;
595         struct lnet_peer_ni     *lpni;
596         struct lnet_peer        *peer;
597
598         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
599                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
600                                          lpni_hashlist) {
601                         if (net != NULL && net != lpni->lpni_net)
602                                 continue;
603
604                         peer = lpni->lpni_peer_net->lpn_peer;
605                         if (!nid_same(&peer->lp_primary_nid,
606                                        &lpni->lpni_nid)) {
607                                 lnet_peer_ni_del_locked(lpni, false);
608                                 continue;
609                         }
610                         /*
611                          * Removing the primary NID implies removing
612                          * the entire peer. Advance next beyond any
613                          * peer_ni that belongs to the same peer.
614                          */
615                         list_for_each_entry_from(next, &ptable->pt_hash[i],
616                                                  lpni_hashlist) {
617                                 if (next->lpni_peer_net->lpn_peer != peer)
618                                         break;
619                         }
620                         lnet_peer_del_locked(peer);
621                 }
622         }
623 }
624
625 static void
626 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
627 {
628         wait_var_event_warning(&ptable->pt_zombies,
629                                ptable->pt_zombies == 0,
630                                "Waiting for %d zombies on peer table\n",
631                                ptable->pt_zombies);
632 }
633
634 static void
635 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
636                                 struct lnet_peer_table *ptable)
637 {
638         struct lnet_peer_ni     *lp;
639         struct lnet_peer_ni     *tmp;
640         lnet_nid_t              gw_nid;
641         int                     i;
642
643         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
644                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
645                                          lpni_hashlist) {
646                         if (net != lp->lpni_net)
647                                 continue;
648
649                         if (!lnet_isrouter(lp))
650                                 continue;
651
652                         /* FIXME handle large-addr nid */
653                         gw_nid = lnet_nid_to_nid4(
654                                 &lp->lpni_peer_net->lpn_peer->lp_primary_nid);
655
656                         lnet_net_unlock(LNET_LOCK_EX);
657                         lnet_del_route(LNET_NET_ANY, gw_nid);
658                         lnet_net_lock(LNET_LOCK_EX);
659                 }
660         }
661 }
662
663 void
664 lnet_peer_tables_cleanup(struct lnet_net *net)
665 {
666         int i;
667         struct lnet_peer_table *ptable;
668
669         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
670         /* If just deleting the peers for a NI, get rid of any routes these
671          * peers are gateways for. */
672         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
673                 lnet_net_lock(LNET_LOCK_EX);
674                 lnet_peer_table_del_rtrs_locked(net, ptable);
675                 lnet_net_unlock(LNET_LOCK_EX);
676         }
677
678         /* Start the cleanup process */
679         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
680                 lnet_net_lock(LNET_LOCK_EX);
681                 lnet_peer_table_cleanup_locked(net, ptable);
682                 lnet_net_unlock(LNET_LOCK_EX);
683         }
684
685         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
686                 lnet_peer_ni_finalize_wait(ptable);
687 }
688
689 static struct lnet_peer_ni *
690 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
691 {
692         struct list_head        *peers;
693         struct lnet_peer_ni     *lp;
694
695         if (the_lnet.ln_state != LNET_STATE_RUNNING)
696                 return NULL;
697
698         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
699         list_for_each_entry(lp, peers, lpni_hashlist) {
700                 if (nid_same(&lp->lpni_nid, nid)) {
701                         lnet_peer_ni_addref_locked(lp);
702                         return lp;
703                 }
704         }
705
706         return NULL;
707 }
708
709 struct lnet_peer_ni *
710 lnet_find_peer_ni_locked(lnet_nid_t nid4)
711 {
712         struct lnet_peer_ni *lpni;
713         struct lnet_peer_table *ptable;
714         int cpt;
715         struct lnet_nid nid;
716
717         lnet_nid4_to_nid(nid4, &nid);
718
719         cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);
720
721         ptable = the_lnet.ln_peer_tables[cpt];
722         lpni = lnet_get_peer_ni_locked(ptable, &nid);
723
724         return lpni;
725 }
726
727 struct lnet_peer_ni *
728 lnet_peer_ni_find_locked(struct lnet_nid *nid)
729 {
730         struct lnet_peer_ni *lpni;
731         struct lnet_peer_table *ptable;
732         int cpt;
733
734         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
735
736         ptable = the_lnet.ln_peer_tables[cpt];
737         lpni = lnet_get_peer_ni_locked(ptable, nid);
738
739         return lpni;
740 }
741
742 struct lnet_peer_ni *
743 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
744 {
745         struct lnet_peer_net *lpn;
746         struct lnet_peer_ni *lpni;
747
748         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
749         if (!lpn)
750                 return NULL;
751
752         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
753                 if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
754                         return lpni;
755         }
756
757         return NULL;
758 }
759
760 struct lnet_peer_ni *
761 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
762 {
763         struct lnet_peer_net *lpn;
764         struct lnet_peer_ni *lpni;
765
766         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
767         if (!lpn)
768                 return NULL;
769
770         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
771                 if (nid_same(&lpni->lpni_nid, nid))
772                         return lpni;
773         }
774
775         return NULL;
776 }
777
778 struct lnet_peer *
779 lnet_find_peer4(lnet_nid_t nid)
780 {
781         struct lnet_peer_ni *lpni;
782         struct lnet_peer *lp = NULL;
783         int cpt;
784
785         cpt = lnet_net_lock_current();
786         lpni = lnet_find_peer_ni_locked(nid);
787         if (lpni) {
788                 lp = lpni->lpni_peer_net->lpn_peer;
789                 lnet_peer_addref_locked(lp);
790                 lnet_peer_ni_decref_locked(lpni);
791         }
792         lnet_net_unlock(cpt);
793
794         return lp;
795 }
796
797 struct lnet_peer *
798 lnet_find_peer(struct lnet_nid *nid)
799 {
800         struct lnet_peer_ni *lpni;
801         struct lnet_peer *lp = NULL;
802         int cpt;
803
804         cpt = lnet_net_lock_current();
805         lpni = lnet_peer_ni_find_locked(nid);
806         if (lpni) {
807                 lp = lpni->lpni_peer_net->lpn_peer;
808                 lnet_peer_addref_locked(lp);
809                 lnet_peer_ni_decref_locked(lpni);
810         }
811         lnet_net_unlock(cpt);
812
813         return lp;
814 }
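
/*
 * Example (minimal sketch): looking a peer up by one of its NIDs and
 * dropping the reference that lnet_find_peer() takes on success. The
 * decref must be done under lnet_net_lock(), matching the addref above.
 * 'nid' is assumed to be a struct lnet_nid the caller already holds.
 *
 *	struct lnet_peer *lp;
 *
 *	lp = lnet_find_peer(&nid);
 *	if (lp) {
 *		CDEBUG(D_NET, "primary NID %s\n",
 *		       libcfs_nidstr(&lp->lp_primary_nid));
 *		lnet_net_lock(LNET_LOCK_EX);
 *		lnet_peer_decref_locked(lp);
 *		lnet_net_unlock(LNET_LOCK_EX);
 *	}
 */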
815
816 struct lnet_peer_net *
817 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
818 {
819         struct lnet_peer_net *net;
820
821         if (!prev_lpn_id) {
822                 /* no net id provided, return the first net */
823                 net = list_first_entry_or_null(&lp->lp_peer_nets,
824                                                struct lnet_peer_net,
825                                                lpn_peer_nets);
826
827                 return net;
828         }
829
830         /* find the net after the one provided */
831         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
832                 if (net->lpn_net_id == prev_lpn_id) {
833                         /*
834                          * if we reached the end of the list, loop to the
835                          * beginning.
836                          */
837                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
838                                 return list_first_entry_or_null(&lp->lp_peer_nets,
839                                                                 struct lnet_peer_net,
840                                                                 lpn_peer_nets);
841                         else
842                                 return list_next_entry(net, lpn_peer_nets);
843                 }
844         }
845
846         return NULL;
847 }
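
/*
 * Example (minimal sketch): visiting each peer net exactly once. Because
 * the iterator wraps back to the first net after the last one, the walk
 * stops when the starting net comes around again. The caller is assumed
 * to hold lnet_net_lock().
 *
 *	struct lnet_peer_net *first, *lpn;
 *
 *	first = lnet_get_next_peer_net_locked(lp, 0);
 *	lpn = first;
 *	while (lpn) {
 *		CDEBUG(D_NET, "net %s\n", libcfs_net2str(lpn->lpn_net_id));
 *		lpn = lnet_get_next_peer_net_locked(lp, lpn->lpn_net_id);
 *		if (lpn == first)
 *			break;
 *	}
 */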
848
849 struct lnet_peer_ni *
850 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
851                              struct lnet_peer_net *peer_net,
852                              struct lnet_peer_ni *prev)
853 {
854         struct lnet_peer_ni *lpni;
855         struct lnet_peer_net *net = peer_net;
856
857         if (!prev) {
858                 if (!net) {
859                         if (list_empty(&peer->lp_peer_nets))
860                                 return NULL;
861
862                         net = list_entry(peer->lp_peer_nets.next,
863                                          struct lnet_peer_net,
864                                          lpn_peer_nets);
865                 }
866                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
867                                   lpni_peer_nis);
868
869                 return lpni;
870         }
871
872         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
873                 /*
874                  * if we reached the end of the peer ni list and the peer
875                  * net is specified, then there are no more peer nis in that
876                  * net.
877                  */
878                 if (net)
879                         return NULL;
880
881                 /*
882                  * we reached the end of this net's peer ni list. Move to
883                  * the next net.
884                  */
885                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
886                     &peer->lp_peer_nets)
887                         /* no more nets and no more NIs. */
888                         return NULL;
889
890                 /* get the next net */
891                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
892                                  struct lnet_peer_net,
893                                  lpn_peer_nets);
894                 /* get the ni on it */
895                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
896                                   lpni_peer_nis);
897
898                 return lpni;
899         }
900
901         /* there are more nis left */
902         lpni = list_entry(prev->lpni_peer_nis.next,
903                           struct lnet_peer_ni, lpni_peer_nis);
904
905         return lpni;
906 }
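
/*
 * Example (minimal sketch): walking every peer_ni of a peer by passing a
 * NULL peer_net and the previously returned entry, as
 * lnet_peer_clr_non_mr_pref_nids() below does. The caller is assumed to
 * hold lnet_net_lock() so the lists cannot change underneath it.
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		CDEBUG(D_NET, "peer NI %s\n", libcfs_nidstr(&lpni->lpni_nid));
 */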
907
908 /* Call with the ln_api_mutex held */
909 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
910 {
911         struct lnet_process_id id;
912         struct lnet_peer_table *ptable;
913         struct lnet_peer *lp;
914         __u32 count = 0;
915         __u32 size = 0;
916         int lncpt;
917         int cpt;
918         __u32 i;
919         int rc;
920
921         rc = -ESHUTDOWN;
922         if (the_lnet.ln_state != LNET_STATE_RUNNING)
923                 goto done;
924
925         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
926
927         /*
928          * Count the number of peers, and return E2BIG if the buffer
929          * is too small. We'll also return the desired size.
930          */
931         rc = -E2BIG;
932         for (cpt = 0; cpt < lncpt; cpt++) {
933                 ptable = the_lnet.ln_peer_tables[cpt];
934                 count += ptable->pt_peers;
935         }
936         size = count * sizeof(*ids);
937         if (size > *sizep)
938                 goto done;
939
940         /*
941          * Walk the peer lists and copy out the primary nids.
942          * This is safe because the peer lists are only modified
943          * while the ln_api_mutex is held. So we don't need to
944          * hold the lnet_net_lock as well, and can therefore
945          * directly call copy_to_user().
946          */
947         rc = -EFAULT;
948         memset(&id, 0, sizeof(id));
949         id.pid = LNET_PID_LUSTRE;
950         i = 0;
951         for (cpt = 0; cpt < lncpt; cpt++) {
952                 ptable = the_lnet.ln_peer_tables[cpt];
953                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
954                         if (!nid_is_nid4(&lp->lp_primary_nid))
955                                 continue;
956                         if (i >= count)
957                                 goto done;
958                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
959                         if (copy_to_user(&ids[i], &id, sizeof(id)))
960                                 goto done;
961                         i++;
962                 }
963         }
964         rc = 0;
965 done:
966         *countp = count;
967         *sizep = size;
968         return rc;
969 }
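
/*
 * Example (minimal sketch): probing for the buffer size before copying
 * the peer list. On -E2BIG the required size is returned through *sizep,
 * so a caller (e.g. an ioctl handler) can size its user buffer and call
 * again. 'ids' is assumed to be a __user buffer of at least 'size' bytes
 * on the second call.
 *
 *	__u32 count = 0;
 *	__u32 size = 0;
 *	int rc;
 *
 *	rc = lnet_get_peer_list(&count, &size, NULL);
 *	if (rc == -E2BIG) {
 *		(obtain a user buffer 'ids' of at least 'size' bytes)
 *		rc = lnet_get_peer_list(&count, &size, ids);
 *	}
 */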
970
971 /*
972  * Start pushes to peers that need to be updated for a configuration
973  * change on this node.
974  */
975 void
976 lnet_push_update_to_peers(int force)
977 {
978         struct lnet_peer_table *ptable;
979         struct lnet_peer *lp;
980         int lncpt;
981         int cpt;
982
983         lnet_net_lock(LNET_LOCK_EX);
984         if (lnet_peer_discovery_disabled)
985                 force = 0;
986         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
987         for (cpt = 0; cpt < lncpt; cpt++) {
988                 ptable = the_lnet.ln_peer_tables[cpt];
989                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
990                         if (force) {
991                                 spin_lock(&lp->lp_lock);
992                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
993                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
994                                 spin_unlock(&lp->lp_lock);
995                         }
996                         if (lnet_peer_needs_push(lp))
997                                 lnet_peer_queue_for_discovery(lp);
998                 }
999         }
1000         lnet_net_unlock(LNET_LOCK_EX);
1001         wake_up(&the_lnet.ln_dc_waitq);
1002 }
1003
1004 /* find the NID in the preferred gateways for the remote peer
1005  * return:
1006  *      false: list is not empty and NID is not preferred
1007  *      false: list is empty
1008  *      true: nid is found in the list
1009  */
1010 bool
1011 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
1012                              struct lnet_nid *gw_nid)
1013 {
1014         struct lnet_nid_list *ne;
1015
1016         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1017                libcfs_nidstr(&lpni->lpni_nid),
1018                list_empty(&lpni->lpni_rtr_pref_nids));
1019
1020         if (list_empty(&lpni->lpni_rtr_pref_nids))
1021                 return false;
1022
1023         /* iterate through all the preferred NIDs and see if any of them
1024          * matches the provided gw_nid
1025          */
1026         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1027                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1028                        libcfs_nidstr(&ne->nl_nid),
1029                        libcfs_nidstr(gw_nid));
1030                 if (nid_same(&ne->nl_nid, gw_nid))
1031                         return true;
1032         }
1033
1034         return false;
1035 }
1036
1037 void
1038 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
1039 {
1040         struct list_head zombies;
1041         struct lnet_nid_list *ne;
1042         struct lnet_nid_list *tmp;
1043         int cpt = lpni->lpni_cpt;
1044
1045         INIT_LIST_HEAD(&zombies);
1046
1047         lnet_net_lock(cpt);
1048         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
1049         lnet_net_unlock(cpt);
1050
1051         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1052                 list_del(&ne->nl_list);
1053                 LIBCFS_FREE(ne, sizeof(*ne));
1054         }
1055 }
1056
1057 int
1058 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
1059                        struct lnet_nid *gw_nid)
1060 {
1061         int cpt = lpni->lpni_cpt;
1062         struct lnet_nid_list *ne = NULL;
1063
1064         /* This function is called with api_mutex held. When the api_mutex
1065          * is held the list cannot be modified, as it is only modified as
1066          * a result of applying a UDSP and that happens under api_mutex
1067          * lock.
1068          */
1069         __must_hold(&the_lnet.ln_api_mutex);
1070
1071         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1072                 if (nid_same(&ne->nl_nid, gw_nid))
1073                         return -EEXIST;
1074         }
1075
1076         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1077         if (!ne)
1078                 return -ENOMEM;
1079
1080         ne->nl_nid = *gw_nid;
1081
1082         /* Lock the cpt to protect against addition and checks in the
1083          * selection algorithm
1084          */
1085         lnet_net_lock(cpt);
1086         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1087         lnet_net_unlock(cpt);
1088
1089         return 0;
1090 }
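
/*
 * Example (minimal sketch): recording a preferred gateway for a peer_ni
 * and later testing it from the selection path. 'gw_nid' is assumed to
 * be the primary NID of a configured gateway; the add runs under the
 * ln_api_mutex as required above, the check under the cpt lock.
 *
 *	rc = lnet_peer_add_pref_rtr(lpni, &gw_nid);
 *	if (rc && rc != -EEXIST)
 *		return rc;
 *	...
 *	lnet_net_lock(lpni->lpni_cpt);
 *	preferred = lnet_peer_is_pref_rtr_locked(lpni, &gw_nid);
 *	lnet_net_unlock(lpni->lpni_cpt);
 */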
1091
1092 /*
1093  * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
1094  * this is a preferred point-to-point path. Call with lnet_net_lock in
1095  * shared mode.
1096  */
1097 bool
1098 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1099 {
1100         struct lnet_nid_list *ne;
1101
1102         if (lpni->lpni_pref_nnids == 0)
1103                 return false;
1104         if (lpni->lpni_pref_nnids == 1)
1105                 return nid_same(&lpni->lpni_pref.nid, nid);
1106         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1107                 if (nid_same(&ne->nl_nid, nid))
1108                         return true;
1109         }
1110         return false;
1111 }
1112
1113 /*
1114  * Set a single ni as preferred, provided no preferred ni is already
1115  * defined. Only to be used for non-multi-rail peer_ni.
1116  */
1117 int
1118 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1119                                   struct lnet_nid *nid)
1120 {
1121         int rc = 0;
1122
1123         if (!nid)
1124                 return -EINVAL;
1125         spin_lock(&lpni->lpni_lock);
1126         if (LNET_NID_IS_ANY(nid)) {
1127                 rc = -EINVAL;
1128         } else if (lpni->lpni_pref_nnids > 0) {
1129                 rc = -EPERM;
1130         } else if (lpni->lpni_pref_nnids == 0) {
1131                 lpni->lpni_pref.nid = *nid;
1132                 lpni->lpni_pref_nnids = 1;
1133                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1134         }
1135         spin_unlock(&lpni->lpni_lock);
1136
1137         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1138                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1139         return rc;
1140 }
1141
1142 /*
1143  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1144  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1145  */
1146 int
1147 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1148 {
1149         int rc = 0;
1150
1151         spin_lock(&lpni->lpni_lock);
1152         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1153                 lpni->lpni_pref_nnids = 0;
1154                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1155         } else if (lpni->lpni_pref_nnids == 0) {
1156                 rc = -ENOENT;
1157         } else {
1158                 rc = -EPERM;
1159         }
1160         spin_unlock(&lpni->lpni_lock);
1161
1162         CDEBUG(D_NET, "peer %s: %d\n",
1163                libcfs_nidstr(&lpni->lpni_nid), rc);
1164         return rc;
1165 }
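
/*
 * Example (minimal sketch): pinning a source NI for a non-multi-rail
 * peer_ni. A second set attempt fails with -EPERM until the preference
 * is cleared again. 'src_nid' is assumed to be the NID of a local NI.
 *
 *	rc = lnet_peer_ni_set_non_mr_pref_nid(lpni, &src_nid);	(rc == 0)
 *	rc = lnet_peer_ni_set_non_mr_pref_nid(lpni, &src_nid);	(rc == -EPERM)
 *	rc = lnet_peer_ni_clr_non_mr_pref_nid(lpni);		(rc == 0)
 */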
1166
1167 void
1168 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1169 {
1170         lpni->lpni_sel_priority = priority;
1171 }
1172
1173 /*
1174  * Clear the preferred NIDs from a non-multi-rail peer.
1175  */
1176 void
1177 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1178 {
1179         struct lnet_peer_ni *lpni = NULL;
1180
1181         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1182                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1183 }
1184
1185 int
1186 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1187 {
1188         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1189         struct lnet_nid_list *ne1 = NULL;
1190         struct lnet_nid_list *ne2 = NULL;
1191         struct lnet_nid *tmp_nid = NULL;
1192         int rc = 0;
1193
1194         if (LNET_NID_IS_ANY(nid)) {
1195                 rc = -EINVAL;
1196                 goto out;
1197         }
1198
1199         if (lpni->lpni_pref_nnids == 1 &&
1200             nid_same(&lpni->lpni_pref.nid, nid)) {
1201                 rc = -EEXIST;
1202                 goto out;
1203         }
1204
1205         /* A non-MR node may have only one preferred NI per peer_ni */
1206         if (lpni->lpni_pref_nnids > 0 &&
1207             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1208                 rc = -EPERM;
1209                 goto out;
1210         }
1211
1212         /* add the new preferred nid to the list of preferred nids */
1213         if (lpni->lpni_pref_nnids != 0) {
1214                 size_t alloc_size = sizeof(*ne1);
1215
1216                 if (lpni->lpni_pref_nnids == 1) {
1217                         tmp_nid = &lpni->lpni_pref.nid;
1218                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1219                 }
1220
1221                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1222                         if (nid_same(&ne1->nl_nid, nid)) {
1223                                 rc = -EEXIST;
1224                                 goto out;
1225                         }
1226                 }
1227
1228                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1229                                  alloc_size);
1230                 if (!ne1) {
1231                         rc = -ENOMEM;
1232                         goto out;
1233                 }
1234
1235                 /* move the originally stored nid to the list */
1236                 if (lpni->lpni_pref_nnids == 1) {
1237                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1238                                 lpni->lpni_cpt, alloc_size);
1239                         if (!ne2) {
1240                                 rc = -ENOMEM;
1241                                 goto out;
1242                         }
1243                         INIT_LIST_HEAD(&ne2->nl_list);
1244                         ne2->nl_nid = *tmp_nid;
1245                 }
1246                 ne1->nl_nid = *nid;
1247         }
1248
1249         lnet_net_lock(LNET_LOCK_EX);
1250         spin_lock(&lpni->lpni_lock);
1251         if (lpni->lpni_pref_nnids == 0) {
1252                 lpni->lpni_pref.nid = *nid;
1253         } else {
1254                 if (ne2)
1255                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1256                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1257         }
1258         lpni->lpni_pref_nnids++;
1259         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1260         spin_unlock(&lpni->lpni_lock);
1261         lnet_net_unlock(LNET_LOCK_EX);
1262
1263 out:
1264         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1265                 spin_lock(&lpni->lpni_lock);
1266                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1267                 spin_unlock(&lpni->lpni_lock);
1268         }
1269         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1270                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1271         return rc;
1272 }
1273
1274 int
1275 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1276 {
1277         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1278         struct lnet_nid_list *ne = NULL;
1279         int rc = 0;
1280
1281         if (lpni->lpni_pref_nnids == 0) {
1282                 rc = -ENOENT;
1283                 goto out;
1284         }
1285
1286         if (lpni->lpni_pref_nnids == 1) {
1287                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1288                         rc = -ENOENT;
1289                         goto out;
1290                 }
1291         } else {
1292                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1293                         if (nid_same(&ne->nl_nid, nid))
1294                                 goto remove_nid_entry;
1295                 }
1296                 rc = -ENOENT;
1297                 ne = NULL;
1298                 goto out;
1299         }
1300
1301 remove_nid_entry:
1302         lnet_net_lock(LNET_LOCK_EX);
1303         spin_lock(&lpni->lpni_lock);
1304         if (lpni->lpni_pref_nnids == 1)
1305                 lpni->lpni_pref.nid = LNET_ANY_NID;
1306         else {
1307                 list_del_init(&ne->nl_list);
1308                 if (lpni->lpni_pref_nnids == 2) {
1309                         struct lnet_nid_list *ne, *tmp;
1310
1311                         list_for_each_entry_safe(ne, tmp,
1312                                                  &lpni->lpni_pref.nids,
1313                                                  nl_list) {
1314                                 lpni->lpni_pref.nid = ne->nl_nid;
1315                                 list_del_init(&ne->nl_list);
1316                                 LIBCFS_FREE(ne, sizeof(*ne));
1317                         }
1318                 }
1319         }
1320         lpni->lpni_pref_nnids--;
1321         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1322         spin_unlock(&lpni->lpni_lock);
1323         lnet_net_unlock(LNET_LOCK_EX);
1324
1325         if (ne)
1326                 LIBCFS_FREE(ne, sizeof(*ne));
1327 out:
1328         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1329                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1330         return rc;
1331 }
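
/*
 * Example (minimal sketch): managing the preferred-NID set of a
 * multi-rail peer_ni. The first NID is stored inline in lpni_pref.nid;
 * adding a second moves the set onto the lpni_pref.nids list, and
 * deleting back down to one moves it inline again. 'nid_a' and 'nid_b'
 * are assumed to be NIDs of local NIs.
 *
 *	rc = lnet_peer_add_pref_nid(lpni, &nid_a);
 *	rc = lnet_peer_add_pref_nid(lpni, &nid_b);
 *	lnet_net_lock(lpni->lpni_cpt);
 *	if (lnet_peer_is_pref_nid_locked(lpni, &nid_b))
 *		CDEBUG(D_NET, "nid_b is preferred\n");
 *	lnet_net_unlock(lpni->lpni_cpt);
 *	rc = lnet_peer_del_pref_nid(lpni, &nid_a);
 */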
1332
1333 void
1334 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1335 {
1336         struct list_head zombies;
1337         struct lnet_nid_list *ne;
1338         struct lnet_nid_list *tmp;
1339
1340         INIT_LIST_HEAD(&zombies);
1341
1342         lnet_net_lock(LNET_LOCK_EX);
1343         if (lpni->lpni_pref_nnids == 1)
1344                 lpni->lpni_pref.nid = LNET_ANY_NID;
1345         else if (lpni->lpni_pref_nnids > 1)
1346                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1347         lpni->lpni_pref_nnids = 0;
1348         lnet_net_unlock(LNET_LOCK_EX);
1349
1350         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1351                 list_del_init(&ne->nl_list);
1352                 LIBCFS_FREE(ne, sizeof(*ne));
1353         }
1354 }
1355
1356 void
1357 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1358 {
1359         struct lnet_peer_ni *lpni;
1360
1361         *result = *nid;
1362         lpni = lnet_peer_ni_find_locked(nid);
1363         if (lpni) {
1364                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1365                 lnet_peer_ni_decref_locked(lpni);
1366         }
1367 }
1368
1369 bool
1370 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1371 __must_hold(&lp->lp_lock)
1372 {
1373         if (lnet_peer_discovery_disabled)
1374                 return true;
1375
1376         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1377             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1378                 return true;
1379         }
1380
1381         return false;
1382 }
1383
1384 /*
1385  * Peer Discovery
1386  */
1387 bool
1388 lnet_is_discovery_disabled(struct lnet_peer *lp)
1389 {
1390         bool rc = false;
1391
1392         spin_lock(&lp->lp_lock);
1393         rc = lnet_is_discovery_disabled_locked(lp);
1394         spin_unlock(&lp->lp_lock);
1395
1396         return rc;
1397 }
1398
1399 int
1400 LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
1401 {
1402         lnet_nid_t pnid = 0;
1403         bool mr;
1404         int i, rc;
1405
1406         if (!nids || num_nids < 1)
1407                 return -EINVAL;
1408
1409         rc = LNetNIInit(LNET_PID_ANY);
1410         if (rc < 0)
1411                 return rc;
1412
1413         mutex_lock(&the_lnet.ln_api_mutex);
1414
1415         mr = lnet_peer_discovery_disabled == 0;
1416
1417         rc = 0;
1418         for (i = 0; i < num_nids; i++) {
1419                 if (nids[i] == LNET_NID_LO_0)
1420                         continue;
1421
1422                 if (!pnid) {
1423                         pnid = nids[i];
1424                         rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
1425                 } else if (lnet_peer_discovery_disabled) {
1426                         rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
1427                 } else {
1428                         rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
1429                 }
1430
1431                 if (rc && rc != -EEXIST)
1432                         goto unlock;
1433         }
1434
1435 unlock:
1436         mutex_unlock(&the_lnet.ln_api_mutex);
1437
1438         LNetNIFini();
1439
1440         return rc == -EEXIST ? 0 : rc;
1441 }
1442 EXPORT_SYMBOL(LNetAddPeer);
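
/*
 * Example (minimal sketch): a caller priming the peer table with the
 * interfaces of one remote node before any traffic is sent. The
 * addresses below are made up. With discovery enabled the remaining
 * NIDs are grouped under nids[0] as a multi-rail peer; with discovery
 * disabled each NID becomes its own peer.
 *
 *	lnet_nid_t nids[2];
 *	int rc;
 *
 *	nids[0] = libcfs_str2nid("192.168.1.10@tcp");
 *	nids[1] = libcfs_str2nid("10.10.1.10@o2ib");
 *	rc = LNetAddPeer(nids, 2);
 */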
1443
1444 /* FIXME support large-addr nid */
1445 lnet_nid_t
1446 LNetPrimaryNID(lnet_nid_t nid)
1447 {
1448         struct lnet_peer *lp;
1449         struct lnet_peer_ni *lpni;
1450         lnet_nid_t primary_nid = nid;
1451         int rc = 0;
1452         int cpt;
1453
1454         if (nid == LNET_NID_LO_0)
1455                 return LNET_NID_LO_0;
1456
1457         cpt = lnet_net_lock_current();
1458         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1459         if (IS_ERR(lpni)) {
1460                 rc = PTR_ERR(lpni);
1461                 goto out_unlock;
1462         }
1463         lp = lpni->lpni_peer_net->lpn_peer;
1464
1465         /* If discovery is disabled locally then we needn't bother running
1466          * discovery here because discovery will not modify whatever
1467          * primary NID is currently set for this peer. If the specified peer is
1468          * down then this discovery can introduce long delays into the mount
1469          * process, so skip it if it isn't necessary.
1470          */
1471         while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
1472                 spin_lock(&lp->lp_lock);
1473                 /* force a full discovery cycle */
1474                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1475                 spin_unlock(&lp->lp_lock);
1476
1477                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1478                 if (rc)
1479                         goto out_decref;
1480                 /* The lpni (or lp) for this NID may have changed and our ref is
1481                  * the only thing keeping the old one around. Release the ref
1482                  * and look up the lpni again.
1483                  */
1484                 lnet_peer_ni_decref_locked(lpni);
1485                 lpni = lnet_find_peer_ni_locked(nid);
1486                 if (!lpni) {
1487                         rc = -ENOENT;
1488                         goto out_unlock;
1489                 }
1490                 lp = lpni->lpni_peer_net->lpn_peer;
1491
1492                 /* If we find that the peer has discovery disabled then we will
1493                  * not modify whatever primary NID is currently set for this
1494                  * peer. Thus, we can break out of this loop even if the peer
1495                  * is not fully up to date.
1496                  */
1497                 if (lnet_is_discovery_disabled(lp))
1498                         break;
1499         }
1500         primary_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
1501 out_decref:
1502         lnet_peer_ni_decref_locked(lpni);
1503 out_unlock:
1504         lnet_net_unlock(cpt);
1505
1506         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1507                libcfs_nid2str(primary_nid), rc);
1508         return primary_nid;
1509 }
1510 EXPORT_SYMBOL(LNetPrimaryNID);
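
/*
 * Example (minimal sketch): resolving whichever NID a request arrived on
 * to the peer's primary NID, so upper layers can key their state off a
 * single identity. 'nid' is assumed to be any NID of the peer.
 *
 *	lnet_nid_t primary = LNetPrimaryNID(nid);
 *
 *	if (primary != nid)
 *		CDEBUG(D_NET, "%s belongs to peer %s\n",
 *		       libcfs_nid2str(nid), libcfs_nid2str(primary));
 */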
1511
1512 struct lnet_peer_net *
1513 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1514 {
1515         struct lnet_peer_net *peer_net;
1516         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1517                 if (peer_net->lpn_net_id == net_id)
1518                         return peer_net;
1519         }
1520         return NULL;
1521 }
1522
1523 /*
1524  * Attach a peer_ni to a peer_net and peer. This function assumes
1525  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1526  * may be attached to a different peer, in which case it will be
1527  * properly detached first. The whole operation is done atomically.
1528  *
1529  * This function consumes the reference on lpni and always returns 0.
1530  * This is the last function called from functions that do return an
1531  * int, so returning 0 here allows the compiler to do a tail call.
1532  */
1533 static int
1534 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1535                                 struct lnet_peer_net *lpn,
1536                                 struct lnet_peer_ni *lpni,
1537                                 unsigned flags)
1538 {
1539         struct lnet_peer_table *ptable;
1540         bool new_lpn = false;
1541         int rc;
1542
1543         /* Install the new peer_ni */
1544         lnet_net_lock(LNET_LOCK_EX);
1545         /* Add peer_ni to global peer table hash, if necessary. */
1546         if (list_empty(&lpni->lpni_hashlist)) {
1547                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1548
1549                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1550                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1551                 ptable->pt_version++;
1552                 lnet_peer_ni_addref_locked(lpni);
1553         }
1554
1555         /* Detach the peer_ni from an existing peer, if necessary. */
1556         if (lpni->lpni_peer_net) {
1557                 LASSERT(lpni->lpni_peer_net != lpn);
1558                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1559                 lnet_peer_detach_peer_ni_locked(lpni);
1560                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1561                 lpni->lpni_peer_net = NULL;
1562         }
1563
1564         /* Add peer_ni to peer_net */
1565         lpni->lpni_peer_net = lpn;
1566         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1567                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1568         else
1569                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1570         lnet_update_peer_net_healthv(lpni);
1571         lnet_peer_net_addref_locked(lpn);
1572
1573         /* Add peer_net to peer */
1574         if (!lpn->lpn_peer) {
1575                 new_lpn = true;
1576                 lpn->lpn_peer = lp;
1577                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1578                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1579                 else
1580                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1581                 lnet_peer_addref_locked(lp);
1582         }
1583
1584         /* Add peer to global peer list, if necessary */
1585         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1586         if (list_empty(&lp->lp_peer_list)) {
1587                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1588                 ptable->pt_peers++;
1589         }
1590
1591
1592         /* Update peer state */
1593         spin_lock(&lp->lp_lock);
1594         if (flags & LNET_PEER_CONFIGURED) {
1595                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1596                         lp->lp_state |= LNET_PEER_CONFIGURED;
1597         }
1598         if (flags & LNET_PEER_MULTI_RAIL) {
1599                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1600                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1601                         lnet_peer_clr_non_mr_pref_nids(lp);
1602                 }
1603         }
1604         spin_unlock(&lp->lp_lock);
1605
1606         lp->lp_nnis++;
1607
1608         /* apply UDSPs */
1609         if (new_lpn) {
1610                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1611                 if (rc)
1612                         CERROR("Failed to apply UDSPs on lpn %s\n",
1613                                libcfs_net2str(lpn->lpn_net_id));
1614         }
1615         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1616         if (rc)
1617                 CERROR("Failed to apply UDSPs on lpni %s\n",
1618                        libcfs_nidstr(&lpni->lpni_nid));
1619
1620         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1621                libcfs_nidstr(&lp->lp_primary_nid),
1622                libcfs_nidstr(&lpni->lpni_nid), flags);
1623         lnet_peer_ni_decref_locked(lpni);
1624         lnet_net_unlock(LNET_LOCK_EX);
1625
1626         return 0;
1627 }
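/*
 * Illustrative caller sketch (not driver code): the allocate-then-attach
 * pattern used by lnet_peer_add() and lnet_peer_ni_traffic_add() below.
 * Error handling is omitted here for brevity.
 *
 *      lp   = lnet_peer_alloc(&nid);
 *      lpn  = lnet_peer_net_alloc(LNET_NID_NET(&nid));
 *      lpni = lnet_peer_ni_alloc(&nid);
 *      return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
 *
 * The attach call consumes the lpni reference and always returns 0,
 * so it can be the tail call of a function returning int.
 */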
1628
1629 /*
1630  * Create a new peer, with nid as its primary nid.
1631  *
1632  * Call with the lnet_api_mutex held.
1633  */
1634 static int
1635 lnet_peer_add(lnet_nid_t nid4, unsigned int flags)
1636 {
1637         struct lnet_nid nid;
1638         struct lnet_peer *lp;
1639         struct lnet_peer_net *lpn;
1640         struct lnet_peer_ni *lpni;
1641         int rc = 0;
1642
1643         LASSERT(nid4 != LNET_NID_ANY);
1644
1645         /*
1646          * No need for the lnet_net_lock here, because the
1647          * lnet_api_mutex is held.
1648          */
1649         lpni = lnet_find_peer_ni_locked(nid4);
1650         if (lpni) {
1651                 /* A peer with this NID already exists. */
1652                 lp = lpni->lpni_peer_net->lpn_peer;
1653                 lnet_peer_ni_decref_locked(lpni);
1654                 /*
1655                  * This is an error if the peer was configured and the
1656                  * primary NID differs or an attempt is made to change
1657                  * the Multi-Rail flag. Otherwise the assumption is
1658                  * that an existing peer is being modified.
1659                  */
1660                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1661                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid4)
1662                                 rc = -EEXIST;
1663                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1664                                 rc = -EPERM;
1665                         goto out;
1666                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1667                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid4) {
1668                                 rc = -EEXIST;
1669                                 goto out;
1670                         }
1671                 }
1672                 /* Delete and recreate as a configured peer. */
1673                 rc = lnet_peer_del(lp);
1674                 if (rc)
1675                         goto out;
1676         }
1677
1678         /* Create peer, peer_net, and peer_ni. */
1679         rc = -ENOMEM;
1680         lnet_nid4_to_nid(nid4, &nid);
1681         lp = lnet_peer_alloc(&nid);
1682         if (!lp)
1683                 goto out;
1684         lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
1685         if (!lpn)
1686                 goto out_free_lp;
1687         lpni = lnet_peer_ni_alloc(&nid);
1688         if (!lpni)
1689                 goto out_free_lpn;
1690
1691         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1692
1693 out_free_lpn:
1694         LIBCFS_FREE(lpn, sizeof(*lpn));
1695 out_free_lp:
1696         LIBCFS_FREE(lp, sizeof(*lp));
1697 out:
1698         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1699                libcfs_nid2str(nid4), flags, rc);
1700         return rc;
1701 }
1702
1703 /*
1704  * Add a NID to a peer. Call with ln_api_mutex held.
1705  *
1706  * Error codes:
1707  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1708  *  -EEXIST:   The NID was configured by DLC for a different peer.
1709  *  -ENOMEM:   Out of memory.
1710  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1711  *             non-multi-rail peer.
1712  */
1713 static int
1714 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
1715 {
1716         struct lnet_peer_net *lpn;
1717         struct lnet_peer_ni *lpni;
1718         struct lnet_nid nid;
1719         int rc = 0;
1720
1721         LASSERT(lp);
1722         LASSERT(nid4 != LNET_NID_ANY);
1723
1724         lnet_nid4_to_nid(nid4, &nid);
1725
1726         /* A configured peer can only be updated through configuration. */
1727         if (!(flags & LNET_PEER_CONFIGURED)) {
1728                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1729                         rc = -EPERM;
1730                         goto out;
1731                 }
1732         }
1733
1734         /*
1735          * The MULTI_RAIL flag can be set but not cleared, because
1736          * that would leave the peer struct in an invalid state.
1737          */
1738         if (flags & LNET_PEER_MULTI_RAIL) {
1739                 spin_lock(&lp->lp_lock);
1740                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1741                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1742                         lnet_peer_clr_non_mr_pref_nids(lp);
1743                 }
1744                 spin_unlock(&lp->lp_lock);
1745         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1746                 rc = -EPERM;
1747                 goto out;
1748         }
1749
1750         lpni = lnet_find_peer_ni_locked(nid4);
1751         if (lpni) {
1752                 /*
1753                  * A peer_ni already exists. This is only a problem if
1754                  * it is not connected to this peer and was configured
1755                  * by DLC.
1756                  */
1757                 if (lpni->lpni_peer_net->lpn_peer == lp)
1758                         goto out_free_lpni;
1759                 if (lnet_peer_ni_is_configured(lpni)) {
1760                         rc = -EEXIST;
1761                         goto out_free_lpni;
1762                 }
1763                 /* If this is the primary NID, destroy the peer. */
1764                 if (lnet_peer_ni_is_primary(lpni)) {
1765                         struct lnet_peer *rtr_lp =
1766                                 lpni->lpni_peer_net->lpn_peer;
1767                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1768                         /*
1769                          * if we're trying to delete a router, it means
1770                          * we're moving this peer NI to a new peer, so we
1771                          * must transfer the router properties to the new peer
1772                          */
1773                         if (rtr_refcount > 0) {
1774                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1775                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1776                         }
1777                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1778                         lnet_peer_ni_decref_locked(lpni);
1779                         lpni = lnet_peer_ni_alloc(&nid);
1780                         if (!lpni) {
1781                                 rc = -ENOMEM;
1782                                 goto out_free_lpni;
1783                         }
1784                 }
1785         } else {
1786                 lpni = lnet_peer_ni_alloc(&nid);
1787                 if (!lpni) {
1788                         rc = -ENOMEM;
1789                         goto out_free_lpni;
1790                 }
1791         }
1792
1793         /*
1794          * Get the peer_net. Check that we're not adding a second
1795          * peer_ni on a peer_net of a non-multi-rail peer.
1796          */
1797         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid4));
1798         if (!lpn) {
1799                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid4));
1800                 if (!lpn) {
1801                         rc = -ENOMEM;
1802                         goto out_free_lpni;
1803                 }
1804         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1805                 rc = -ENOTUNIQ;
1806                 goto out_free_lpni;
1807         }
1808
1809         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1810
1811 out_free_lpni:
1812         lnet_peer_ni_decref_locked(lpni);
1813 out:
1814         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1815                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid4),
1816                flags, rc);
1817         return rc;
1818 }
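/*
 * Illustrative examples of the error codes listed above (sketch only;
 * "lp", "nid" and "nid2_same_net" are placeholders):
 *
 *      lnet_peer_add_nid(lp, nid, 0);
 *              -EPERM if lp is DLC-configured (non-DLC update refused)
 *      lnet_peer_add_nid(lp, nid, LNET_PEER_CONFIGURED);
 *              -EEXIST if nid already belongs to another DLC-configured peer
 *      lnet_peer_add_nid(lp, nid2_same_net, LNET_PEER_CONFIGURED);
 *              -ENOTUNIQ if lp is non-Multi-Rail and already has a NID
 *              on that network
 */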
1819
1820 /*
1821  * Update the primary NID of a peer, if possible.
1822  *
1823  * Call with the lnet_api_mutex held.
1824  */
1825 static int
1826 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
1827                           unsigned int flags)
1828 {
1829         struct lnet_nid old = lp->lp_primary_nid;
1830         int rc = 0;
1831
1832         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
1833                 goto out;
1834
1835         lnet_nid4_to_nid(nid, &lp->lp_primary_nid);
1836
1837         rc = lnet_peer_add_nid(lp, nid, flags);
1838         if (rc) {
1839                 lp->lp_primary_nid = old;
1840                 goto out;
1841         }
1842 out:
1843         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1844                libcfs_nidstr(&old), libcfs_nid2str(nid), rc);
1845
1846         return rc;
1847 }
1848
1849 /*
1850  * lpni creation initiated due to traffic either sending or receiving.
1851  */
1852 static int
1853 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1854 {
1855         struct lnet_peer *lp;
1856         struct lnet_peer_net *lpn;
1857         struct lnet_peer_ni *lpni;
1858         unsigned flags = 0;
1859         int rc = 0;
1860
1861         if (LNET_NID_IS_ANY(nid)) {
1862                 rc = -EINVAL;
1863                 goto out;
1864         }
1865
1866         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1867         lpni = lnet_peer_ni_find_locked(nid);
1868         if (lpni) {
1869                 /*
1870                  * We must have raced with another thread. Since we
1871                  * know next to nothing about a peer_ni created by
1872                  * traffic, we just assume everything is ok and
1873                  * return.
1874                  */
1875                 lnet_peer_ni_decref_locked(lpni);
1876                 goto out;
1877         }
1878
1879         /* Create peer, peer_net, and peer_ni. */
1880         rc = -ENOMEM;
1881         lp = lnet_peer_alloc(nid);
1882         if (!lp)
1883                 goto out;
1884         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1885         if (!lpn)
1886                 goto out_free_lp;
1887         lpni = lnet_peer_ni_alloc(nid);
1888         if (!lpni)
1889                 goto out_free_lpn;
1890         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1891
1892         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1893
1894 out_free_lpn:
1895         LIBCFS_FREE(lpn, sizeof(*lpn));
1896 out_free_lp:
1897         LIBCFS_FREE(lp, sizeof(*lp));
1898 out:
1899         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1900         return rc;
1901 }
1902
1903 /*
1904  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1905  *
1906  * This API handles the following combinations:
1907  *   Create a peer with its primary NI if only the prim_nid is provided
1908  *   Add a NID to a peer identified by the prim_nid. The peer identified
1909  *   by the prim_nid must already exist.
1910  *   The peer being created may be non-MR.
1911  *
1912  * The caller must hold ln_api_mutex. This prevents the peer from
1913  * being created/modified/deleted by a different thread.
1914  */
1915 int
1916 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
1917 {
1918         struct lnet_peer *lp = NULL;
1919         struct lnet_peer_ni *lpni;
1920         unsigned int flags = 0;
1921
1922         /* The prim_nid must always be specified */
1923         if (prim_nid == LNET_NID_ANY)
1924                 return -EINVAL;
1925
1926         if (!temp)
1927                 flags = LNET_PEER_CONFIGURED;
1928
1929         if (mr)
1930                 flags |= LNET_PEER_MULTI_RAIL;
1931
1932         /*
1933          * If nid isn't specified, we must create a new peer with
1934          * prim_nid as its primary nid.
1935          */
1936         if (nid == LNET_NID_ANY)
1937                 return lnet_peer_add(prim_nid, flags);
1938
1939         /* Look up the prim_nid, which must exist. */
1940         lpni = lnet_find_peer_ni_locked(prim_nid);
1941         if (!lpni)
1942                 return -ENOENT;
1943         lnet_peer_ni_decref_locked(lpni);
1944         lp = lpni->lpni_peer_net->lpn_peer;
1945
1946         /* Peer must have been configured. */
1947         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1948                 CDEBUG(D_NET, "peer %s was not configured\n",
1949                        libcfs_nid2str(prim_nid));
1950                 return -ENOENT;
1951         }
1952
1953         /* Primary NID must match */
1954         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
1955                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1956                        libcfs_nid2str(prim_nid),
1957                        libcfs_nidstr(&lp->lp_primary_nid));
1958                 return -ENODEV;
1959         }
1960
1961         /* Multi-Rail flag must match. */
1962         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1963                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1964                        libcfs_nid2str(prim_nid));
1965                 return -EPERM;
1966         }
1967
1968         return lnet_peer_add_nid(lp, nid, flags);
1969 }
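/*
 * Usage sketch for the combinations handled above (illustrative only;
 * "prim" and "nid2" are placeholder NIDs):
 *
 *      rc = lnet_add_peer_ni(prim, LNET_NID_ANY, true, false);
 *              create a configured Multi-Rail peer with prim as primary NID
 *      rc = lnet_add_peer_ni(prim, nid2, true, false);
 *              add a second NID to that peer
 *      rc = lnet_add_peer_ni(prim, LNET_NID_ANY, true, true);
 *              temp == true omits LNET_PEER_CONFIGURED, creating a
 *              "temporary" peer not owned by DLC
 */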
1970
1971 /*
1972  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1973  *
1974  * This API handles the following combinations:
1975  *   Delete a NI from a peer if both prim_nid and nid are provided.
1976  *   Delete a peer if only prim_nid is provided.
1977  *   Delete a peer if the nid provided is its primary nid.
1978  *
1979  * The caller must hold ln_api_mutex. This prevents the peer from
1980  * being modified/deleted by a different thread.
1981  */
1982 int
1983 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1984 {
1985         struct lnet_peer *lp;
1986         struct lnet_peer_ni *lpni;
1987         unsigned flags;
1988
1989         if (prim_nid == LNET_NID_ANY)
1990                 return -EINVAL;
1991
1992         lpni = lnet_find_peer_ni_locked(prim_nid);
1993         if (!lpni)
1994                 return -ENOENT;
1995         lnet_peer_ni_decref_locked(lpni);
1996         lp = lpni->lpni_peer_net->lpn_peer;
1997
1998         if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
1999                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2000                        libcfs_nid2str(prim_nid),
2001                        libcfs_nidstr(&lp->lp_primary_nid));
2002                 return -ENODEV;
2003         }
2004
2005         lnet_net_lock(LNET_LOCK_EX);
2006         if (lp->lp_rtr_refcount > 0) {
2007                 lnet_net_unlock(LNET_LOCK_EX);
2008                 CERROR("%s is a router. Cannot be deleted\n",
2009                        libcfs_nid2str(prim_nid));
2010                 return -EBUSY;
2011         }
2012         lnet_net_unlock(LNET_LOCK_EX);
2013
2014         if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
2015                 return lnet_peer_del(lp);
2016
2017         flags = LNET_PEER_CONFIGURED;
2018         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2019                 flags |= LNET_PEER_MULTI_RAIL;
2020
2021         return lnet_peer_del_nid(lp, nid, flags);
2022 }
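/*
 * Usage sketch mirroring the combinations above (illustrative only):
 *
 *      rc = lnet_del_peer_ni(prim, LNET_NID_ANY);
 *              delete the whole peer known by prim
 *      rc = lnet_del_peer_ni(prim, nid2);
 *              delete only the nid2 NI from the peer
 *
 * Deleting a peer with lp_rtr_refcount > 0 (an active router) fails
 * with -EBUSY.
 */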
2023
2024 void
2025 lnet_destroy_peer_ni_locked(struct kref *ref)
2026 {
2027         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2028                                                  lpni_kref);
2029         struct lnet_peer_table *ptable;
2030         struct lnet_peer_net *lpn;
2031
2032         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2033
2034         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2035         LASSERT(list_empty(&lpni->lpni_txq));
2036         LASSERT(lpni->lpni_txqnob == 0);
2037         LASSERT(list_empty(&lpni->lpni_peer_nis));
2038         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2039
2040         lpn = lpni->lpni_peer_net;
2041         lpni->lpni_peer_net = NULL;
2042         lpni->lpni_net = NULL;
2043
2044         if (!list_empty(&lpni->lpni_hashlist)) {
2045                 /* remove the peer ni from the zombie list */
2046                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2047                 spin_lock(&ptable->pt_zombie_lock);
2048                 list_del_init(&lpni->lpni_hashlist);
2049                 ptable->pt_zombies--;
2050                 spin_unlock(&ptable->pt_zombie_lock);
2051         }
2052
2053         if (lpni->lpni_pref_nnids > 1) {
2054                 struct lnet_nid_list *ne, *tmp;
2055
2056                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2057                                          nl_list) {
2058                         list_del_init(&ne->nl_list);
2059                         LIBCFS_FREE(ne, sizeof(*ne));
2060                 }
2061         }
2062         LIBCFS_FREE(lpni, sizeof(*lpni));
2063
2064         if (lpn)
2065                 lnet_peer_net_decref_locked(lpn);
2066 }
2067
2068 struct lnet_peer_ni *
2069 lnet_nid2peerni_ex(struct lnet_nid *nid, int cpt)
2070 {
2071         struct lnet_peer_ni *lpni = NULL;
2072         int rc;
2073
2074         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2075                 return ERR_PTR(-ESHUTDOWN);
2076
2077         /*
2078          * find if a peer_ni already exists.
2079          * If so then just return that.
2080          */
2081         lpni = lnet_peer_ni_find_locked(nid);
2082         if (lpni)
2083                 return lpni;
2084
2085         lnet_net_unlock(cpt);
2086
2087         rc = lnet_peer_ni_traffic_add(nid, NULL);
2088         if (rc) {
2089                 lpni = ERR_PTR(rc);
2090                 goto out_net_relock;
2091         }
2092
2093         lpni = lnet_peer_ni_find_locked(nid);
2094         LASSERT(lpni);
2095
2096 out_net_relock:
2097         lnet_net_lock(cpt);
2098
2099         return lpni;
2100 }
2101
2102 /*
2103  * Get a peer_ni for the given nid, create it if necessary. Takes a
2104  * hold on the peer_ni.
2105  */
2106 struct lnet_peer_ni *
2107 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2108                         struct lnet_nid *pref, int cpt)
2109 {
2110         struct lnet_peer_ni *lpni = NULL;
2111         int rc;
2112
2113         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2114                 return ERR_PTR(-ESHUTDOWN);
2115
2116         /*
2117          * find if a peer_ni already exists.
2118          * If so then just return that.
2119          */
2120         lpni = lnet_peer_ni_find_locked(nid);
2121         if (lpni)
2122                 return lpni;
2123
2124         /*
2125          * Slow path:
2126          * use the lnet_api_mutex to serialize the creation of the peer_ni
2127          * and the creation/deletion of the local ni/net. When a local ni is
2128          * created, if there exists a set of peer_nis on that network,
2129          * they need to be traversed and updated. When a local NI is
2130          * deleted, which could result in a network being deleted, then
2131          * all peer nis on that network need to be removed as well.
2132          * deleted, which could result in a network being deleted, all
2133          * peer NIs on that network need to be removed as well.
2134          * creation through DLC.
2135          */
2136         lnet_net_unlock(cpt);
2137         mutex_lock(&the_lnet.ln_api_mutex);
2138         /*
2139          * Shutdown is only set while the ln_api_mutex is held, so a
2140          * single check here is sufficient.
2141          */
2142         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2143                 lpni = ERR_PTR(-ESHUTDOWN);
2144                 goto out_mutex_unlock;
2145         }
2146
2147         rc = lnet_peer_ni_traffic_add(nid, pref);
2148         if (rc) {
2149                 lpni = ERR_PTR(rc);
2150                 goto out_mutex_unlock;
2151         }
2152
2153         lpni = lnet_peer_ni_find_locked(nid);
2154         LASSERT(lpni);
2155
2156 out_mutex_unlock:
2157         mutex_unlock(&the_lnet.ln_api_mutex);
2158         lnet_net_lock(cpt);
2159
2160         /* Lock has been dropped, check again for shutdown. */
2161         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2162                 if (!IS_ERR(lpni))
2163                         lnet_peer_ni_decref_locked(lpni);
2164                 lpni = ERR_PTR(-ESHUTDOWN);
2165         }
2166
2167         return lpni;
2168 }
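/*
 * Caller sketch (illustrative): this is entered with the per-CPT net
 * lock held, and callers must handle the ERR_PTR() returns and
 * eventually drop the hold this function takes.
 *
 *      lnet_net_lock(cpt);
 *      lpni = lnet_peerni_by_nid_locked(&nid, NULL, cpt);
 *      if (IS_ERR(lpni)) {
 *              lnet_net_unlock(cpt);
 *              return PTR_ERR(lpni);
 *      }
 *      ... use lpni ...
 *      lnet_peer_ni_decref_locked(lpni);
 *      lnet_net_unlock(cpt);
 */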
2169
2170 struct lnet_peer_ni *
2171 lnet_nid2peerni_locked(lnet_nid_t nid4, lnet_nid_t pref4, int cpt)
2172 {
2173         struct lnet_nid nid, pref;
2174
2175         lnet_nid4_to_nid(nid4, &nid);
2176         lnet_nid4_to_nid(pref4, &pref);
2177         if (pref4 == LNET_NID_ANY)
2178                 return lnet_peerni_by_nid_locked(&nid, NULL, cpt);
2179         else
2180                 return lnet_peerni_by_nid_locked(&nid, &pref, cpt);
2181 }
2182
2183 bool
2184 lnet_peer_gw_discovery(struct lnet_peer *lp)
2185 {
2186         bool rc = false;
2187
2188         spin_lock(&lp->lp_lock);
2189         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2190                 rc = true;
2191         spin_unlock(&lp->lp_lock);
2192
2193         return rc;
2194 }
2195
2196 bool
2197 lnet_peer_is_uptodate(struct lnet_peer *lp)
2198 {
2199         bool rc;
2200
2201         spin_lock(&lp->lp_lock);
2202         rc = lnet_peer_is_uptodate_locked(lp);
2203         spin_unlock(&lp->lp_lock);
2204         return rc;
2205 }
2206
2207 /*
2208  * Is a peer uptodate from the point of view of discovery?
2209  *
2210  * If it is currently being processed, obviously not.
2211  * A forced Ping or Push is also handled by the discovery thread.
2212  *
2213  * Otherwise look at whether the peer needs rediscovering.
2214  */
2215 bool
2216 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2217 __must_hold(&lp->lp_lock)
2218 {
2219         bool rc;
2220
2221         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2222                             LNET_PEER_FORCE_PING |
2223                             LNET_PEER_FORCE_PUSH)) {
2224                 rc = false;
2225         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2226                 rc = false;
2227         } else if (lnet_peer_needs_push(lp)) {
2228                 rc = false;
2229         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2230                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2231                         rc = true;
2232                 else
2233                         rc = false;
2234         } else {
2235                 rc = false;
2236         }
2237
2238         return rc;
2239 }
2240
2241 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2242 void
2243 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2244 {
2245         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2246          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2247          * when adding to the list and queuing the peer to ensure that we do not
2248          * strand any messages on the lp_dc_pendq. This scheme ensures the
2249          * message will be resent even if the peer is already being discovered.
2250          * Therefore we needn't check the return value of
2251          * lnet_peer_queue_for_discovery(lp).
2252          */
2253         lnet_net_lock(LNET_LOCK_EX);
2254         spin_lock(&lp->lp_lock);
2255         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2256         spin_unlock(&lp->lp_lock);
2257         lnet_peer_queue_for_discovery(lp);
2258         lnet_net_unlock(LNET_LOCK_EX);
2259 }
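/*
 * For reference, a sketch of the matching splice on the discovery side
 * (see lnet_peer_discovery_complete() below): the same lock pair is
 * taken, so no message queued here can be stranded on lp_dc_pendq.
 *
 *      lnet_net_lock(LNET_LOCK_EX);
 *      spin_lock(&lp->lp_lock);
 *      list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
 *      spin_unlock(&lp->lp_lock);
 *      lnet_net_unlock(LNET_LOCK_EX);
 *      ... resend or finalize the messages on pending_msgs ...
 */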
2260
2261 /*
2262  * Queue a peer for the attention of the discovery thread.  Call with
2263  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2264  * -EALREADY if the peer was already queued.
2265  */
2266 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2267 {
2268         int rc;
2269
2270         spin_lock(&lp->lp_lock);
2271         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2272                 lp->lp_state |= LNET_PEER_DISCOVERING;
2273         spin_unlock(&lp->lp_lock);
2274         if (list_empty(&lp->lp_dc_list)) {
2275                 lnet_peer_addref_locked(lp);
2276                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2277                 wake_up(&the_lnet.ln_dc_waitq);
2278                 rc = 0;
2279         } else {
2280                 rc = -EALREADY;
2281         }
2282
2283         CDEBUG(D_NET, "Queue peer %s: %d\n",
2284                libcfs_nidstr(&lp->lp_primary_nid), rc);
2285
2286         return rc;
2287 }
2288
2289 /*
2290  * Discovery of a peer is complete. Wake all waiters on the peer.
2291  * Call with lnet_net_lock/EX held.
2292  */
2293 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2294 {
2295         struct lnet_msg *msg, *tmp;
2296         int rc = 0;
2297         LIST_HEAD(pending_msgs);
2298
2299         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2300                libcfs_nidstr(&lp->lp_primary_nid));
2301
2302         list_del_init(&lp->lp_dc_list);
2303         spin_lock(&lp->lp_lock);
2304         if (dc_error) {
2305                 lp->lp_dc_error = dc_error;
2306                 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2307                 lp->lp_state |= LNET_PEER_REDISCOVER;
2308         }
2309         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2310         spin_unlock(&lp->lp_lock);
2311         wake_up(&lp->lp_dc_waitq);
2312
2313         if (lp->lp_rtr_refcount > 0)
2314                 lnet_router_discovery_complete(lp);
2315
2316         lnet_net_unlock(LNET_LOCK_EX);
2317
2318         /* iterate through all pending messages and send them again */
2319         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2320                 list_del_init(&msg->msg_list);
2321                 if (dc_error) {
2322                         lnet_finalize(msg, dc_error);
2323                         continue;
2324                 }
2325
2326                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2327                        lnet_msgtyp2str(msg->msg_type),
2328                        libcfs_idstr(&msg->msg_target));
2329                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2330                                &msg->msg_rtr_nid_param);
2331                 if (rc < 0) {
2332                         CNETERR("Error sending %s to %s: %d\n",
2333                                lnet_msgtyp2str(msg->msg_type),
2334                                libcfs_idstr(&msg->msg_target), rc);
2335                         lnet_finalize(msg, rc);
2336                 }
2337         }
2338         lnet_net_lock(LNET_LOCK_EX);
2339         lnet_peer_decref_locked(lp);
2340 }
2341
2342 /*
2343  * Handle inbound push.
2344  * Like any event handler, called with lnet_res_lock/CPT held.
2345  */
2346 void lnet_peer_push_event(struct lnet_event *ev)
2347 {
2348         struct lnet_ping_buffer *pbuf;
2349         struct lnet_peer *lp;
2350
2351         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2352
2353         /* lnet_find_peer() adds a refcount */
2354         lp = lnet_find_peer(&ev->source.nid);
2355         if (!lp) {
2356                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2357                        libcfs_nidstr(&ev->initiator.nid),
2358                        libcfs_nidstr(&ev->source.nid));
2359                 pbuf->pb_needs_post = true;
2360                 return;
2361         }
2362
2363         /* Ensure peer state remains consistent while we modify it. */
2364         spin_lock(&lp->lp_lock);
2365
2366         /*
2367          * If some kind of error happened the contents of the message
2368          * cannot be used. Clear the NIDS_UPTODATE and set the
2369          * FORCE_PING flag to trigger a ping.
2370          */
2371         if (ev->status) {
2372                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2373                 lp->lp_state |= LNET_PEER_FORCE_PING;
2374                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2375                        ev->status,
2376                        libcfs_nidstr(&lp->lp_primary_nid),
2377                        libcfs_nidstr(&ev->source.nid));
2378                 goto out;
2379         }
2380
2381         /*
2382          * A push with invalid or corrupted info. Clear the UPTODATE
2383          * flag to trigger a ping.
2384          */
2385         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2386                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2387                 lp->lp_state |= LNET_PEER_FORCE_PING;
2388                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2389                        libcfs_nidstr(&lp->lp_primary_nid));
2390                 goto out;
2391         }
2392
2393         /*
2394          * Make sure we'll allocate the correct size ping buffer when
2395          * pinging the peer.
2396          */
2397         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2398                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2399
2400         /*
2401          * A non-Multi-Rail peer is not supposed to be capable of
2402          * sending a push.
2403          */
2404         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2405                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2406                        libcfs_nidstr(&lp->lp_primary_nid));
2407                 goto out;
2408         }
2409
2410         /*
2411          * The peer may have discovery disabled at its end. Set
2412          * NO_DISCOVERY as appropriate.
2413          */
2414         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2415                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2416                        libcfs_nidstr(&lp->lp_primary_nid));
2417                 /*
2418                  * Mark the peer for deletion if we already know about it
2419                  * and it's going from discovery set to no discovery set
2420                  */
2421                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2422                                       LNET_PEER_DISCOVERING)) &&
2423                      lp->lp_state & LNET_PEER_DISCOVERED) {
2424                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2425                                libcfs_nidstr(&lp->lp_primary_nid),
2426                                lp->lp_state);
2427                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2428                 }
2429                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2430         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2431                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2432                        libcfs_nidstr(&lp->lp_primary_nid));
2433                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2434         }
2435
2436         /*
2437          * Update the MULTI_RAIL flag based on the push. If the peer
2438          * was configured with DLC then the setting should match what
2439          * DLC put in.
2440          * NB: We verified above that the MR feature bit is set in pi_features
2441          */
2442         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2443                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2444                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2445         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2446                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2447                       libcfs_nidstr(&lp->lp_primary_nid));
2448         } else if (lnet_peer_discovery_disabled) {
2449                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2450                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2451         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2452                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2453                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2454         } else {
2455                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2456                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2457                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2458                 lnet_peer_clr_non_mr_pref_nids(lp);
2459         }
2460
2461         /*
2462          * Check for truncation of the Put message. Clear the
2463          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2464          * and tell discovery to allocate a bigger buffer.
2465          */
2466         if (ev->mlength < ev->rlength) {
2467                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2468                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2469                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2470                 lp->lp_state |= LNET_PEER_FORCE_PING;
2471                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2472                        libcfs_nidstr(&lp->lp_primary_nid),
2473                        pbuf->pb_info.pi_nnis);
2474                 goto out;
2475         }
2476
2477         /* always assume new data */
2478         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2479         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2480
2481         /*
2482          * If there is data present that hasn't been processed yet,
2483          * we'll replace it if the Put contained newer data and it
2484          * fits. We're racing with a Ping or earlier Push in this
2485          * case.
2486          */
2487         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2488                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2489                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2490                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2491                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2492                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2493                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2494                               libcfs_nidstr(&lp->lp_primary_nid),
2495                               LNET_PING_BUFFER_SEQNO(pbuf),
2496                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2497                 }
2498                 goto out;
2499         }
2500
2501         /*
2502          * Allocate a buffer to copy the data. On a failure we drop
2503          * the Push and set FORCE_PING to force the discovery
2504          * thread to fix the problem by pinging the peer.
2505          */
2506         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2507         if (!lp->lp_data) {
2508                 lp->lp_state |= LNET_PEER_FORCE_PING;
2509                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2510                        libcfs_nidstr(&lp->lp_primary_nid),
2511                        LNET_PING_BUFFER_SEQNO(pbuf));
2512                 goto out;
2513         }
2514
2515         /* Success */
2516         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2517                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2518         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2519         CDEBUG(D_NET, "Received Push %s %u\n",
2520                libcfs_nidstr(&lp->lp_primary_nid),
2521                LNET_PING_BUFFER_SEQNO(pbuf));
2522
2523 out:
2524         /* We've processed this buffer. It can be reposted */
2525         pbuf->pb_needs_post = true;
2526
2527         /*
2528          * Queue the peer for discovery if it is not up to date. If it was
2529          * already queued, move it to the head of the request queue and wake
2530          * the discovery thread, because its status has changed.
2531          */
2532         spin_unlock(&lp->lp_lock);
2533         lnet_net_lock(LNET_LOCK_EX);
2534         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2535                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2536                 wake_up(&the_lnet.ln_dc_waitq);
2537         }
2538         /* Drop refcount from lookup */
2539         lnet_peer_decref_locked(lp);
2540         lnet_net_unlock(LNET_LOCK_EX);
2541 }
2542
2543 /*
2544  * Clear the discovery error state, unless we're already discovering
2545  * this peer, in which case the error is current.
2546  */
2547 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2548 {
2549         spin_lock(&lp->lp_lock);
2550         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2551                 lp->lp_dc_error = 0;
2552         spin_unlock(&lp->lp_lock);
2553 }
2554
2555 /*
2556  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2557  * dropped/retaken within this function. An lnet_peer_ni is passed in
2558  * because discovery could tear down an lnet_peer.
2559  */
2560 int
2561 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2562 {
2563         DEFINE_WAIT(wait);
2564         struct lnet_peer *lp;
2565         int rc = 0;
2566         int count = 0;
2567
2568 again:
2569         lnet_net_unlock(cpt);
2570         lnet_net_lock(LNET_LOCK_EX);
2571         lp = lpni->lpni_peer_net->lpn_peer;
2572         lnet_peer_clear_discovery_error(lp);
2573
2574         /*
2575          * We're willing to be interrupted. The lpni can become a
2576          * zombie if we race with DLC, so we must check for that.
2577          */
2578         for (;;) {
2579                 /* Keep lp alive when the lnet_net_lock is unlocked */
2580                 lnet_peer_addref_locked(lp);
2581                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2582                 if (signal_pending(current))
2583                         break;
2584                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2585                         break;
2586                 /*
2587                  * Don't repeat discovery if discovery is disabled. This
2588                  * ensures discovery can also serve as a standard ping, for
2589                  * backwards compatibility with routers that do not support
2590                  * discovery or have it disabled.
2591                  */
2592                 if (lnet_is_discovery_disabled(lp) && count > 0)
2593                         break;
2594                 if (lp->lp_dc_error)
2595                         break;
2596                 if (lnet_peer_is_uptodate(lp))
2597                         break;
2598                 lnet_peer_queue_for_discovery(lp);
2599                 count++;
2600                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2601
2602                 /*
2603                  * If caller requested a non-blocking operation then
2604                  * return immediately. Once discovery is complete any
2605                  * pending messages that were stopped due to discovery
2606                  * will be transmitted.
2607                  */
2608                 if (!block)
2609                         break;
2610
2611                 lnet_net_unlock(LNET_LOCK_EX);
2612                 schedule();
2613                 finish_wait(&lp->lp_dc_waitq, &wait);
2614                 lnet_net_lock(LNET_LOCK_EX);
2615                 lnet_peer_decref_locked(lp);
2616                 /* Peer may have changed */
2617                 lp = lpni->lpni_peer_net->lpn_peer;
2618         }
2619         finish_wait(&lp->lp_dc_waitq, &wait);
2620
2621         lnet_net_unlock(LNET_LOCK_EX);
2622         lnet_net_lock(cpt);
2623         lnet_peer_decref_locked(lp);
2624         /*
2625          * The peer may have changed, so re-check and rediscover if that turns
2626          * out to have been the case. The reference count on lp ensured that
2627          * even if it was unlinked from lpni the memory could not be recycled.
2628          * Thus the check below is sufficient to determine whether the peer
2629          * changed. If the peer changed, then lp must not be dereferenced.
2630          */
2631         if (lp != lpni->lpni_peer_net->lpn_peer)
2632                 goto again;
2633
2634         if (signal_pending(current))
2635                 rc = -EINTR;
2636         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2637                 rc = -ESHUTDOWN;
2638         else if (lp->lp_dc_error)
2639                 rc = lp->lp_dc_error;
2640         else if (!block)
2641                 CDEBUG(D_NET, "non-blocking discovery\n");
2642         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2643                 goto again;
2644
2645         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2646                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2647                libcfs_nidstr(&lpni->lpni_nid), rc,
2648                (!block) ? "pending discovery" : "discovery complete");
2649
2650         return rc;
2651 }
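/*
 * Caller sketch (illustrative), mirroring the pattern used by
 * LNetPrimaryNID() above: because this function drops and retakes the
 * net lock, the lpni (and its peer) may have changed, so it must be
 * looked up again after the call returns.
 *
 *      rc = lnet_discover_peer_locked(lpni, cpt, true);
 *      if (rc)
 *              goto out_decref;
 *      lnet_peer_ni_decref_locked(lpni);
 *      lpni = lnet_find_peer_ni_locked(nid);
 */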
2652
2653 /* Handle an incoming ack for a push. */
2654 static void
2655 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2656 {
2657         struct lnet_ping_buffer *pbuf;
2658
2659         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2660         spin_lock(&lp->lp_lock);
2661         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2662         lp->lp_push_error = ev->status;
2663         if (ev->status)
2664                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2665         else
2666                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2667         spin_unlock(&lp->lp_lock);
2668
2669         CDEBUG(D_NET, "peer %s ev->status %d\n",
2670                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2671 }
2672
2673 /* Handle a Reply message. This is the reply to a Ping message. */
2674 static void
2675 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2676 {
2677         struct lnet_ping_buffer *pbuf;
2678         int rc;
2679
2680         spin_lock(&lp->lp_lock);
2681
2682         lp->lp_disc_src_nid = ev->target.nid;
2683         lp->lp_disc_dst_nid = ev->source.nid;
2684
2685         /*
2686          * If some kind of error happened the contents of message
2687          * cannot be used. Set PING_FAILED to trigger a retry.
2688          */
2689         if (ev->status) {
2690                 lp->lp_state |= LNET_PEER_PING_FAILED;
2691                 lp->lp_ping_error = ev->status;
2692                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2693                        ev->status,
2694                        libcfs_nidstr(&lp->lp_primary_nid),
2695                        libcfs_nidstr(&ev->source.nid));
2696                 goto out;
2697         }
2698
2699         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2700         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2701                 lnet_swap_pinginfo(pbuf);
2702
2703         /*
2704          * A reply with invalid or corrupted info. Set PING_FAILED to
2705          * trigger a retry.
2706          */
2707         rc = lnet_ping_info_validate(&pbuf->pb_info);
2708         if (rc) {
2709                 lp->lp_state |= LNET_PEER_PING_FAILED;
2710                 lp->lp_ping_error = 0;
2711                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2712                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2713                 goto out;
2714         }
2715
2716         /*
2717          * The peer may have discovery disabled at its end. Set
2718          * NO_DISCOVERY as appropriate.
2719          */
2720         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2721             lnet_peer_discovery_disabled) {
2722                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2723                        libcfs_nidstr(&lp->lp_primary_nid));
2724
2725                 /* Detect whether this peer has toggled discovery from on to
2726                  * off and whether we can delete and re-create the peer. Peers
2727                  * that were manually configured cannot be deleted by discovery.
2728                  * We need to delete this peer and re-create it if the peer was
2729                  * not configured manually, is currently considered DD capable,
2730                  * and either:
2731                  * 1. We've already discovered the peer (the peer has toggled
2732                  *    the discovery feature from on to off), or
2733                  * 2. The peer is considered MR, but it was not user configured
2734                  *    (this was a "temporary" peer created via the kernel APIs
2735                  *     that we're discovering for the first time)
2736                  */
2737                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2738                                       LNET_PEER_NO_DISCOVERY)) &&
2739                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2740                                      LNET_PEER_MULTI_RAIL))) {
2741                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2742                                libcfs_nidstr(&lp->lp_primary_nid),
2743                                lp->lp_state);
2744                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2745                 }
2746                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2747         } else {
2748                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2749                        libcfs_nidstr(&lp->lp_primary_nid));
2750                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2751         }
2752
2753         /*
2754          * Update the MULTI_RAIL flag based on the reply. If the peer
2755          * was configured with DLC then the setting should match what
2756          * DLC put in.
2757          */
2758         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2759                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2760                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2761                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2762                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2763                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2764                               libcfs_nidstr(&lp->lp_primary_nid));
2765                 } else if (lnet_peer_discovery_disabled) {
2766                         CDEBUG(D_NET,
2767                                "peer %s(%p) not MR: DD disabled locally\n",
2768                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2769                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2770                         CDEBUG(D_NET,
2771                                "peer %s(%p) not MR: DD disabled remotely\n",
2772                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2773                 } else {
2774                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2775                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2776                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2777                         lnet_peer_clr_non_mr_pref_nids(lp);
2778                 }
2779         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2780                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2781                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2782                               libcfs_nidstr(&lp->lp_primary_nid));
2783                 } else {
2784                         CERROR("Multi-Rail state vanished from %s\n",
2785                                libcfs_nidstr(&lp->lp_primary_nid));
2786                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2787                 }
2788         }
2789
2790         /*
2791          * Make sure we'll allocate the correct size ping buffer when
2792          * pinging the peer.
2793          */
2794         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2795                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2796
2797         /*
2798          * Check for truncation of the Reply. Clear PING_SENT and set
2799          * PING_FAILED to trigger a retry.
2800          */
2801         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2802                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2803                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2804                 lp->lp_state |= LNET_PEER_PING_FAILED;
2805                 lp->lp_ping_error = 0;
2806                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2807                        libcfs_nidstr(&lp->lp_primary_nid),
2808                        pbuf->pb_info.pi_nnis);
2809                 goto out;
2810         }
2811
2812         /*
2813          * Check the sequence numbers in the reply. These are only
2814          * available if the reply came from a Multi-Rail peer.
2815          */
2816         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2817             pbuf->pb_info.pi_nnis > 1 &&
2818             lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2819             pbuf->pb_info.pi_ni[1].ns_nid) {
2820                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2821                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2822                                 libcfs_nidstr(&lp->lp_primary_nid),
2823                                 LNET_PING_BUFFER_SEQNO(pbuf),
2824                                 lp->lp_peer_seqno);
2825
2826                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2827         }
2828
2829         /* We're happy with the state of the data in the buffer. */
2830         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2831                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2832                lp->lp_state);
2833         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2834                 lnet_ping_buffer_decref(lp->lp_data);
2835         else
2836                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2837         lnet_ping_buffer_addref(pbuf);
2838         lp->lp_data = pbuf;
2839 out:
2840         lp->lp_state &= ~LNET_PEER_PING_SENT;
2841         spin_unlock(&lp->lp_lock);
2842
2843         lnet_net_lock(LNET_LOCK_EX);
2844         /*
2845          * If this peer is a gateway, call the routing callback to
2846          * handle the ping reply
2847          */
2848         if (lp->lp_rtr_refcount > 0)
2849                 lnet_router_discovery_ping_reply(lp);
2850         lnet_net_unlock(LNET_LOCK_EX);
2851 }
2852
2853 /*
2854  * Send event handling. Only matters for error cases, where we clean
2855  * up state on the peer and peer_ni that would otherwise be updated in
2856  * the REPLY event handler for a successful Ping, and the ACK event
2857  * handler for a successful Push.
2858  */
2859 static int
2860 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2861 {
2862         int rc = 0;
2863
2864         if (!ev->status)
2865                 goto out;
2866
2867         spin_lock(&lp->lp_lock);
2868         if (ev->msg_type == LNET_MSG_GET) {
2869                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2870                 lp->lp_state |= LNET_PEER_PING_FAILED;
2871                 lp->lp_ping_error = ev->status;
2872         } else { /* ev->msg_type == LNET_MSG_PUT */
2873                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2874                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2875                 lp->lp_push_error = ev->status;
2876         }
2877         spin_unlock(&lp->lp_lock);
2878         rc = LNET_REDISCOVER_PEER;
2879 out:
2880         CDEBUG(D_NET, "%s Send to %s: %d\n",
2881                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2882                 libcfs_nidstr(&ev->target.nid), rc);
2883         return rc;
2884 }
2885
2886 /*
2887  * LNetMDUnlink() caused the MD to be unlinked. If this call was
2888  * LNetMDUnlink() caused the event to be unlinked. If this call was
2889  * made after the event was set up in LNetGet() or LNetPut() then we
2890  * assume the Ping or Push timed out.
2891  */
2892 static void
2893 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2894 {
2895         spin_lock(&lp->lp_lock);
2896         /* We've passed through LNetGet() */
2897         if (lp->lp_state & LNET_PEER_PING_SENT) {
2898                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2899                 lp->lp_state |= LNET_PEER_PING_FAILED;
2900                 lp->lp_ping_error = -ETIMEDOUT;
2901                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2902                         libcfs_nidstr(&lp->lp_primary_nid));
2903         }
2904         /* We've passed through LNetPut() */
2905         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2906                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2907                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2908                 lp->lp_push_error = -ETIMEDOUT;
2909                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2910                         libcfs_nidstr(&lp->lp_primary_nid));
2911         }
2912         spin_unlock(&lp->lp_lock);
2913 }
2914
2915 /*
2916  * Event handler for the discovery EQ.
2917  *
2918  * Called with lnet_res_lock(cpt) held. The cpt is the
2919  * lnet_cpt_of_cookie() of the md handle cookie.
2920  */
2921 static void lnet_discovery_event_handler(struct lnet_event *event)
2922 {
2923         struct lnet_peer *lp = event->md_user_ptr;
2924         struct lnet_ping_buffer *pbuf;
2925         int rc;
2926
2927         /* discovery needs to take another look */
2928         rc = LNET_REDISCOVER_PEER;
2929
2930         CDEBUG(D_NET, "Received event: %d\n", event->type);
2931
2932         switch (event->type) {
2933         case LNET_EVENT_ACK:
2934                 lnet_discovery_event_ack(lp, event);
2935                 break;
2936         case LNET_EVENT_REPLY:
2937                 lnet_discovery_event_reply(lp, event);
2938                 break;
2939         case LNET_EVENT_SEND:
2940                 /* Only send failure triggers a retry. */
2941                 rc = lnet_discovery_event_send(lp, event);
2942                 break;
2943         case LNET_EVENT_UNLINK:
2944                 /* LNetMDUnlink() was called */
2945                 lnet_discovery_event_unlink(lp, event);
2946                 break;
2947         default:
2948                 /* Invalid events. */
2949                 LBUG();
2950         }
2951         lnet_net_lock(LNET_LOCK_EX);
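        /* The MD has been unlinked as part of this event: drop the
         * references that were taken for it on the ping buffer and on
         * the peer.
         */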
2952         if (event->unlinked) {
2953                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2954                 lnet_ping_buffer_decref(pbuf);
2955                 lnet_peer_decref_locked(lp);
2956         }
2957
2958         /* put peer back at end of request queue, if discovery not already
2959          * done */
2960         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2961             lnet_peer_queue_for_discovery(lp)) {
2962                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2963                 wake_up(&the_lnet.ln_dc_waitq);
2964         }
2965         lnet_net_unlock(LNET_LOCK_EX);
2966 }
2967
2968 /*
2969  * Build a peer from incoming data.
2970  *
2971  * The NIDs in the incoming data are supposed to be structured as follows:
2972  *  - loopback
2973  *  - primary NID
2974  *  - other NIDs in same net
2975  *  - NIDs in second net
2976  *  - NIDs in third net
2977  *  - ...
2978  * This is due to the way the list of NIDs in the data is created.
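 *
 * For example (illustrative addresses only), a multi-rail peer with
 * primary NID 192.168.1.1@tcp and a second interface on o2ib might
 * report:
 *   pi_ni[0]: 0@lo
 *   pi_ni[1]: 192.168.1.1@tcp   (primary NID)
 *   pi_ni[2]: 192.168.1.2@tcp
 *   pi_ni[3]: 10.0.0.1@o2ib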
2979  *
2980  * Note that this function will mark the peer uptodate unless an
2981  * ENOMEM is encountered. All other errors are due to a conflict
2982  * between the DLC configuration and what discovery sees. We treat DLC
2983  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2984  * peer from becoming stuck in discovery.
2985  */
2986 static int lnet_peer_merge_data(struct lnet_peer *lp,
2987                                 struct lnet_ping_buffer *pbuf)
2988 {
2989         struct lnet_peer_net *lpn;
2990         struct lnet_peer_ni *lpni;
2991         lnet_nid_t *curnis = NULL;
2992         struct lnet_ni_status *addnis = NULL;
2993         lnet_nid_t *delnis = NULL;
2994         unsigned flags;
2995         int ncurnis;
2996         int naddnis;
2997         int ndelnis;
2998         int nnis = 0;
2999         int i;
3000         int j;
3001         int rc;
3002
3003         flags = LNET_PEER_DISCOVERED;
3004         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3005                 flags |= LNET_PEER_MULTI_RAIL;
3006
3007         /*
3008          * Cache the routing feature for the peer: whether it is enabled
3009          * or disabled as reported by the remote peer.
3010          */
3011         spin_lock(&lp->lp_lock);
3012         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3013                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3014         else
3015                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3016         spin_unlock(&lp->lp_lock);
3017
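        /* Size the scratch arrays to the larger of the number of NIDs we
         * currently have for the peer and the number reported in the ping
         * buffer; each of the current/add/delete lists is bounded by that.
         */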
3018         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
3019         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3020         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3021         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3022         if (!curnis || !addnis || !delnis) {
3023                 rc = -ENOMEM;
3024                 goto out;
3025         }
3026         ncurnis = 0;
3027         naddnis = 0;
3028         ndelnis = 0;
3029
3030         /* Construct the list of NIDs present in the peer. */
3031         lpni = NULL;
3032         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3033                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
3034
3035         /*
3036          * Check for NIDs in pbuf not present in curnis[].
3037          * The loop starts at 1 to skip the loopback NID.
3038          */
3039         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
3040                 for (j = 0; j < ncurnis; j++)
3041                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
3042                                 break;
3043                 if (j == ncurnis)
3044                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
3045         }
3046         /*
3047          * Check for NIDs in curnis[] not present in pbuf.
3048          * The nested loop starts at 1 to skip the loopback NID.
3049          *
3050          * But never add the loopback NID to delnis[]: if it is
3051          * present in curnis[] then this peer is for this node.
3052          */
3053         for (i = 0; i < ncurnis; i++) {
3054                 if (curnis[i] == LNET_NID_LO_0)
3055                         continue;
3056                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3057                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3058                                 /*
3059                                  * update the information we cache for the
3060                                  * peer with the latest information we
3061                                  * received
3062                                  */
3063                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
3064                                 if (lpni) {
3065                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3066                                         lnet_peer_ni_decref_locked(lpni);
3067                                 }
3068                                 break;
3069                         }
3070                 }
3071                 if (j == pbuf->pb_info.pi_nnis)
3072                         delnis[ndelnis++] = curnis[i];
3073         }
3074
3075         /*
3076          * If we get here and discovery is disabled then we don't want
3077          * to add or delete any NIs. We have just updated the ones we
3078          * have information on, so we are done.
3079          */
3080         rc = 0;
3081         if (lnet_is_discovery_disabled(lp))
3082                 goto out;
3083
3084         for (i = 0; i < naddnis; i++) {
3085                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3086                 if (rc) {
3087                         CERROR("Error adding NID %s to peer %s: %d\n",
3088                                libcfs_nid2str(addnis[i].ns_nid),
3089                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3090                         if (rc == -ENOMEM)
3091                                 goto out;
3092                 }
3093                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3094                 if (lpni) {
3095                         lpni->lpni_ns_status = addnis[i].ns_status;
3096                         lnet_peer_ni_decref_locked(lpni);
3097                 }
3098         }
3099
3100         for (i = 0; i < ndelnis; i++) {
3101                 /*
3102                  * For routers it is okay to delete the primary_nid because
3103                  * the upper layers don't really rely on it, so if the router
3104                  * reports that its primary_nid has changed it is okay to
3105                  * delete the old one.
3106                  */
3107                 if (lp->lp_rtr_refcount > 0)
3108                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3109                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3110                 if (rc) {
3111                         CERROR("Error deleting NID %s from peer %s: %d\n",
3112                                libcfs_nid2str(delnis[i]),
3113                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3114                         if (rc == -ENOMEM)
3115                                 goto out;
3116                 }
3117         }
3118
3119         /* The peer net for the primary NID should be the first entry in the
3120          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3121          * be the first entry in its peer net's lpn_peer_nis list.
3122          */
3123         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3124         if (!lpni) {
3125                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3126                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3127                 goto out;
3128         }
3129
3130         lnet_peer_ni_decref_locked(lpni);
3131
3132         lpn = lpni->lpni_peer_net;
3133         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3134                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3135
3136         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3137                 list_move(&lpni->lpni_peer_nis,
3138                           &lpni->lpni_peer_net->lpn_peer_nis);
3139
3140         /*
3141          * Errors other than -ENOMEM are due to peers having been
3142          * configured with DLC. Ignore these because DLC overrides
3143          * Discovery.
3144          */
3145         rc = 0;
3146 out:
3147         CFS_FREE_PTR_ARRAY(curnis, nnis);
3148         CFS_FREE_PTR_ARRAY(addnis, nnis);
3149         CFS_FREE_PTR_ARRAY(delnis, nnis);
3150         lnet_ping_buffer_decref(pbuf);
3151         CDEBUG(D_NET, "peer %s (%p): %d\n",
3152                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3153
3154         if (rc) {
3155                 spin_lock(&lp->lp_lock);
3156                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3157                 lp->lp_state |= LNET_PEER_FORCE_PING;
3158                 spin_unlock(&lp->lp_lock);
3159         }
3160         return rc;
3161 }
3162
3163 /*
3164  * The data in pbuf says lp is its primary peer, but the data was
3165  * received by a different peer. Try to update lp with the data.
3166  */
3167 static int
3168 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3169 {
3170         struct lnet_handle_md mdh;
3171
3172         /* Queue lp for discovery, and force it on the request queue. */
3173         lnet_net_lock(LNET_LOCK_EX);
3174         if (lnet_peer_queue_for_discovery(lp))
3175                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3176         lnet_net_unlock(LNET_LOCK_EX);
3177
3178         LNetInvalidateMDHandle(&mdh);
3179
3180         /*
3181          * Decide whether we can move the peer to the DATA_PRESENT state.
3182          *
3183          * We replace stale data for a multi-rail peer, repair PING_FAILED
3184          * status, and preempt FORCE_PING.
3185          *
3186          * If after that we have DATA_PRESENT, we merge it into this peer.
3187          */
3188         spin_lock(&lp->lp_lock);
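        /* For a multi-rail peer, prefer the newer data: accept the incoming
         * buffer if its sequence number is newer than the one we have
         * recorded; otherwise, if data is already queued on this peer,
         * process that data instead and drop the incoming buffer.
         */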
3189         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3190                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3191                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3192                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3193                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3194                         lnet_ping_buffer_decref(pbuf);
3195                         pbuf = lp->lp_data;
3196                         lp->lp_data = NULL;
3197                 }
3198         }
3199         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3200                 lnet_ping_buffer_decref(lp->lp_data);
3201                 lp->lp_data = NULL;
3202                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3203         }
3204         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3205                 mdh = lp->lp_ping_mdh;
3206                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3207                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3208                 lp->lp_ping_error = 0;
3209         }
3210         if (lp->lp_state & LNET_PEER_FORCE_PING)
3211                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3212         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3213         spin_unlock(&lp->lp_lock);
3214
3215         if (!LNetMDHandleIsInvalid(mdh))
3216                 LNetMDUnlink(mdh);
3217
3218         if (pbuf)
3219                 return lnet_peer_merge_data(lp, pbuf);
3220
3221         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3222         return 0;
3223 }
3224
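/* Check whether @nid appears among the NIDs listed in the ping info. */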
3225 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3226 {
3227         int i;
3228
3229         for (i = 0; i < pinfo->pi_nnis; i++) {
3230                 if (pinfo->pi_ni[i].ns_nid == nid)
3231                         return true;
3232         }
3233
3234         return false;
3235 }
3236
3237 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3238  * to the discovery queue a reference was taken that will prevent the peer from
3239  * actually being freed by this function. After this function exits the
3240  * discovery thread should call lnet_peer_discovery_complete() which will
3241  * drop that reference as well as wake any waiters that may also be holding a
3242  * ref on the peer.
3243  */
3244 static int lnet_peer_deletion(struct lnet_peer *lp)
3245 __must_hold(&lp->lp_lock)
3246 {
3247         struct list_head rlist;
3248         struct lnet_route *route, *tmp;
3249         int sensitivity = lp->lp_health_sensitivity;
3250         int rc;
3251
3252         INIT_LIST_HEAD(&rlist);
3253
3254         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3255                           LNET_PEER_FORCE_PUSH);
3256         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3257                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3258
3259         /* no-op if lnet_peer_del() has already been called on this peer */
3260         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3261                 return 0;
3262
3263         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3264                 return -ESHUTDOWN;
3265
3266         spin_unlock(&lp->lp_lock);
3267
3268         mutex_lock(&the_lnet.ln_api_mutex);
3269
3270         lnet_net_lock(LNET_LOCK_EX);
3271         /* remove the peer from the discovery work
3272          * queue if it's on there, in preparation
3273          * for deleting it.
3274          */
3275         if (!list_empty(&lp->lp_dc_list))
3276                 list_del_init(&lp->lp_dc_list);
3277         list_for_each_entry_safe(route, tmp,
3278                                  &lp->lp_routes,
3279                                  lr_gwlist)
3280                 lnet_move_route(route, NULL, &rlist);
3281         lnet_net_unlock(LNET_LOCK_EX);
3282
3283         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3284         rc = lnet_peer_del(lp);
3285         if (rc)
3286                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3287                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3288
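        /* Re-add the routes that used this peer as a gateway so that they
         * are attached to freshly created gateway peer entries.
         */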
3289         list_for_each_entry_safe(route, tmp,
3290                                  &rlist, lr_list) {
3291                 /* re-add these routes */
3292                 lnet_add_route(route->lr_net,
3293                                route->lr_hops,
3294                                &route->lr_nid,
3295                                route->lr_priority,
3296                                sensitivity);
3297                 LIBCFS_FREE(route, sizeof(*route));
3298         }
3299
3300         mutex_unlock(&the_lnet.ln_api_mutex);
3301
3302         spin_lock(&lp->lp_lock);
3303
3304         return 0;
3305 }
3306
3307 /*
3308  * Update a peer using the data received.
3309  */
3310 static int lnet_peer_data_present(struct lnet_peer *lp)
3311 __must_hold(&lp->lp_lock)
3312 {
3313         struct lnet_ping_buffer *pbuf;
3314         struct lnet_peer_ni *lpni;
3315         lnet_nid_t nid = LNET_NID_ANY;
3316         unsigned flags;
3317         int rc = 0;
3318
3319         pbuf = lp->lp_data;
3320         lp->lp_data = NULL;
3321         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3322         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3323         spin_unlock(&lp->lp_lock);
3324
3325         /*
3326          * Modifications of peer structures are done while holding the
3327          * ln_api_mutex. A global lock is required because we may be
3328          * modifying multiple peer structures, and a mutex greatly
3329          * simplifies memory management.
3330          *
3331          * The actual changes to the data structures must also protect
3332          * against concurrent lookups, for which the lnet_net_lock in
3333          * LNET_LOCK_EX mode is used.
3334          */
3335         mutex_lock(&the_lnet.ln_api_mutex);
3336         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3337                 rc = -ESHUTDOWN;
3338                 goto out;
3339         }
3340
3341         /*
3342          * If this peer is not on the peer list then it is being torn
3343          * down, and our reference count may be all that is keeping it
3344          * alive. Don't do any work on it.
3345          */
3346         if (list_empty(&lp->lp_peer_list))
3347                 goto out;
3348
3349         flags = LNET_PEER_DISCOVERED;
3350         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3351                 flags |= LNET_PEER_MULTI_RAIL;
3352
3353         /*
3354          * Check whether the primary NID in the message matches the
3355          * primary NID of the peer. If it does, update the peer; if
3356          * it does not, check whether there is already a peer with
3357          * that primary NID. If no such peer exists, try to update
3358          * the primary NID of the current peer (allowed if it was
3359          * created due to message traffic) and complete the update.
3360          * If the peer did exist, hand off the data to it.
3361          *
3362          * The peer for the loopback interface is a special case: this
3363          * is the peer for the local node, and we want to set its
3364          * primary NID to the correct value here. Moreover, this peer
3365          * can show up with only the loopback NID in the ping buffer.
3366          */
3367         if (pbuf->pb_info.pi_nnis <= 1)
3368                 goto out;
3369         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3370         if (nid_is_lo0(&lp->lp_primary_nid)) {
3371                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3372                 if (!rc)
3373                         rc = lnet_peer_merge_data(lp, pbuf);
3374         /*
3375          * If the primary NID of the peer is present in the ping info
3376          * returned by the peer, but it is not the primary NID we have
3377          * cached locally, and discovery is disabled, then we don't want
3378          * to update our local peer info by adding or removing NIDs; we
3379          * just want to update the status of the NIDs we currently have
3380          * recorded for that peer.
3381          */
3382         } else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
3383                    (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3384                                              &pbuf->pb_info) &&
3385                     lnet_is_discovery_disabled(lp))) {
3386                 rc = lnet_peer_merge_data(lp, pbuf);
3387         } else {
3388                 lpni = lnet_find_peer_ni_locked(nid);
3389                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3390                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3391                         if (rc) {
3392                                 CERROR("Primary NID error %s versus %s: %d\n",
3393                                        libcfs_nidstr(&lp->lp_primary_nid),
3394                                        libcfs_nid2str(nid), rc);
3395                         } else {
3396                                 rc = lnet_peer_merge_data(lp, pbuf);
3397                         }
3398                         if (lpni)
3399                                 lnet_peer_ni_decref_locked(lpni);
3400                 } else {
3401                         struct lnet_peer *new_lp;
3402                         new_lp = lpni->lpni_peer_net->lpn_peer;
3403                         /*
3404                          * if lp has discovery/MR enabled that means new_lp
3405                          * should have discovery/MR enabled as well, since
3406                          * it's the same peer, which we're about to merge
3407                          */
3408                         spin_lock(&lp->lp_lock);
3409                         spin_lock(&new_lp->lp_lock);
3410                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3411                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3412                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3413                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3414                         /* If we're processing a ping reply then we may be
3415                          * about to send a push to the peer that we ping'd.
3416                          * Since the ping reply that we're processing was
3417                          * received by lp, we need to set the discovery source
3418                          * NID for new_lp to the NID stored in lp.
3419                          */
3420                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3421                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3422                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3423                         }
3424                         spin_unlock(&new_lp->lp_lock);
3425                         spin_unlock(&lp->lp_lock);
3426
3427                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3428                         lnet_consolidate_routes_locked(lp, new_lp);
3429                         lnet_peer_ni_decref_locked(lpni);
3430                 }
3431         }
3432 out:
3433         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3434                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3435                lp->lp_state);
3436         mutex_unlock(&the_lnet.ln_api_mutex);
3437
3438         spin_lock(&lp->lp_lock);
3439         /* Tell discovery to re-check the peer immediately. */
3440         if (!rc)
3441                 rc = LNET_REDISCOVER_PEER;
3442         return rc;
3443 }
3444
3445 /*
3446  * A ping failed. Clear the PING_FAILED state and set the
3447  * FORCE_PING state, to ensure a retry even if discovery is
3448  * disabled. This avoids being left with incorrect state.
3449  */
3450 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3451 __must_hold(&lp->lp_lock)
3452 {
3453         struct lnet_handle_md mdh;
3454         int rc;
3455
3456         mdh = lp->lp_ping_mdh;
3457         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3458         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3459         lp->lp_state |= LNET_PEER_FORCE_PING;
3460         rc = lp->lp_ping_error;
3461         lp->lp_ping_error = 0;
3462         spin_unlock(&lp->lp_lock);
3463
3464         if (!LNetMDHandleIsInvalid(mdh))
3465                 LNetMDUnlink(mdh);
3466
3467         CDEBUG(D_NET, "peer %s:%d\n",
3468                libcfs_nidstr(&lp->lp_primary_nid), rc);
3469
3470         spin_lock(&lp->lp_lock);
3471         return rc ? rc : LNET_REDISCOVER_PEER;
3472 }
3473
3474 /* Active side of ping. */
3475 static int lnet_peer_send_ping(struct lnet_peer *lp)
3476 __must_hold(&lp->lp_lock)
3477 {
3478         int nnis;
3479         int rc;
3480         int cpt;
3481
3482         lp->lp_state |= LNET_PEER_PING_SENT;
3483         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3484         spin_unlock(&lp->lp_lock);
3485
3486         cpt = lnet_net_lock_current();
3487         /* Refcount for MD. */
3488         lnet_peer_addref_locked(lp);
3489         lnet_net_unlock(cpt);
3490
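        /* Size the ping REPLY buffer for at least LNET_INTERFACES_MIN NIDs,
         * or for the number of NIDs we last recorded for this peer,
         * whichever is larger.
         */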
3491         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3492
3493         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3494                             the_lnet.ln_dc_handler, false);
3495
3496         /*
3497          * If LNetMDBind() in lnet_send_ping() fails we need to drop the
3498          * refcount on the peer ourselves; otherwise LNetMDUnlink() will be
3499          * called, which will eventually drop it.
3500          */
3501         if (rc > 0) {
3502                 lnet_net_lock(cpt);
3503                 lnet_peer_decref_locked(lp);
3504                 lnet_net_unlock(cpt);
3505                 rc = -rc; /* change the rc to negative value */
3506                 goto fail_error;
3507         } else if (rc < 0) {
3508                 goto fail_error;
3509         }
3510
3511         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3512
3513         spin_lock(&lp->lp_lock);
3514         return 0;
3515
3516 fail_error:
3517         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3518         /*
3519          * The errors that get us here are considered hard errors and
3520          * cause Discovery to terminate. So we clear PING_SENT, but do
3521          * not set either PING_FAILED or FORCE_PING. In fact we need
3522          * to clear PING_FAILED, because the unlink event handler will
3523          * have set it if we called LNetMDUnlink() above.
3524          */
3525         spin_lock(&lp->lp_lock);
3526         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3527         return rc;
3528 }
3529
3530 /*
3531  * This function exists because you cannot call LNetMDUnlink() from an
3532  * event handler.
3533  */
3534 static int lnet_peer_push_failed(struct lnet_peer *lp)
3535 __must_hold(&lp->lp_lock)
3536 {
3537         struct lnet_handle_md mdh;
3538         int rc;
3539
3540         mdh = lp->lp_push_mdh;
3541         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3542         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3543         rc = lp->lp_push_error;
3544         lp->lp_push_error = 0;
3545         spin_unlock(&lp->lp_lock);
3546
3547         if (!LNetMDHandleIsInvalid(mdh))
3548                 LNetMDUnlink(mdh);
3549
3550         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3551         spin_lock(&lp->lp_lock);
3552         return rc ? rc : LNET_REDISCOVER_PEER;
3553 }
3554
3555 /*
3556  * Mark the peer as discovered.
3557  */
3558 static int lnet_peer_discovered(struct lnet_peer *lp)
3559 __must_hold(&lp->lp_lock)
3560 {
3561         lp->lp_state |= LNET_PEER_DISCOVERED;
3562         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3563                           LNET_PEER_REDISCOVER);
3564
3565         lp->lp_dc_error = 0;
3566
3567         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3568
3569         return 0;
3570 }
3571
3572 /* Active side of push. */
3573 static int lnet_peer_send_push(struct lnet_peer *lp)
3574 __must_hold(&lp->lp_lock)
3575 {
3576         struct lnet_ping_buffer *pbuf;
3577         struct lnet_process_id id;
3578         struct lnet_md md;
3579         int cpt;
3580         int rc;
3581
3582         /* Don't push to a non-multi-rail peer. */
3583         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3584                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3585                 /* if peer's NIDs are uptodate then peer is discovered */
3586                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3587                         rc = lnet_peer_discovered(lp);
3588                         return rc;
3589                 }
3590
3591                 return 0;
3592         }
3593
3594         lp->lp_state |= LNET_PEER_PUSH_SENT;
3595         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3596         spin_unlock(&lp->lp_lock);
3597
3598         cpt = lnet_net_lock_current();
3599         pbuf = the_lnet.ln_ping_target;
3600         lnet_ping_buffer_addref(pbuf);
3601         lnet_net_unlock(cpt);
3602
3603         /* Push source MD */
3604         md.start     = &pbuf->pb_info;
3605         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3606         md.threshold = 2; /* Put/Ack */
3607         md.max_size  = 0;
3608         md.options   = LNET_MD_TRACK_RESPONSE;
3609         md.handler   = the_lnet.ln_dc_handler;
3610         md.user_ptr  = lp;
3611
3612         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3613         if (rc) {
3614                 lnet_ping_buffer_decref(pbuf);
3615                 CERROR("Can't bind push source MD: %d\n", rc);
3616                 goto fail_error;
3617         }
3618
3619         cpt = lnet_net_lock_current();
3620         /* Refcount for MD. */
3621         lnet_peer_addref_locked(lp);
3622         id.pid = LNET_PID_LUSTRE;
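        /* Send the push to the specific peer NID this discovery exchange
         * has been using, if one was recorded; otherwise fall back to the
         * peer's primary NID.
         */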
3623         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3624                 id.nid = lnet_nid_to_nid4(&lp->lp_disc_dst_nid);
3625         else
3626                 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
3627         lnet_net_unlock(cpt);
3628
3629         rc = LNetPut(lnet_nid_to_nid4(&lp->lp_disc_src_nid), lp->lp_push_mdh,
3630                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3631                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3632
3633         /*
3634          * Reset the discovery NIDs. There is no need to restrict sending
3635          * from that source if we call lnet_push_update_to_peers(); they
3636          * will be set to specific NIDs again if we initiate discovery
3637          * from scratch.
3638          */
3639         lp->lp_disc_src_nid = LNET_ANY_NID;
3640         lp->lp_disc_dst_nid = LNET_ANY_NID;
3641
3642         if (rc)
3643                 goto fail_unlink;
3644
3645         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3646
3647         spin_lock(&lp->lp_lock);
3648         return 0;
3649
3650 fail_unlink:
3651         LNetMDUnlink(lp->lp_push_mdh);
3652         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3653 fail_error:
3654         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3655                lp, rc);
3656         /*
3657          * The errors that get us here are considered hard errors and
3658          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3659          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3660          * because the unlink event handler will have set it if we
3661          * called LNetMDUnlink() above.
3662          */
3663         spin_lock(&lp->lp_lock);
3664         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3665         return rc;
3666 }
3667
3668 /*
3669  * Wait for work to be queued or some other change that must be
3670  * attended to. Returns non-zero if the discovery thread should shut
3671  * down.
3672  */
3673 static int lnet_peer_discovery_wait_for_work(void)
3674 {
3675         int cpt;
3676         int rc = 0;
3677
3678         DEFINE_WAIT(wait);
3679
3680         cpt = lnet_net_lock_current();
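        /* Sleep until there is work to do: a shutdown request, a push
         * target that needs resizing or re-posting, queued discovery
         * requests, or messages waiting to be resent.
         */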
3681         for (;;) {
3682                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3683                                 TASK_INTERRUPTIBLE);
3684                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3685                         break;
3686                 if (lnet_push_target_resize_needed() ||
3687                     the_lnet.ln_push_target->pb_needs_post)
3688                         break;
3689                 if (!list_empty(&the_lnet.ln_dc_request))
3690                         break;
3691                 if (!list_empty(&the_lnet.ln_msg_resend))
3692                         break;
3693                 lnet_net_unlock(cpt);
3694
3695                 /*
3696                  * Sleep for at most one second so we periodically check
3697                  * whether any peers have been stuck on the working queue
3698                  * for longer than the peer timeout.
3699                  */
3700                 schedule_timeout(cfs_time_seconds(1));
3701                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3702                 cpt = lnet_net_lock_current();
3703         }
3704         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3705
3706         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3707                 rc = -ESHUTDOWN;
3708
3709         lnet_net_unlock(cpt);
3710
3711         CDEBUG(D_NET, "woken: %d\n", rc);
3712
3713         return rc;
3714 }
3715
3716 /*
3717  * Messages that were pending on a destroyed peer will be put on a global
3718  * resend list. The discovery thread checks the resend list when it
3719  * wakes up and resends the messages on it. These messages may still be
3720  * sendable if the lpni that originally caused them to be re-queued was
3721  * transferred to another peer.
3722  *
3723  * It is possible that LNet could be shut down while we're iterating
3724  * through the list. lnet_shutdown_lndnets() will attempt to access the
3725  * resend list, but will have to wait until the spinlock is released, by
3726  * which time there shouldn't be any more messages on the resend list.
3727  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3728  * for the messages so they can be released. The other case is that
3729  * lnet_shutdown_lndnets() can finalize all the messages before this
3730  * function can visit the resend list, in which case this function will be
3731  * a no-op.
3732  */
3733 static void lnet_resend_msgs(void)
3734 {
3735         struct lnet_msg *msg, *tmp;
3736         LIST_HEAD(resend);
3737         int rc;
3738
3739         spin_lock(&the_lnet.ln_msg_resend_lock);
3740         list_splice(&the_lnet.ln_msg_resend, &resend);
3741         spin_unlock(&the_lnet.ln_msg_resend_lock);
3742
3743         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3744                 list_del_init(&msg->msg_list);
3745                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3746                                &msg->msg_rtr_nid_param);
3747                 if (rc < 0) {
3748                         CNETERR("Error sending %s to %s: %d\n",
3749                                lnet_msgtyp2str(msg->msg_type),
3750                                libcfs_idstr(&msg->msg_target), rc);
3751                         lnet_finalize(msg, rc);
3752                 }
3753         }
3754 }
3755
3756 /* The discovery thread. */
3757 static int lnet_peer_discovery(void *arg)
3758 {
3759         struct lnet_peer *lp;
3760         int rc;
3761
3762         wait_for_completion(&the_lnet.ln_started);
3763
3764         CDEBUG(D_NET, "started\n");
3765
3766         for (;;) {
3767                 if (lnet_peer_discovery_wait_for_work())
3768                         break;
3769
3770                 if (lnet_push_target_resize_needed())
3771                         lnet_push_target_resize();
3772                 else if (the_lnet.ln_push_target->pb_needs_post)
3773                         lnet_push_target_post(the_lnet.ln_push_target,
3774                                               &the_lnet.ln_push_target_md);
3775
3776                 lnet_resend_msgs();
3777
3778                 lnet_net_lock(LNET_LOCK_EX);
3779                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3780                         lnet_net_unlock(LNET_LOCK_EX);
3781                         break;
3782                 }
3783
3784                 /*
3785                  * Process all incoming discovery work requests.  When
3786                  * discovery must wait on a peer to change state, it
3787                  * is added to the tail of the ln_dc_working queue. A
3788                  * timestamp keeps track of when the peer was added,
3789                  * so we can time out discovery requests that take too
3790                  * long.
3791                  */
3792                 while (!list_empty(&the_lnet.ln_dc_request)) {
3793                         lp = list_first_entry(&the_lnet.ln_dc_request,
3794                                               struct lnet_peer, lp_dc_list);
3795                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3796                         /*
3797                          * set the time the peer was put on the dc_working
3798                          * queue. It shouldn't remain on the queue
3799                          * forever, in case the GET message (for ping)
3800                          * doesn't get a REPLY or the PUT message (for
3801                          * push) doesn't get an ACK.
3802                          */
3803                         lp->lp_last_queued = ktime_get_real_seconds();
3804                         lnet_net_unlock(LNET_LOCK_EX);
3805
3806                         if (lnet_push_target_resize_needed())
3807                                 lnet_push_target_resize();
3808                         else if (the_lnet.ln_push_target->pb_needs_post)
3809                                 lnet_push_target_post(the_lnet.ln_push_target,
3810                                                       &the_lnet.ln_push_target_md);
3811
3812                         /*
3813                          * Select an action depending on the state of
3814                          * the peer and whether discovery is disabled.
3815                          * The check whether discovery is disabled is
3816                          * done after the code that handles processing
3817                          * for arrived data, cleanup for failures, and
3818                          * forcing a Ping or Push.
3819                          */
3820                         spin_lock(&lp->lp_lock);
3821                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3822                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3823                                 lp->lp_state);
3824                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3825                                             LNET_PEER_MARK_DELETED))
3826                                 rc = lnet_peer_deletion(lp);
3827                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3828                                 rc = lnet_peer_data_present(lp);
3829                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3830                                 rc = lnet_peer_ping_failed(lp);
3831                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3832                                 rc = lnet_peer_push_failed(lp);
3833                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3834                                 rc = lnet_peer_send_ping(lp);
3835                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3836                                 rc = lnet_peer_send_push(lp);
3837                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3838                                 rc = lnet_peer_send_ping(lp);
3839                         else if (lnet_peer_needs_push(lp))
3840                                 rc = lnet_peer_send_push(lp);
3841                         else
3842                                 rc = lnet_peer_discovered(lp);
3843                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3844                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3845                                 lp->lp_state, rc);
3846
3847                         if (rc == LNET_REDISCOVER_PEER) {
3848                                 spin_unlock(&lp->lp_lock);
3849                                 lnet_net_lock(LNET_LOCK_EX);
3850                                 list_move(&lp->lp_dc_list,
3851                                           &the_lnet.ln_dc_request);
3852                         } else if (rc ||
3853                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
3854                                 spin_unlock(&lp->lp_lock);
3855                                 lnet_net_lock(LNET_LOCK_EX);
3856                                 lnet_peer_discovery_complete(lp, rc);
3857                         } else {
3858                                 spin_unlock(&lp->lp_lock);
3859                                 lnet_net_lock(LNET_LOCK_EX);
3860                         }
3861
3862                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3863                                 break;
3864
3865                 }
3866
3867                 lnet_net_unlock(LNET_LOCK_EX);
3868         }
3869
3870         CDEBUG(D_NET, "stopping\n");
3871         /*
3872          * Clean up before telling lnet_peer_discovery_stop() that
3873          * we're done. Use wake_up() below to somewhat reduce the
3874          * size of the thundering herd if there are multiple threads
3875          * waiting on discovery of a single peer.
3876          */
3877
3878         /* Queue cleanup 1: stop all pending pings and pushes. */
3879         lnet_net_lock(LNET_LOCK_EX);
3880         while (!list_empty(&the_lnet.ln_dc_working)) {
3881                 lp = list_first_entry(&the_lnet.ln_dc_working,
3882                                       struct lnet_peer, lp_dc_list);
3883                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3884                 lnet_net_unlock(LNET_LOCK_EX);
3885                 lnet_peer_cancel_discovery(lp);
3886                 lnet_net_lock(LNET_LOCK_EX);
3887         }
3888         lnet_net_unlock(LNET_LOCK_EX);
3889
3890         /* Queue cleanup 2: wait for the expired queue to clear. */
3891         while (!list_empty(&the_lnet.ln_dc_expired))
3892                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3893
3894         /* Queue cleanup 3: clear the request queue. */
3895         lnet_net_lock(LNET_LOCK_EX);
3896         while (!list_empty(&the_lnet.ln_dc_request)) {
3897                 lp = list_first_entry(&the_lnet.ln_dc_request,
3898                                       struct lnet_peer, lp_dc_list);
3899                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
3900         }
3901         lnet_net_unlock(LNET_LOCK_EX);
3902
3903         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3904         the_lnet.ln_dc_handler = NULL;
3905
3906         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3907         wake_up(&the_lnet.ln_dc_waitq);
3908
3909         CDEBUG(D_NET, "stopped\n");
3910
3911         return 0;
3912 }
3913
3914 /* ln_api_mutex is held on entry. */
3915 int lnet_peer_discovery_start(void)
3916 {
3917         struct task_struct *task;
3918         int rc = 0;
3919
3920         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3921                 return -EALREADY;
3922
3923         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3924         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3925         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3926         if (IS_ERR(task)) {
3927                 rc = PTR_ERR(task);
3928                 CERROR("Can't start peer discovery thread: %d\n", rc);
3929
3930                 the_lnet.ln_dc_handler = NULL;
3931
3932                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3933         }
3934
3935         CDEBUG(D_NET, "discovery start: %d\n", rc);
3936
3937         return rc;
3938 }
3939
3940 /* ln_api_mutex is held on entry. */
3941 void lnet_peer_discovery_stop(void)
3942 {
3943         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3944                 return;
3945
3946         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3947         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3948
3949         /* In the LNetNIInit() path we may be stopping discovery before it
3950          * entered its work loop.
3951          */
3952         if (!completion_done(&the_lnet.ln_started))
3953                 complete(&the_lnet.ln_started);
3954         else
3955                 wake_up(&the_lnet.ln_dc_waitq);
3956
3957         wait_event(the_lnet.ln_dc_waitq,
3958                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3959
3960         LASSERT(list_empty(&the_lnet.ln_dc_request));
3961         LASSERT(list_empty(&the_lnet.ln_dc_working));
3962         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3963
3964         CDEBUG(D_NET, "discovery stopped\n");
3965 }
3966
3967 /* Debugging */
3968
3969 void
3970 lnet_debug_peer(lnet_nid_t nid)
3971 {
3972         char                    *aliveness = "NA";
3973         struct lnet_peer_ni     *lp;
3974         int                     cpt;
3975
3976         cpt = lnet_cpt_of_nid(nid, NULL);
3977         lnet_net_lock(cpt);
3978
3979         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3980         if (IS_ERR(lp)) {
3981                 lnet_net_unlock(cpt);
3982                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3983                 return;
3984         }
3985
3986         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3987                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3988
3989         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3990                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3991                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3992                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3993                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3994
3995         lnet_peer_ni_decref_locked(lp);
3996
3997         lnet_net_unlock(cpt);
3998 }
3999
4000 /* Gathering information for userspace. */
4001
4002 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4003                           char aliveness[LNET_MAX_STR_LEN],
4004                           __u32 *cpt_iter, __u32 *refcount,
4005                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4006                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4007                           __u32 *peer_tx_qnob)
4008 {
4009         struct lnet_peer_table          *peer_table;
4010         struct lnet_peer_ni             *lp;
4011         int                             j;
4012         int                             lncpt;
4013         bool                            found = false;
4014
4015         /* get the number of CPTs */
4016         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4017
4018         /* if the cpt number to be examined is >= the number of cpts in
4019          * the system then indicate that there are no more cpts to examine
4020          */
4021         if (*cpt_iter >= lncpt)
4022                 return -ENOENT;
4023
4024         /* get the current table */
4025         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4026         /* if the ptable is NULL then there are no more cpts to examine */
4027         if (peer_table == NULL)
4028                 return -ENOENT;
4029
4030         lnet_net_lock(*cpt_iter);
4031
4032         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4033                 struct list_head *peers = &peer_table->pt_hash[j];
4034
4035                 list_for_each_entry(lp, peers, lpni_hashlist) {
4036                         if (!nid_is_nid4(&lp->lpni_nid))
4037                                 continue;
4038                         if (peer_index-- > 0)
4039                                 continue;
4040
4041                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4042                         if (lnet_isrouter(lp) ||
4043                                 lnet_peer_aliveness_enabled(lp))
4044                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4045                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4046
4047                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4048                         *refcount = kref_read(&lp->lpni_kref);
4049                         *ni_peer_tx_credits =
4050                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4051                         *peer_tx_credits = lp->lpni_txcredits;
4052                         *peer_rtr_credits = lp->lpni_rtrcredits;
4053                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4054                         *peer_tx_qnob = lp->lpni_txqnob;
4055
4056                         found = true;
4057                 }
4058
4059         }
4060         lnet_net_unlock(*cpt_iter);
4061
4062         *cpt_iter = lncpt;
4063
4064         return found ? 0 : -ENOENT;
4065 }
4066
4067 /* ln_api_mutex is held, which keeps the peer list stable */
4068 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4069 {
4070         struct lnet_ioctl_element_stats *lpni_stats;
4071         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4072         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4073         struct lnet_peer_ni_credit_info *lpni_info;
4074         struct lnet_peer_ni *lpni;
4075         struct lnet_peer *lp;
4076         lnet_nid_t nid;
4077         __u32 size;
4078         int rc;
4079
4080         lp = lnet_find_peer4(cfg->prcfg_prim_nid);
4081
4082         if (!lp) {
4083                 rc = -ENOENT;
4084                 goto out;
4085         }
4086
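        /* Each peer NI is reported to userspace as a NID followed by credit
         * info, stats, message stats and health stats; if the caller's
         * buffer is too small, report the required size.
         */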
4087         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4088                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4089         size *= lp->lp_nnis;
4090         if (size > cfg->prcfg_size) {
4091                 cfg->prcfg_size = size;
4092                 rc = -E2BIG;
4093                 goto out_lp_decref;
4094         }
4095
4096         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4097         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4098         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4099         cfg->prcfg_count = lp->lp_nnis;
4100         cfg->prcfg_size = size;
4101         cfg->prcfg_state = lp->lp_state;
4102
4103         /* Allocate helper buffers. */
4104         rc = -ENOMEM;
4105         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4106         if (!lpni_info)
4107                 goto out_lp_decref;
4108         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4109         if (!lpni_stats)
4110                 goto out_free_info;
4111         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4112         if (!lpni_msg_stats)
4113                 goto out_free_stats;
4114         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4115         if (!lpni_hstats)
4116                 goto out_free_msg_stats;
4117
4118
4119         lpni = NULL;
4120         rc = -EFAULT;
4121         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4122                 if (!nid_is_nid4(&lpni->lpni_nid))
4123                         continue;
4124                 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4125                 if (copy_to_user(bulk, &nid, sizeof(nid)))
4126                         goto out_free_hstats;
4127                 bulk += sizeof(nid);
4128
4129                 memset(lpni_info, 0, sizeof(*lpni_info));
4130                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4131                 if (lnet_isrouter(lpni) ||
4132                         lnet_peer_aliveness_enabled(lpni))
4133                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4134                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4135
4136                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4137                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4138                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4139                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4140                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4141                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4142                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4143                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4144                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4145                         goto out_free_hstats;
4146                 bulk += sizeof(*lpni_info);
4147
4148                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4149                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4150                                                             LNET_STATS_TYPE_SEND);
4151                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4152                                                             LNET_STATS_TYPE_RECV);
4153                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4154                                                             LNET_STATS_TYPE_DROP);
4155                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4156                         goto out_free_hstats;
4157                 bulk += sizeof(*lpni_stats);
4158                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4159                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4160                         goto out_free_hstats;
4161                 bulk += sizeof(*lpni_msg_stats);
4162                 lpni_hstats->hlpni_network_timeout =
4163                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4164                 lpni_hstats->hlpni_remote_dropped =
4165                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4166                 lpni_hstats->hlpni_remote_timeout =
4167                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4168                 lpni_hstats->hlpni_remote_error =
4169                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4170                 lpni_hstats->hlpni_health_value =
4171                   atomic_read(&lpni->lpni_healthv);
4172                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4173                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4174                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4175                         goto out_free_hstats;
4176                 bulk += sizeof(*lpni_hstats);
4177         }
4178         rc = 0;
4179
4180 out_free_hstats:
4181         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4182 out_free_msg_stats:
4183         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4184 out_free_stats:
4185         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4186 out_free_info:
4187         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4188 out_lp_decref:
4189         lnet_peer_decref_locked(lp);
4190 out:
4191         return rc;
4192 }
4193
4194 /* must hold net_lock/0 */
4195 void
4196 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4197                                      struct list_head *recovery_queue,
4198                                      time64_t now)
4199 {
4200         /* the monitor thread could have shut down and cleaned up the queues */
4201         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4202                 return;
4203
4204         if (!list_empty(&lpni->lpni_recovery))
4205                 return;
4206
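        /* A fully healthy peer NI does not need recovery. */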
4207         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4208                 return;
4209
4210         if (!lpni->lpni_last_alive) {
4211                 CDEBUG(D_NET,
4212                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4213                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4214                        lpni->lpni_last_alive);
4215                 return;
4216         }
4217
4218         if (lnet_recovery_limit &&
4219             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4220                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4221                        libcfs_nidstr(&lpni->lpni_nid),
4222                        lpni->lpni_last_alive);
4223                 /* Reset the ping count so that if this peer NI is added back to
4224                  * the recovery queue we will send the first ping right away.
4225                  */
4226                 lpni->lpni_ping_count = 0;
4227                 return;
4228         }
4229
4230         /* This peer NI is going on the recovery queue, so take a ref on it */
4231         lnet_peer_ni_addref_locked(lpni);
4232
4233         lnet_peer_ni_set_next_ping(lpni, now);
4234
4235         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4236                libcfs_nidstr(&lpni->lpni_nid),
4237                lpni->lpni_ping_count,
4238                lpni->lpni_next_ping,
4239                lpni->lpni_last_alive,
4240                atomic_read(&lpni->lpni_healthv));
4241
4242         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4243 }
4244
4245 /* Call with the ln_api_mutex held */
4246 void
4247 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4248 {
4249         struct lnet_peer_table *ptable;
4250         struct lnet_peer *lp;
4251         struct lnet_peer_net *lpn;
4252         struct lnet_peer_ni *lpni;
4253         int lncpt;
4254         int cpt;
4255         time64_t now;
4256
4257         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4258                 return;
4259
4260         now = ktime_get_seconds();
4261
4262         if (!all) {
4263                 lnet_net_lock(LNET_LOCK_EX);
4264                 lpni = lnet_find_peer_ni_locked(nid);
4265                 if (!lpni) {
4266                         lnet_net_unlock(LNET_LOCK_EX);
4267                         return;
4268                 }
4269                 lnet_set_lpni_healthv_locked(lpni, value);
4270                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4271                                              &the_lnet.ln_mt_peerNIRecovq, now);
4272                 lnet_peer_ni_decref_locked(lpni);
4273                 lnet_net_unlock(LNET_LOCK_EX);
4274                 return;
4275         }
4276
4277         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4278
4279         /*
4280          * Walk all the peers and reset the health value for each one to the
4281          * specified value.
4282          */
4283         lnet_net_lock(LNET_LOCK_EX);
4284         for (cpt = 0; cpt < lncpt; cpt++) {
4285                 ptable = the_lnet.ln_peer_tables[cpt];
4286                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4287                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4288                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4289                                                     lpni_peer_nis) {
4290                                         lnet_set_lpni_healthv_locked(lpni,
4291                                                                      value);
4292                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4293                                              &the_lnet.ln_mt_peerNIRecovq, now);
4294                                 }
4295                         }
4296                 }
4297         }
4298         lnet_net_unlock(LNET_LOCK_EX);
4299 }
4300