1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
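/*
 * Allocate and initialize a peer_ni for the given NID on the CPT derived
 * from the NID hash. Credits are taken from the local net if one exists
 * for the NID's network; otherwise the peer_ni is parked on the remote
 * peer_ni list so lnet_peer_net_added() can fill the credits in later.
 */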
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(struct lnet_nid *nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         kref_init(&lpni->lpni_kref);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = *nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NID_NET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
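/*
 * Allocate a new peer with nid as its primary NID. Discovery is turned
 * off for the loopback peer, and health is forced on for peers created
 * while this node is routing.
 */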
246 static struct lnet_peer *
247 lnet_peer_alloc(struct lnet_nid *nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = *nid;
265         lp->lp_disc_src_nid = LNET_ANY_NID;
266         lp->lp_disc_dst_nid = LNET_ANY_NID;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * all peers created on a router should have health enabled,
274          * even if it is not enabled globally.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for loopback peer. If you're creating a peer
281          * for the loopback interface then that was initiated when we
282          * attempted to send a message over the loopback. There is no need
283          * to ever use a different interface when sending messages to
284          * myself.
285          */
286         if (nid_is_lo0(nid))
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
291
292         return lp;
293 }
294
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected the lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318  * We cannot resend them now because we're holding the cpt lock.
319  * Releasing the lock can cause an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nidstr(&lp->lp_primary_nid),
382                 libcfs_nidstr(&lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nidstr(&lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
437
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
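/*
 * Delete a peer: mark it deleted and delete each of its constituent
 * peer_nis. Returns the last non-zero error from deleting an individual
 * peer_ni, if any.
 *
 * Call with lnet_net_lock/EX held.
 */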
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
461
462         spin_lock(&peer->lp_lock);
463         peer->lp_state |= LNET_PEER_MARK_DELETED;
464         spin_unlock(&peer->lp_lock);
465
466         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
467         while (lpni != NULL) {
468                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469                 rc = lnet_peer_ni_del_locked(lpni, false);
470                 if (rc != 0)
471                         rc2 = rc;
472                 lpni = lpni2;
473         }
474
475         return rc2;
476 }
477
478 /*
479  * Discovering this peer is taking too long. Cancel any Ping or Push
480  * that discovery is waiting on by unlinking the relevant MDs. The
481  * lnet_discovery_event_handler() will proceed from here and complete
482  * the cleanup.
483  */
484 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
485 {
486         struct lnet_handle_md ping_mdh;
487         struct lnet_handle_md push_mdh;
488
489         LNetInvalidateMDHandle(&ping_mdh);
490         LNetInvalidateMDHandle(&push_mdh);
491
492         spin_lock(&lp->lp_lock);
493         if (lp->lp_state & LNET_PEER_PING_SENT) {
494                 ping_mdh = lp->lp_ping_mdh;
495                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
496         }
497         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
498                 push_mdh = lp->lp_push_mdh;
499                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
500         }
501         spin_unlock(&lp->lp_lock);
502
503         if (!LNetMDHandleIsInvalid(ping_mdh))
504                 LNetMDUnlink(ping_mdh);
505         if (!LNetMDHandleIsInvalid(push_mdh))
506                 LNetMDUnlink(push_mdh);
507 }
508
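/*
 * Delete a peer: cancel any outstanding discovery ping or push first,
 * then tear down all of its peer_nis under the exclusive net lock.
 */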
509 static int
510 lnet_peer_del(struct lnet_peer *peer)
511 {
512         int rc;
513
514         lnet_peer_cancel_discovery(peer);
515         lnet_net_lock(LNET_LOCK_EX);
516         rc = lnet_peer_del_locked(peer);
517         lnet_net_unlock(LNET_LOCK_EX);
518
519         return rc;
520 }
521
522 /*
523  * Delete a NID from a peer. Call with ln_api_mutex held.
524  *
525  * Error codes:
526  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
527  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
528  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
529  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
530  */
531 static int
532 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
533                   unsigned int flags)
534 {
535         struct lnet_peer_ni *lpni;
536         struct lnet_nid primary_nid = lp->lp_primary_nid;
537         int rc = 0;
538         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
539
540         if (!(flags & LNET_PEER_CONFIGURED)) {
541                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
542                         rc = -EPERM;
543                         goto out;
544                 }
545         }
546
547         lpni = lnet_peer_ni_find_locked(nid);
548         if (!lpni) {
549                 rc = -ENOENT;
550                 goto out;
551         }
552         lnet_peer_ni_decref_locked(lpni);
553         if (lp != lpni->lpni_peer_net->lpn_peer) {
554                 rc = -ECHILD;
555                 goto out;
556         }
557
558         /*
559          * This function only allows deletion of the primary NID if it
560          * is the only NID.
561          */
562         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
563                 rc = -EBUSY;
564                 goto out;
565         }
566
567         lnet_net_lock(LNET_LOCK_EX);
568
569         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
570                 struct lnet_peer_ni *lpni2;
571                 /* assign the next peer_ni to be the primary */
572                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
573                 LASSERT(lpni2);
574                 lp->lp_primary_nid = lpni2->lpni_nid;
575         }
576         rc = lnet_peer_ni_del_locked(lpni, force);
577
578         lnet_net_unlock(LNET_LOCK_EX);
579
580 out:
581         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
582                libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
583                flags, rc);
584
585         return rc;
586 }
587
588 static void
589 lnet_peer_table_cleanup_locked(struct lnet_net *net,
590                                struct lnet_peer_table *ptable)
591 {
592         int                      i;
593         struct lnet_peer_ni     *next;
594         struct lnet_peer_ni     *lpni;
595         struct lnet_peer        *peer;
596
597         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
598                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
599                                          lpni_hashlist) {
600                         if (net != NULL && net != lpni->lpni_net)
601                                 continue;
602
603                         peer = lpni->lpni_peer_net->lpn_peer;
604                         if (!nid_same(&peer->lp_primary_nid,
605                                        &lpni->lpni_nid)) {
606                                 lnet_peer_ni_del_locked(lpni, false);
607                                 continue;
608                         }
609                         /*
610                          * Removing the primary NID implies removing
611                          * the entire peer. Advance next beyond any
612                          * peer_ni that belongs to the same peer.
613                          */
614                         list_for_each_entry_from(next, &ptable->pt_hash[i],
615                                                  lpni_hashlist) {
616                                 if (next->lpni_peer_net->lpn_peer != peer)
617                                         break;
618                         }
619                         lnet_peer_del_locked(peer);
620                 }
621         }
622 }
623
624 static void
625 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
626 {
627         wait_var_event_warning(&ptable->pt_zombies,
628                                ptable->pt_zombies == 0,
629                                "Waiting for %d zombies on peer table\n",
630                                ptable->pt_zombies);
631 }
632
633 static void
634 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
635                                 struct lnet_peer_table *ptable)
636 {
637         struct lnet_peer_ni     *lp;
638         struct lnet_peer_ni     *tmp;
639         struct lnet_nid         gw_nid;
640         int                     i;
641
642         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
643                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
644                                          lpni_hashlist) {
645                         if (net != lp->lpni_net)
646                                 continue;
647
648                         if (!lnet_isrouter(lp))
649                                 continue;
650
651                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
652
653                         lnet_net_unlock(LNET_LOCK_EX);
654                         lnet_del_route(LNET_NET_ANY, &gw_nid);
655                         lnet_net_lock(LNET_LOCK_EX);
656                 }
657         }
658 }
659
660 void
661 lnet_peer_tables_cleanup(struct lnet_net *net)
662 {
663         int i;
664         struct lnet_peer_table *ptable;
665
666         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
667         /* If just deleting the peers for a NI, get rid of any routes these
668          * peers are gateways for. */
669         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
670                 lnet_net_lock(LNET_LOCK_EX);
671                 lnet_peer_table_del_rtrs_locked(net, ptable);
672                 lnet_net_unlock(LNET_LOCK_EX);
673         }
674
675         /* Start the cleanup process */
676         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
677                 lnet_net_lock(LNET_LOCK_EX);
678                 lnet_peer_table_cleanup_locked(net, ptable);
679                 lnet_net_unlock(LNET_LOCK_EX);
680         }
681
682         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
683                 lnet_peer_ni_finalize_wait(ptable);
684 }
685
686 static struct lnet_peer_ni *
687 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
688 {
689         struct list_head        *peers;
690         struct lnet_peer_ni     *lp;
691
692         if (the_lnet.ln_state != LNET_STATE_RUNNING)
693                 return NULL;
694
695         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
696         list_for_each_entry(lp, peers, lpni_hashlist) {
697                 if (nid_same(&lp->lpni_nid, nid)) {
698                         lnet_peer_ni_addref_locked(lp);
699                         return lp;
700                 }
701         }
702
703         return NULL;
704 }
705
706 struct lnet_peer_ni *
707 lnet_peer_ni_find_locked(struct lnet_nid *nid)
708 {
709         struct lnet_peer_ni *lpni;
710         struct lnet_peer_table *ptable;
711         int cpt;
712
713         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
714
715         ptable = the_lnet.ln_peer_tables[cpt];
716         lpni = lnet_get_peer_ni_locked(ptable, nid);
717
718         return lpni;
719 }
720
721 struct lnet_peer_ni *
722 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
723 {
724         struct lnet_peer_net *lpn;
725         struct lnet_peer_ni *lpni;
726
727         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
728         if (!lpn)
729                 return NULL;
730
731         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
732                 if (nid_same(&lpni->lpni_nid, nid))
733                         return lpni;
734         }
735
736         return NULL;
737 }
738
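/*
 * Look up the peer that owns the given NID. Returns the peer with a
 * reference held, or NULL if no peer_ni exists for the NID; the caller
 * is responsible for dropping the reference.
 */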
739 struct lnet_peer *
740 lnet_find_peer(struct lnet_nid *nid)
741 {
742         struct lnet_peer_ni *lpni;
743         struct lnet_peer *lp = NULL;
744         int cpt;
745
746         cpt = lnet_net_lock_current();
747         lpni = lnet_peer_ni_find_locked(nid);
748         if (lpni) {
749                 lp = lpni->lpni_peer_net->lpn_peer;
750                 lnet_peer_addref_locked(lp);
751                 lnet_peer_ni_decref_locked(lpni);
752         }
753         lnet_net_unlock(cpt);
754
755         return lp;
756 }
757
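/*
 * Iterate over the peer nets of a peer. A prev_lpn_id of 0 returns the
 * first net; otherwise the net following prev_lpn_id is returned,
 * wrapping around to the head of the list.
 */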
758 struct lnet_peer_net *
759 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
760 {
761         struct lnet_peer_net *net;
762
763         if (!prev_lpn_id) {
764                 /* no net id provided, return the first net */
765                 net = list_first_entry_or_null(&lp->lp_peer_nets,
766                                                struct lnet_peer_net,
767                                                lpn_peer_nets);
768
769                 return net;
770         }
771
772         /* find the net after the one provided */
773         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
774                 if (net->lpn_net_id == prev_lpn_id) {
775                         /*
776                          * if we reached the end of the list loop to the
777                          * beginning.
778                          */
779                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
780                                 return list_first_entry_or_null(&lp->lp_peer_nets,
781                                                                 struct lnet_peer_net,
782                                                                 lpn_peer_nets);
783                         else
784                                 return list_next_entry(net, lpn_peer_nets);
785                 }
786         }
787
788         return NULL;
789 }
790
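/*
 * Iterate over the peer_nis of a peer, optionally restricted to a single
 * peer_net. Pass prev == NULL to start the iteration; NULL is returned
 * when there are no more peer_nis to visit.
 */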
791 struct lnet_peer_ni *
792 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
793                              struct lnet_peer_net *peer_net,
794                              struct lnet_peer_ni *prev)
795 {
796         struct lnet_peer_ni *lpni;
797         struct lnet_peer_net *net = peer_net;
798
799         if (!prev) {
800                 if (!net) {
801                         if (list_empty(&peer->lp_peer_nets))
802                                 return NULL;
803
804                         net = list_first_entry(&peer->lp_peer_nets,
805                                                struct lnet_peer_net,
806                                                lpn_peer_nets);
807                 }
808                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
809                                         lpni_peer_nis);
810
811                 return lpni;
812         }
813
814         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
815                 /*
816                  * if you reached the end of the peer ni list and the peer
817                  * net is specified then there are no more peer nis in that
818                  * net.
819                  */
820                 if (net)
821                         return NULL;
822
823                 /*
824                  * we reached the end of this net ni list. move to the
825                  * next net
826                  */
827                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
828                     &peer->lp_peer_nets)
829                         /* no more nets and no more NIs. */
830                         return NULL;
831
832                 /* get the next net */
833                 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
834                                        struct lnet_peer_net,
835                                        lpn_peer_nets);
836                 /* get the ni on it */
837                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
838                                         lpni_peer_nis);
839
840                 return lpni;
841         }
842
843         /* there are more nis left */
844         lpni = list_first_entry(&prev->lpni_peer_nis,
845                                 struct lnet_peer_ni, lpni_peer_nis);
846
847         return lpni;
848 }
849
850 /* Call with the ln_api_mutex held */
851 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
852 {
853         struct lnet_process_id id;
854         struct lnet_peer_table *ptable;
855         struct lnet_peer *lp;
856         __u32 count = 0;
857         __u32 size = 0;
858         int lncpt;
859         int cpt;
860         __u32 i;
861         int rc;
862
863         rc = -ESHUTDOWN;
864         if (the_lnet.ln_state != LNET_STATE_RUNNING)
865                 goto done;
866
867         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
868
869         /*
870          * Count the number of peers, and return E2BIG if the buffer
871          * is too small. We'll also return the desired size.
872          */
873         rc = -E2BIG;
874         for (cpt = 0; cpt < lncpt; cpt++) {
875                 ptable = the_lnet.ln_peer_tables[cpt];
876                 count += ptable->pt_peers;
877         }
878         size = count * sizeof(*ids);
879         if (size > *sizep)
880                 goto done;
881
882         /*
883          * Walk the peer lists and copy out the primary nids.
884          * This is safe because the peer lists are only modified
885          * while the ln_api_mutex is held. So we don't need to
886          * hold the lnet_net_lock as well, and can therefore
887          * directly call copy_to_user().
888          */
889         rc = -EFAULT;
890         memset(&id, 0, sizeof(id));
891         id.pid = LNET_PID_LUSTRE;
892         i = 0;
893         for (cpt = 0; cpt < lncpt; cpt++) {
894                 ptable = the_lnet.ln_peer_tables[cpt];
895                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
896                         if (!nid_is_nid4(&lp->lp_primary_nid))
897                                 continue;
898                         if (i >= count)
899                                 goto done;
900                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
901                         if (copy_to_user(&ids[i], &id, sizeof(id)))
902                                 goto done;
903                         i++;
904                 }
905         }
906         rc = 0;
907 done:
908         *countp = count;
909         *sizep = size;
910         return rc;
911 }
912
913 /*
914  * Start pushes to peers that need to be updated for a configuration
915  * change on this node.
916  */
917 void
918 lnet_push_update_to_peers(int force)
919 {
920         struct lnet_peer_table *ptable;
921         struct lnet_peer *lp;
922         int lncpt;
923         int cpt;
924
925         lnet_net_lock(LNET_LOCK_EX);
926         if (lnet_peer_discovery_disabled)
927                 force = 0;
928         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
929         for (cpt = 0; cpt < lncpt; cpt++) {
930                 ptable = the_lnet.ln_peer_tables[cpt];
931                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
932                         if (force) {
933                                 spin_lock(&lp->lp_lock);
934                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
935                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
936                                 spin_unlock(&lp->lp_lock);
937                         }
938                         if (lnet_peer_needs_push(lp))
939                                 lnet_peer_queue_for_discovery(lp);
940                 }
941         }
942         lnet_net_unlock(LNET_LOCK_EX);
943         wake_up(&the_lnet.ln_dc_waitq);
944 }
945
946 /* find the NID in the preferred gateways for the remote peer
947  * return:
948  *      false: list is not empty and NID is not preferred
949  *      false: list is empty
950  *      true: nid is found in the list
951  */
952 bool
953 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
954                              struct lnet_nid *gw_nid)
955 {
956         struct lnet_nid_list *ne;
957
958         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
959                libcfs_nidstr(&lpni->lpni_nid),
960                list_empty(&lpni->lpni_rtr_pref_nids));
961
962         if (list_empty(&lpni->lpni_rtr_pref_nids))
963                 return false;
964
965         /* iterate through all the preferred NIDs and see if any of them
966          * matches the provided gw_nid
967          */
968         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
969                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
970                        libcfs_nidstr(&ne->nl_nid),
971                        libcfs_nidstr(gw_nid));
972                 if (nid_same(&ne->nl_nid, gw_nid))
973                         return true;
974         }
975
976         return false;
977 }
978
979 void
980 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
981 {
982         struct list_head zombies;
983         struct lnet_nid_list *ne;
984         struct lnet_nid_list *tmp;
985         int cpt = lpni->lpni_cpt;
986
987         INIT_LIST_HEAD(&zombies);
988
989         lnet_net_lock(cpt);
990         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
991         lnet_net_unlock(cpt);
992
993         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
994                 list_del(&ne->nl_list);
995                 LIBCFS_FREE(ne, sizeof(*ne));
996         }
997 }
998
999 int
1000 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
1001                        struct lnet_nid *gw_nid)
1002 {
1003         int cpt = lpni->lpni_cpt;
1004         struct lnet_nid_list *ne = NULL;
1005
1006         /* This function is called with api_mutex held. When the api_mutex
1007          * is held the list can not be modified, as it is only modified as
1008          * a result of applying a UDSP and that happens under api_mutex
1009          * lock.
1010          */
1011         __must_hold(&the_lnet.ln_api_mutex);
1012
1013         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1014                 if (nid_same(&ne->nl_nid, gw_nid))
1015                         return -EEXIST;
1016         }
1017
1018         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1019         if (!ne)
1020                 return -ENOMEM;
1021
1022         ne->nl_nid = *gw_nid;
1023
1024         /* Lock the cpt to protect against addition and checks in the
1025          * selection algorithm
1026          */
1027         lnet_net_lock(cpt);
1028         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1029         lnet_net_unlock(cpt);
1030
1031         return 0;
1032 }
1033
1034 /*
1035  * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
1036  * this is a preferred point-to-point path. Call with lnet_net_lock in
1037  * shared mode.
1038  */
1039 bool
1040 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1041 {
1042         struct lnet_nid_list *ne;
1043
1044         if (lpni->lpni_pref_nnids == 0)
1045                 return false;
1046         if (lpni->lpni_pref_nnids == 1)
1047                 return nid_same(&lpni->lpni_pref.nid, nid);
1048         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1049                 if (nid_same(&ne->nl_nid, nid))
1050                         return true;
1051         }
1052         return false;
1053 }
1054
1055 /*
1056  * Set a single ni as preferred, provided no preferred ni is already
1057  * defined. Only to be used for non-multi-rail peer_ni.
1058  */
1059 int
1060 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1061                                   struct lnet_nid *nid)
1062 {
1063         int rc = 0;
1064
1065         if (!nid)
1066                 return -EINVAL;
1067         spin_lock(&lpni->lpni_lock);
1068         if (LNET_NID_IS_ANY(nid)) {
1069                 rc = -EINVAL;
1070         } else if (lpni->lpni_pref_nnids > 0) {
1071                 rc = -EPERM;
1072         } else if (lpni->lpni_pref_nnids == 0) {
1073                 lpni->lpni_pref.nid = *nid;
1074                 lpni->lpni_pref_nnids = 1;
1075                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1076         }
1077         spin_unlock(&lpni->lpni_lock);
1078
1079         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1080                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1081         return rc;
1082 }
1083
1084 /*
1085  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1086  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1087  */
1088 int
1089 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1090 {
1091         int rc = 0;
1092
1093         spin_lock(&lpni->lpni_lock);
1094         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1095                 lpni->lpni_pref_nnids = 0;
1096                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1097         } else if (lpni->lpni_pref_nnids == 0) {
1098                 rc = -ENOENT;
1099         } else {
1100                 rc = -EPERM;
1101         }
1102         spin_unlock(&lpni->lpni_lock);
1103
1104         CDEBUG(D_NET, "peer %s: %d\n",
1105                libcfs_nidstr(&lpni->lpni_nid), rc);
1106         return rc;
1107 }
1108
1109 void
1110 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1111 {
1112         lpni->lpni_sel_priority = priority;
1113 }
1114
1115 /*
1116  * Clear the preferred NIDs from a non-multi-rail peer.
1117  */
1118 void
1119 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1120 {
1121         struct lnet_peer_ni *lpni = NULL;
1122
1123         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1124                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1125 }
1126
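/*
 * Add a preferred local NID to a peer_ni. A single preferred NID is
 * stored inline in lpni_pref.nid; adding a second one converts the
 * storage to the lpni_pref.nids list. Non-multi-rail peers are limited
 * to one preferred NID per peer_ni.
 */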
1127 int
1128 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1129 {
1130         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1131         struct lnet_nid_list *ne1 = NULL;
1132         struct lnet_nid_list *ne2 = NULL;
1133         struct lnet_nid *tmp_nid = NULL;
1134         int rc = 0;
1135
1136         if (LNET_NID_IS_ANY(nid)) {
1137                 rc = -EINVAL;
1138                 goto out;
1139         }
1140
1141         if (lpni->lpni_pref_nnids == 1 &&
1142             nid_same(&lpni->lpni_pref.nid, nid)) {
1143                 rc = -EEXIST;
1144                 goto out;
1145         }
1146
1147         /* A non-MR node may have only one preferred NI per peer_ni */
1148         if (lpni->lpni_pref_nnids > 0 &&
1149             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1150                 rc = -EPERM;
1151                 goto out;
1152         }
1153
1154         /* add the new preferred nid to the list of preferred nids */
1155         if (lpni->lpni_pref_nnids != 0) {
1156                 size_t alloc_size = sizeof(*ne1);
1157
1158                 if (lpni->lpni_pref_nnids == 1) {
1159                         tmp_nid = &lpni->lpni_pref.nid;
1160                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1161                 }
1162
1163                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1164                         if (nid_same(&ne1->nl_nid, nid)) {
1165                                 rc = -EEXIST;
1166                                 goto out;
1167                         }
1168                 }
1169
1170                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1171                                  alloc_size);
1172                 if (!ne1) {
1173                         rc = -ENOMEM;
1174                         goto out;
1175                 }
1176
1177                 /* move the originally stored nid to the list */
1178                 if (lpni->lpni_pref_nnids == 1) {
1179                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1180                                 lpni->lpni_cpt, alloc_size);
1181                         if (!ne2) {
1182                                 rc = -ENOMEM;
1183                                 goto out;
1184                         }
1185                         INIT_LIST_HEAD(&ne2->nl_list);
1186                         ne2->nl_nid = *tmp_nid;
1187                 }
1188                 ne1->nl_nid = *nid;
1189         }
1190
1191         lnet_net_lock(LNET_LOCK_EX);
1192         spin_lock(&lpni->lpni_lock);
1193         if (lpni->lpni_pref_nnids == 0) {
1194                 lpni->lpni_pref.nid = *nid;
1195         } else {
1196                 if (ne2)
1197                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1198                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1199         }
1200         lpni->lpni_pref_nnids++;
1201         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1202         spin_unlock(&lpni->lpni_lock);
1203         lnet_net_unlock(LNET_LOCK_EX);
1204
1205 out:
1206         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1207                 spin_lock(&lpni->lpni_lock);
1208                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1209                 spin_unlock(&lpni->lpni_lock);
1210         }
1211         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1212                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1213         return rc;
1214 }
1215
1216 int
1217 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1218 {
1219         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1220         struct lnet_nid_list *ne = NULL;
1221         int rc = 0;
1222
1223         if (lpni->lpni_pref_nnids == 0) {
1224                 rc = -ENOENT;
1225                 goto out;
1226         }
1227
1228         if (lpni->lpni_pref_nnids == 1) {
1229                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1230                         rc = -ENOENT;
1231                         goto out;
1232                 }
1233         } else {
1234                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1235                         if (nid_same(&ne->nl_nid, nid))
1236                                 goto remove_nid_entry;
1237                 }
1238                 rc = -ENOENT;
1239                 ne = NULL;
1240                 goto out;
1241         }
1242
1243 remove_nid_entry:
1244         lnet_net_lock(LNET_LOCK_EX);
1245         spin_lock(&lpni->lpni_lock);
1246         if (lpni->lpni_pref_nnids == 1)
1247                 lpni->lpni_pref.nid = LNET_ANY_NID;
1248         else {
1249                 list_del_init(&ne->nl_list);
1250                 if (lpni->lpni_pref_nnids == 2) {
1251                         struct lnet_nid_list *ne, *tmp;
1252
1253                         list_for_each_entry_safe(ne, tmp,
1254                                                  &lpni->lpni_pref.nids,
1255                                                  nl_list) {
1256                                 lpni->lpni_pref.nid = ne->nl_nid;
1257                                 list_del_init(&ne->nl_list);
1258                                 LIBCFS_FREE(ne, sizeof(*ne));
1259                         }
1260                 }
1261         }
1262         lpni->lpni_pref_nnids--;
1263         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1264         spin_unlock(&lpni->lpni_lock);
1265         lnet_net_unlock(LNET_LOCK_EX);
1266
1267         if (ne)
1268                 LIBCFS_FREE(ne, sizeof(*ne));
1269 out:
1270         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1271                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1272         return rc;
1273 }
1274
1275 void
1276 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1277 {
1278         struct list_head zombies;
1279         struct lnet_nid_list *ne;
1280         struct lnet_nid_list *tmp;
1281
1282         INIT_LIST_HEAD(&zombies);
1283
1284         lnet_net_lock(LNET_LOCK_EX);
1285         if (lpni->lpni_pref_nnids == 1)
1286                 lpni->lpni_pref.nid = LNET_ANY_NID;
1287         else if (lpni->lpni_pref_nnids > 1)
1288                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1289         lpni->lpni_pref_nnids = 0;
1290         lnet_net_unlock(LNET_LOCK_EX);
1291
1292         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1293                 list_del_init(&ne->nl_list);
1294                 LIBCFS_FREE(ne, sizeof(*ne));
1295         }
1296 }
1297
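/*
 * Copy the primary NID of the peer owning *nid into *result, or *nid
 * itself if no peer_ni exists for it. Call with lnet_net_lock held.
 */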
1298 void
1299 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1300 {
1301         struct lnet_peer_ni *lpni;
1302
1303         *result = *nid;
1304         lpni = lnet_peer_ni_find_locked(nid);
1305         if (lpni) {
1306                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1307                 lnet_peer_ni_decref_locked(lpni);
1308         }
1309 }
1310
1311 bool
1312 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1313 __must_hold(&lp->lp_lock)
1314 {
1315         if (lnet_peer_discovery_disabled)
1316                 return true;
1317
1318         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1319             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1320                 return true;
1321         }
1322
1323         return false;
1324 }
1325
1326 /*
1327  * Peer Discovery
1328  */
1329 bool
1330 lnet_is_discovery_disabled(struct lnet_peer *lp)
1331 {
1332         bool rc = false;
1333
1334         spin_lock(&lp->lp_lock);
1335         rc = lnet_is_discovery_disabled_locked(lp);
1336         spin_unlock(&lp->lp_lock);
1337
1338         return rc;
1339 }
1340
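/*
 * Add a peer made up of the given nid4 NIDs. The first (non-loopback)
 * NID in the array becomes the primary NID. When discovery is enabled
 * the peer is created multi-rail and the remaining NIDs are added to the
 * same peer; when discovery is disabled each NID is added as a separate
 * peer. -EEXIST from an individual NID is not treated as a failure.
 *
 * A minimal usage sketch (nid0 and nid1 are hypothetical nid4 values):
 *
 *	lnet_nid_t nids[2] = { nid0, nid1 };
 *	int rc = LNetAddPeer(nids, 2);
 */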
1341 int
1342 LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
1343 {
1344         struct lnet_nid pnid = LNET_ANY_NID;
1345         bool mr;
1346         int i, rc;
1347
1348         if (!nids || num_nids < 1)
1349                 return -EINVAL;
1350
1351         rc = LNetNIInit(LNET_PID_ANY);
1352         if (rc < 0)
1353                 return rc;
1354
1355         mutex_lock(&the_lnet.ln_api_mutex);
1356
1357         mr = lnet_peer_discovery_disabled == 0;
1358
1359         rc = 0;
1360         for (i = 0; i < num_nids; i++) {
1361                 struct lnet_nid nid;
1362
1363                 if (nids[i] == LNET_NID_LO_0)
1364                         continue;
1365
1366                 lnet_nid4_to_nid(nids[i], &nid);
1367                 if (LNET_NID_IS_ANY(&pnid)) {
1368                         lnet_nid4_to_nid(nids[i], &pnid);
1369                         rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, true);
1370                 } else if (lnet_peer_discovery_disabled) {
1371                         lnet_nid4_to_nid(nids[i], &nid);
1372                         rc = lnet_add_peer_ni(&nid, &LNET_ANY_NID, mr, true);
1373                 } else {
1374                         lnet_nid4_to_nid(nids[i], &nid);
1375                         rc = lnet_add_peer_ni(&pnid, &nid, mr, true);
1376                 }
1377
1378                 if (rc && rc != -EEXIST)
1379                         goto unlock;
1380         }
1381
1382 unlock:
1383         mutex_unlock(&the_lnet.ln_api_mutex);
1384
1385         LNetNIFini();
1386
1387         return rc == -EEXIST ? 0 : rc;
1388 }
1389 EXPORT_SYMBOL(LNetAddPeer);
1390
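/*
 * Resolve *nid in place to the primary NID of the peer that owns it.
 * If peer discovery is enabled and the peer is not up to date, discovery
 * is run (repeatedly, if needed) before the primary NID is read back.
 * Loopback NIDs are returned unchanged.
 */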
1391 void LNetPrimaryNID(struct lnet_nid *nid)
1392 {
1393         struct lnet_peer *lp;
1394         struct lnet_peer_ni *lpni;
1395         struct lnet_nid orig;
1396         int rc = 0;
1397         int cpt;
1398
1399         if (!nid || nid_is_lo0(nid))
1400                 return;
1401         orig = *nid;
1402
1403         cpt = lnet_net_lock_current();
1404         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1405         if (IS_ERR(lpni)) {
1406                 rc = PTR_ERR(lpni);
1407                 goto out_unlock;
1408         }
1409         lp = lpni->lpni_peer_net->lpn_peer;
1410
1411         /* If discovery is disabled locally then we needn't bother running
1412          * discovery here because discovery will not modify whatever
1413          * primary NID is currently set for this peer. If the specified peer is
1414          * down then this discovery can introduce long delays into the mount
1415          * process, so skip it if it isn't necessary.
1416          */
1417         while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
1418                 spin_lock(&lp->lp_lock);
1419                 /* force a full discovery cycle */
1420                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1421                 spin_unlock(&lp->lp_lock);
1422
1423                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1424                 if (rc)
1425                         goto out_decref;
1426                 /* The lpni (or lp) for this NID may have changed and our ref is
1427                  * the only thing keeping the old one around. Release the ref
1428                  * and look up the lpni again.
1429                  */
1430                 lnet_peer_ni_decref_locked(lpni);
1431                 lpni = lnet_peer_ni_find_locked(nid);
1432                 if (!lpni) {
1433                         rc = -ENOENT;
1434                         goto out_unlock;
1435                 }
1436                 lp = lpni->lpni_peer_net->lpn_peer;
1437
1438                 /* If we find that the peer has discovery disabled then we will
1439                  * not modify whatever primary NID is currently set for this
1440                  * peer. Thus, we can break out of this loop even if the peer
1441                  * is not fully up to date.
1442                  */
1443                 if (lnet_is_discovery_disabled(lp))
1444                         break;
1445         }
1446         *nid = lp->lp_primary_nid;
1447 out_decref:
1448         lnet_peer_ni_decref_locked(lpni);
1449 out_unlock:
1450         lnet_net_unlock(cpt);
1451
1452         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1453                libcfs_nidstr(nid), rc);
1454 }
1455 EXPORT_SYMBOL(LNetPrimaryNID);
1456
1457 struct lnet_peer_net *
1458 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1459 {
1460         struct lnet_peer_net *peer_net;
1461         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1462                 if (peer_net->lpn_net_id == net_id)
1463                         return peer_net;
1464         }
1465         return NULL;
1466 }
1467
1468 /*
1469  * Attach a peer_ni to a peer_net and peer. This function assumes
1470  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1471  * may be attached to a different peer, in which case it will be
1472  * properly detached first. The whole operation is done atomically.
1473  *
1474  * This function consumes the reference on lpni and always returns 0.
1475  * This is the last function called from functions that do return an
1476  * int, so returning 0 here allows the compiler to do a tail call.
1477  */
1478 static int
1479 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1480                                 struct lnet_peer_net *lpn,
1481                                 struct lnet_peer_ni *lpni,
1482                                 unsigned flags)
1483 {
1484         struct lnet_peer_table *ptable;
1485         bool new_lpn = false;
1486         int rc;
1487
1488         /* Install the new peer_ni */
1489         lnet_net_lock(LNET_LOCK_EX);
1490         /* Add peer_ni to global peer table hash, if necessary. */
1491         if (list_empty(&lpni->lpni_hashlist)) {
1492                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1493
1494                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1495                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1496                 ptable->pt_version++;
1497                 lnet_peer_ni_addref_locked(lpni);
1498         }
1499
1500         /* Detach the peer_ni from an existing peer, if necessary. */
1501         if (lpni->lpni_peer_net) {
1502                 LASSERT(lpni->lpni_peer_net != lpn);
1503                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1504                 lnet_peer_detach_peer_ni_locked(lpni);
1505                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1506                 lpni->lpni_peer_net = NULL;
1507         }
1508
1509         /* Add peer_ni to peer_net */
1510         lpni->lpni_peer_net = lpn;
1511         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1512                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1513         else
1514                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1515         lnet_update_peer_net_healthv(lpni);
1516         lnet_peer_net_addref_locked(lpn);
1517
1518         /* Add peer_net to peer */
1519         if (!lpn->lpn_peer) {
1520                 new_lpn = true;
1521                 lpn->lpn_peer = lp;
1522                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1523                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1524                 else
1525                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1526                 lnet_peer_addref_locked(lp);
1527         }
1528
1529         /* Add peer to global peer list, if necessary */
1530         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1531         if (list_empty(&lp->lp_peer_list)) {
1532                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1533                 ptable->pt_peers++;
1534         }
1535
1537         /* Update peer state */
1538         spin_lock(&lp->lp_lock);
1539         if (flags & LNET_PEER_CONFIGURED) {
1540                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1541                         lp->lp_state |= LNET_PEER_CONFIGURED;
1542         }
1543         if (flags & LNET_PEER_MULTI_RAIL) {
1544                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1545                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1546                         lnet_peer_clr_non_mr_pref_nids(lp);
1547                 }
1548         }
1549         spin_unlock(&lp->lp_lock);
1550
1551         lp->lp_nnis++;
1552
1553         /* apply UDSPs */
1554         if (new_lpn) {
1555                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1556                 if (rc)
1557                         CERROR("Failed to apply UDSPs on lpn %s\n",
1558                                libcfs_net2str(lpn->lpn_net_id));
1559         }
1560         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1561         if (rc)
1562                 CERROR("Failed to apply UDSPs on lpni %s\n",
1563                        libcfs_nidstr(&lpni->lpni_nid));
1564
1565         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1566                libcfs_nidstr(&lp->lp_primary_nid),
1567                libcfs_nidstr(&lpni->lpni_nid), flags);
1568         lnet_peer_ni_decref_locked(lpni);
1569         lnet_net_unlock(LNET_LOCK_EX);
1570
1571         return 0;
1572 }
1573
1574 /*
1575  * Create a new peer, with nid as its primary nid.
1576  *
1577  * Call with the lnet_api_mutex held.
1578  */
1579 static int
1580 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1581 {
1582         struct lnet_peer *lp;
1583         struct lnet_peer_net *lpn;
1584         struct lnet_peer_ni *lpni;
1585         int rc = 0;
1586
1587         LASSERT(nid);
1588
1589         /*
1590          * No need for the lnet_net_lock here, because the
1591          * lnet_api_mutex is held.
1592          */
1593         lpni = lnet_peer_ni_find_locked(nid);
1594         if (lpni) {
1595                 /* A peer with this NID already exists. */
1596                 lp = lpni->lpni_peer_net->lpn_peer;
1597                 lnet_peer_ni_decref_locked(lpni);
1598                 /*
1599                  * This is an error if the peer was configured and the
1600                  * primary NID differs or an attempt is made to change
1601                  * the Multi-Rail flag. Otherwise the assumption is
1602                  * that an existing peer is being modified.
1603                  */
1604                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1605                         if (!nid_same(&lp->lp_primary_nid, nid))
1606                                 rc = -EEXIST;
1607                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1608                                 rc = -EPERM;
1609                         goto out;
1610                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1611                         if (nid_same(&lp->lp_primary_nid, nid)) {
1612                                 rc = -EEXIST;
1613                                 goto out;
1614                         }
1615                 }
1616                 /* Delete and recreate as a configured peer. */
1617                 rc = lnet_peer_del(lp);
1618                 if (rc)
1619                         goto out;
1620         }
1621
1622         /* Create peer, peer_net, and peer_ni. */
1623         rc = -ENOMEM;
1624         lp = lnet_peer_alloc(nid);
1625         if (!lp)
1626                 goto out;
1627         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1628         if (!lpn)
1629                 goto out_free_lp;
1630         lpni = lnet_peer_ni_alloc(nid);
1631         if (!lpni)
1632                 goto out_free_lpn;
1633
1634         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1635
1636 out_free_lpn:
1637         LIBCFS_FREE(lpn, sizeof(*lpn));
1638 out_free_lp:
1639         LIBCFS_FREE(lp, sizeof(*lp));
1640 out:
1641         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1642                libcfs_nidstr(nid), flags, rc);
1643         return rc;
1644 }
1645
1646 /*
1647  * Add a NID to a peer. Call with ln_api_mutex held.
1648  *
1649  * Error codes:
1650  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1651  *  -EEXIST:   The NID was configured by DLC for a different peer.
1652  *  -ENOMEM:   Out of memory.
1653  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1654  *             non-multi-rail peer.
1655  */
1656 static int
1657 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1658                   unsigned int flags)
1659 {
1660         struct lnet_peer_net *lpn;
1661         struct lnet_peer_ni *lpni;
1662         int rc = 0;
1663
1664         LASSERT(lp);
1665         LASSERT(nid);
1666
1667         /* A configured peer can only be updated through configuration. */
1668         if (!(flags & LNET_PEER_CONFIGURED)) {
1669                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1670                         rc = -EPERM;
1671                         goto out;
1672                 }
1673         }
1674
1675         /*
1676          * The MULTI_RAIL flag can be set but not cleared, because
1677          * that would leave the peer struct in an invalid state.
1678          */
1679         if (flags & LNET_PEER_MULTI_RAIL) {
1680                 spin_lock(&lp->lp_lock);
1681                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1682                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1683                         lnet_peer_clr_non_mr_pref_nids(lp);
1684                 }
1685                 spin_unlock(&lp->lp_lock);
1686         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1687                 rc = -EPERM;
1688                 goto out;
1689         }
1690
1691         lpni = lnet_peer_ni_find_locked(nid);
1692         if (lpni) {
1693                 /*
1694                  * A peer_ni already exists. This is only a problem if
1695                  * it is not connected to this peer and was configured
1696                  * by DLC.
1697                  */
1698                 if (lpni->lpni_peer_net->lpn_peer == lp)
1699                         goto out_free_lpni;
1700                 if (lnet_peer_ni_is_configured(lpni)) {
1701                         rc = -EEXIST;
1702                         goto out_free_lpni;
1703                 }
1704                 /* If this is the primary NID, destroy the peer. */
1705                 if (lnet_peer_ni_is_primary(lpni)) {
1706                         struct lnet_peer *rtr_lp =
1707                                 lpni->lpni_peer_net->lpn_peer;
1708                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1709                         /*
1710                          * If we're trying to delete a router it means
1711                          * we're moving this peer NI to a new peer, so we
1712                          * must transfer router properties to the new peer.
1713                          */
1714                         if (rtr_refcount > 0) {
1715                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1716                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1717                         }
1718                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1719                         lnet_peer_ni_decref_locked(lpni);
1720                         lpni = lnet_peer_ni_alloc(nid);
1721                         if (!lpni) {
1722                                 rc = -ENOMEM;
1723                                 goto out_free_lpni;
1724                         }
1725                 }
1726         } else {
1727                 lpni = lnet_peer_ni_alloc(nid);
1728                 if (!lpni) {
1729                         rc = -ENOMEM;
1730                         goto out_free_lpni;
1731                 }
1732         }
1733
1734         /*
1735          * Get the peer_net. Check that we're not adding a second
1736          * peer_ni on a peer_net of a non-multi-rail peer.
1737          */
1738         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1739         if (!lpn) {
1740                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1741                 if (!lpn) {
1742                         rc = -ENOMEM;
1743                         goto out_free_lpni;
1744                 }
1745         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1746                 rc = -ENOTUNIQ;
1747                 goto out_free_lpni;
1748         }
1749
1750         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1751
1752 out_free_lpni:
1753         lnet_peer_ni_decref_locked(lpni);
1754 out:
1755         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1756                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1757                flags, rc);
1758         return rc;
1759 }
1760
1761 /*
1762  * Update the primary NID of a peer, if possible.
1763  *
1764  * Call with the lnet_api_mutex held.
1765  */
1766 static int
1767 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1768                           unsigned int flags)
1769 {
1770         struct lnet_nid old = lp->lp_primary_nid;
1771         int rc = 0;
1772
1773         if (nid_same(&lp->lp_primary_nid, nid))
1774                 goto out;
1775
1776         lp->lp_primary_nid = *nid;
1777
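             /* The new primary NID was installed above, before calling
              * lnet_peer_add_nid(), so that lnet_peer_attach_peer_ni()
              * puts its peer_ni at the head of the NID lists; if the add
              * fails the old primary NID is restored.
              */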
1778         rc = lnet_peer_add_nid(lp, nid, flags);
1779         if (rc) {
1780                 lp->lp_primary_nid = old;
1781                 goto out;
1782         }
1783 out:
1784         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1785                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1786
1787         return rc;
1788 }
1789
1790 /*
1791  * lpni creation initiated due to traffic, either sending or receiving.
1792  * Callers must hold ln_api_mutex.
1793  * A ref is taken on the lnet_peer_ni returned by this function.
1794  */
1795 static struct lnet_peer_ni *
1796 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1797 __must_hold(&the_lnet.ln_api_mutex)
1798 {
1799         struct lnet_peer *lp = NULL;
1800         struct lnet_peer_net *lpn = NULL;
1801         struct lnet_peer_ni *lpni;
1802         unsigned flags = 0;
1803         int rc = 0;
1804
1805         if (LNET_NID_IS_ANY(nid)) {
1806                 rc = -EINVAL;
1807                 goto out_err;
1808         }
1809
1810         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1811         lpni = lnet_peer_ni_find_locked(nid);
1812         if (lpni) {
1813                 /*
1814                  * We must have raced with another thread. Since we
1815                  * know next to nothing about a peer_ni created by
1816                  * traffic, we just assume everything is ok and
1817                  * return.
1818                  */
1819                 goto out;
1820         }
1821
1822         /* Create peer, peer_net, and peer_ni. */
1823         rc = -ENOMEM;
1824         lp = lnet_peer_alloc(nid);
1825         if (!lp)
1826                 goto out_err;
1827         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1828         if (!lpn)
1829                 goto out_err;
1830         lpni = lnet_peer_ni_alloc(nid);
1831         if (!lpni)
1832                 goto out_err;
1833         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1834
1835         /* lnet_peer_attach_peer_ni() always returns 0 */
1836         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1837
1838         lnet_peer_ni_addref_locked(lpni);
1839
1840 out_err:
1841         if (rc) {
1842                 if (lpn)
1843                         LIBCFS_FREE(lpn, sizeof(*lpn));
1844                 if (lp)
1845                         LIBCFS_FREE(lp, sizeof(*lp));
1846                 lpni = ERR_PTR(rc);
1847         }
1848 out:
1849         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1850         return lpni;
1851 }
1852
1853 /*
1854  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1855  *
1856  * This API handles the following combinations:
1857  *   Create a peer with its primary NI if only the prim_nid is provided
1858  *   Add a NID to a peer identified by the prim_nid. The peer identified
1859  *   by the prim_nid must already exist.
1860  *   The peer being created may be non-MR.
1861  *
1862  * The caller must hold ln_api_mutex. This prevents the peer from
1863  * being created/modified/deleted by a different thread.
1864  */
1865 int
1866 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
1867                  bool temp)
1868 __must_hold(&the_lnet.ln_api_mutex)
1869 {
1870         struct lnet_peer *lp = NULL;
1871         struct lnet_peer_ni *lpni;
1872         unsigned int flags = 0;
1873
1874         /* The prim_nid must always be specified */
1875         if (LNET_NID_IS_ANY(prim_nid))
1876                 return -EINVAL;
1877
1878         if (!temp)
1879                 flags = LNET_PEER_CONFIGURED;
1880
1881         if (mr)
1882                 flags |= LNET_PEER_MULTI_RAIL;
1883
1884         /*
1885          * If nid isn't specified, we must create a new peer with
1886          * prim_nid as its primary nid.
1887          */
1888         if (LNET_NID_IS_ANY(nid))
1889                 return lnet_peer_add(prim_nid, flags);
1890
1891         /* Look up the prim_nid, which must exist. */
1892         lpni = lnet_peer_ni_find_locked(prim_nid);
1893         if (!lpni)
1894                 return -ENOENT;
1895         lnet_peer_ni_decref_locked(lpni);
1896         lp = lpni->lpni_peer_net->lpn_peer;
1897
1898         /* Peer must have been configured. */
1899         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1900                 CDEBUG(D_NET, "peer %s was not configured\n",
1901                        libcfs_nidstr(prim_nid));
1902                 return -ENOENT;
1903         }
1904
1905         /* Primary NID must match */
1906         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
1907                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1908                        libcfs_nidstr(prim_nid),
1909                        libcfs_nidstr(&lp->lp_primary_nid));
1910                 return -ENODEV;
1911         }
1912
1913         /* Multi-Rail flag must match. */
1914         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1915                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1916                        libcfs_nidstr(prim_nid));
1917                 return -EPERM;
1918         }
1919
1920         if (temp && lnet_peer_is_uptodate(lp)) {
1921                 CDEBUG(D_NET,
1922                        "Don't add temporary peer NI for uptodate peer %s\n",
1923                        libcfs_nidstr(&lp->lp_primary_nid));
1924                 return -EINVAL;
1925         }
1926
1927         return lnet_peer_add_nid(lp, nid, flags);
1928 }
1929
1930 /*
1931  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1932  *
1933  * This API handles the following combinations:
1934  *   Delete a NI from a peer if both prim_nid and nid are provided.
1935  *   Delete a peer if only prim_nid is provided.
1936  *   Delete a peer if the nid provided is its primary nid.
1937  *
1938  * The caller must hold ln_api_mutex. This prevents the peer from
1939  * being modified/deleted by a different thread.
1940  */
1941 int
1942 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid)
1943 {
1944         struct lnet_peer *lp;
1945         struct lnet_peer_ni *lpni;
1946         unsigned int flags;
1947
1948         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
1949                 return -EINVAL;
1950
1951         lpni = lnet_peer_ni_find_locked(prim_nid);
1952         if (!lpni)
1953                 return -ENOENT;
1954         lnet_peer_ni_decref_locked(lpni);
1955         lp = lpni->lpni_peer_net->lpn_peer;
1956
1957         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
1958                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1959                        libcfs_nidstr(prim_nid),
1960                        libcfs_nidstr(&lp->lp_primary_nid));
1961                 return -ENODEV;
1962         }
1963
1964         lnet_net_lock(LNET_LOCK_EX);
1965         if (lp->lp_rtr_refcount > 0) {
1966                 lnet_net_unlock(LNET_LOCK_EX);
1967                 CERROR("%s is a router. Can not be deleted\n",
1968                        libcfs_nidstr(prim_nid));
1969                 return -EBUSY;
1970         }
1971         lnet_net_unlock(LNET_LOCK_EX);
1972
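             /* Deleting the primary NID (or passing no nid at all) removes
              * the whole peer; otherwise only the single NID is removed.
              */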
1973         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid))
1974                 return lnet_peer_del(lp);
1975
1976         flags = LNET_PEER_CONFIGURED;
1977         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1978                 flags |= LNET_PEER_MULTI_RAIL;
1979
1980         return lnet_peer_del_nid(lp, nid, flags);
1981 }
1982
1983 void
1984 lnet_destroy_peer_ni_locked(struct kref *ref)
1985 {
1986         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1987                                                  lpni_kref);
1988         struct lnet_peer_table *ptable;
1989         struct lnet_peer_net *lpn;
1990
1991         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
1992
1993         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1994         LASSERT(list_empty(&lpni->lpni_txq));
1995         LASSERT(lpni->lpni_txqnob == 0);
1996         LASSERT(list_empty(&lpni->lpni_peer_nis));
1997         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1998
1999         lpn = lpni->lpni_peer_net;
2000         lpni->lpni_peer_net = NULL;
2001         lpni->lpni_net = NULL;
2002
2003         if (!list_empty(&lpni->lpni_hashlist)) {
2004                 /* remove the peer ni from the zombie list */
2005                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2006                 spin_lock(&ptable->pt_zombie_lock);
2007                 list_del_init(&lpni->lpni_hashlist);
2008                 ptable->pt_zombies--;
2009                 spin_unlock(&ptable->pt_zombie_lock);
2010         }
2011
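             /* A peer_ni with more than one preferred NID keeps them on the
              * lpni_pref.nids list; release those entries here.
              */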
2012         if (lpni->lpni_pref_nnids > 1) {
2013                 struct lnet_nid_list *ne, *tmp;
2014
2015                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2016                                          nl_list) {
2017                         list_del_init(&ne->nl_list);
2018                         LIBCFS_FREE(ne, sizeof(*ne));
2019                 }
2020         }
2021         LIBCFS_FREE(lpni, sizeof(*lpni));
2022
2023         if (lpn)
2024                 lnet_peer_net_decref_locked(lpn);
2025 }
2026
2027 struct lnet_peer_ni *
2028 lnet_nid2peerni_ex(struct lnet_nid *nid)
2029 __must_hold(&the_lnet.ln_api_mutex)
2030 {
2031         struct lnet_peer_ni *lpni = NULL;
2032
2033         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2034                 return ERR_PTR(-ESHUTDOWN);
2035
2036         /*
2037          * find if a peer_ni already exists.
2038          * If so then just return that.
2039          */
2040         lpni = lnet_peer_ni_find_locked(nid);
2041         if (lpni)
2042                 return lpni;
2043
2044         lnet_net_unlock(LNET_LOCK_EX);
2045
2046         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2047
2048         lnet_net_lock(LNET_LOCK_EX);
2049
2050         return lpni;
2051 }
2052
2053 /*
2054  * Get a peer_ni for the given nid, create it if necessary. Takes a
2055  * hold on the peer_ni.
2056  */
2057 struct lnet_peer_ni *
2058 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2059                         struct lnet_nid *pref, int cpt)
2060 {
2061         struct lnet_peer_ni *lpni = NULL;
2062
2063         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2064                 return ERR_PTR(-ESHUTDOWN);
2065
2066         /*
2067          * find if a peer_ni already exists.
2068          * If so then just return that.
2069          */
2070         lpni = lnet_peer_ni_find_locked(nid);
2071         if (lpni)
2072                 return lpni;
2073
2074         /*
2075          * Slow path:
2076          * use the lnet_api_mutex to serialize the creation of the peer_ni
2077          * and the creation/deletion of the local ni/net. When a local ni is
2078          * created, if there exists a set of peer_nis on that network,
2079          * they need to be traversed and updated. When a local NI is
2080          * deleted, which could result in a network being deleted, then
2081          * all peer nis on that network need to be removed as well.
2082          *
2083          * Creation through traffic should also be serialized with
2084          * creation through DLC.
2085          */
2086         lnet_net_unlock(cpt);
2087         mutex_lock(&the_lnet.ln_api_mutex);
2088         /*
2089          * the_lnet.ln_state is only modified under the ln_api_mutex, so a single
2090          * check here is sufficient.
2091          */
2092         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2093                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2094
2095         mutex_unlock(&the_lnet.ln_api_mutex);
2096         lnet_net_lock(cpt);
2097
2098         /* Lock has been dropped, check again for shutdown. */
2099         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2100                 if (!IS_ERR_OR_NULL(lpni))
2101                         lnet_peer_ni_decref_locked(lpni);
2102                 lpni = ERR_PTR(-ESHUTDOWN);
2103         }
2104
2105         return lpni;
2106 }
2107
2108 bool
2109 lnet_peer_gw_discovery(struct lnet_peer *lp)
2110 {
2111         bool rc = false;
2112
2113         spin_lock(&lp->lp_lock);
2114         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2115                 rc = true;
2116         spin_unlock(&lp->lp_lock);
2117
2118         return rc;
2119 }
2120
2121 bool
2122 lnet_peer_is_uptodate(struct lnet_peer *lp)
2123 {
2124         bool rc;
2125
2126         spin_lock(&lp->lp_lock);
2127         rc = lnet_peer_is_uptodate_locked(lp);
2128         spin_unlock(&lp->lp_lock);
2129         return rc;
2130 }
2131
2132 /*
2133  * Is a peer uptodate from the point of view of discovery?
2134  *
2135  * If it is currently being processed, obviously not.
2136  * A forced Ping or Push is also handled by the discovery thread.
2137  *
2138  * Otherwise look at whether the peer needs rediscovering.
2139  */
2140 bool
2141 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2142 __must_hold(&lp->lp_lock)
2143 {
2144         bool rc;
2145
2146         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2147                             LNET_PEER_FORCE_PING |
2148                             LNET_PEER_FORCE_PUSH)) {
2149                 rc = false;
2150         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2151                 rc = false;
2152         } else if (lnet_peer_needs_push(lp)) {
2153                 rc = false;
2154         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2155                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2156                         rc = true;
2157                 else
2158                         rc = false;
2159         } else {
2160                 rc = false;
2161         }
2162
2163         return rc;
2164 }
2165
2166 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2167 void
2168 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2169 {
2170         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2171          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2172          * when adding to the list and queuing the peer to ensure that we do not
2173          * strand any messages on the lp_dc_pendq. This scheme ensures the
2174          * message will be resent even if the peer is already being discovered.
2175          * Therefore we needn't check the return value of
2176          * lnet_peer_queue_for_discovery(lp).
2177          */
2178         lnet_net_lock(LNET_LOCK_EX);
2179         spin_lock(&lp->lp_lock);
2180         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2181         spin_unlock(&lp->lp_lock);
2182         lnet_peer_queue_for_discovery(lp);
2183         lnet_net_unlock(LNET_LOCK_EX);
2184 }
2185
2186 /*
2187  * Queue a peer for the attention of the discovery thread.  Call with
2188  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2189  * -EALREADY if the peer was already queued.
2190  */
2191 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2192 {
2193         int rc;
2194
2195         spin_lock(&lp->lp_lock);
2196         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2197                 lp->lp_state |= LNET_PEER_DISCOVERING;
2198         spin_unlock(&lp->lp_lock);
2199         if (list_empty(&lp->lp_dc_list)) {
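                     /* The reference taken here is dropped by
                      * lnet_peer_discovery_complete() once discovery of
                      * this peer finishes.
                      */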
2200                 lnet_peer_addref_locked(lp);
2201                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2202                 wake_up(&the_lnet.ln_dc_waitq);
2203                 rc = 0;
2204         } else {
2205                 rc = -EALREADY;
2206         }
2207
2208         CDEBUG(D_NET, "Queue peer %s: %d\n",
2209                libcfs_nidstr(&lp->lp_primary_nid), rc);
2210
2211         return rc;
2212 }
2213
2214 /*
2215  * Discovery of a peer is complete. Wake all waiters on the peer.
2216  * Call with lnet_net_lock/EX held.
2217  */
2218 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2219 {
2220         struct lnet_msg *msg, *tmp;
2221         int rc = 0;
2222         LIST_HEAD(pending_msgs);
2223
2224         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2225                libcfs_nidstr(&lp->lp_primary_nid));
2226
2227         spin_lock(&lp->lp_lock);
2228         /* Our caller dropped lp_lock which may have allowed another thread to
2229          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2230          * Ensure it is cleared.
2231          */
2232         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2233         if (dc_error) {
2234                 lp->lp_dc_error = dc_error;
2235                 lp->lp_state |= LNET_PEER_REDISCOVER;
2236         }
2237         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2238         spin_unlock(&lp->lp_lock);
2239         list_del_init(&lp->lp_dc_list);
2240         wake_up(&lp->lp_dc_waitq);
2241
2242         if (lp->lp_rtr_refcount > 0)
2243                 lnet_router_discovery_complete(lp);
2244
2245         lnet_net_unlock(LNET_LOCK_EX);
2246
2247         /* iterate through all pending messages and send them again */
2248         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2249                 list_del_init(&msg->msg_list);
2250                 if (dc_error) {
2251                         lnet_finalize(msg, dc_error);
2252                         continue;
2253                 }
2254
2255                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2256                        lnet_msgtyp2str(msg->msg_type),
2257                        libcfs_idstr(&msg->msg_target));
2258                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2259                                &msg->msg_rtr_nid_param);
2260                 if (rc < 0) {
2261                         CNETERR("Error sending %s to %s: %d\n",
2262                                lnet_msgtyp2str(msg->msg_type),
2263                                libcfs_idstr(&msg->msg_target), rc);
2264                         lnet_finalize(msg, rc);
2265                 }
2266         }
2267         lnet_net_lock(LNET_LOCK_EX);
2268         lnet_peer_decref_locked(lp);
2269 }
2270
2271 /*
2272  * Handle inbound push.
2273  * Like any event handler, called with lnet_res_lock/CPT held.
2274  */
2275 void lnet_peer_push_event(struct lnet_event *ev)
2276 {
2277         struct lnet_ping_buffer *pbuf;
2278         struct lnet_peer *lp;
2279         int infobytes;
2280
2281         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2282
2283         /* lnet_find_peer() adds a refcount */
2284         lp = lnet_find_peer(&ev->source.nid);
2285         if (!lp) {
2286                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2287                        libcfs_nidstr(&ev->initiator.nid),
2288                        libcfs_nidstr(&ev->source.nid));
2289                 pbuf->pb_needs_post = true;
2290                 return;
2291         }
2292
2293         /* Ensure peer state remains consistent while we modify it. */
2294         spin_lock(&lp->lp_lock);
2295
2296         /*
2297          * If some kind of error happened the contents of the message
2298          * cannot be used. Clear the NIDS_UPTODATE and set the
2299          * FORCE_PING flag to trigger a ping.
2300          */
2301         if (ev->status) {
2302                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2303                 lp->lp_state |= LNET_PEER_FORCE_PING;
2304                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2305                        ev->status,
2306                        libcfs_nidstr(&lp->lp_primary_nid),
2307                        libcfs_nidstr(&ev->source.nid));
2308                 goto out;
2309         }
2310
2311         /*
2312          * A push with invalid or corrupted info. Clear the UPTODATE
2313          * flag to trigger a ping.
2314          */
2315         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2316                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2317                 lp->lp_state |= LNET_PEER_FORCE_PING;
2318                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2319                        libcfs_nidstr(&lp->lp_primary_nid));
2320                 goto out;
2321         }
2322
2323         /* Make sure we'll allocate the correct size ping buffer when
2324          * pinging the peer.
2325          */
2326         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2327         if (lp->lp_data_bytes < infobytes)
2328                 lp->lp_data_bytes = infobytes;
2329
2330         /*
2331          * A non-Multi-Rail peer is not supposed to be capable of
2332          * sending a push.
2333          */
2334         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2335                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2336                        libcfs_nidstr(&lp->lp_primary_nid));
2337                 goto out;
2338         }
2339
2340         /*
2341          * The peer may have discovery disabled at its end. Set
2342          * NO_DISCOVERY as appropriate.
2343          */
2344         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2345                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2346                        libcfs_nidstr(&lp->lp_primary_nid));
2347                 /*
2348                  * Mark the peer for deletion if we already know about it
2349                  * and it is going from discovery enabled to discovery disabled.
2350                  */
2351                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2352                                       LNET_PEER_DISCOVERING)) &&
2353                      lp->lp_state & LNET_PEER_DISCOVERED) {
2354                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2355                                libcfs_nidstr(&lp->lp_primary_nid),
2356                                lp->lp_state);
2357                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2358                 }
2359                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2360         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2361                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2362                        libcfs_nidstr(&lp->lp_primary_nid));
2363                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2364         }
2365
2366         /*
2367          * Update the MULTI_RAIL flag based on the push. If the peer
2368          * was configured with DLC then the setting should match what
2369          * DLC put in.
2370          * NB: We verified above that the MR feature bit is set in pi_features
2371          */
2372         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2373                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2374                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2375         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2376                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2377                       libcfs_nidstr(&lp->lp_primary_nid));
2378         } else if (lnet_peer_discovery_disabled) {
2379                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2380                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2381         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2382                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2383                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2384         } else {
2385                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2386                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2387                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2388                 lnet_peer_clr_non_mr_pref_nids(lp);
2389         }
2390
2391         /* Check for truncation of the Put message. Clear the
2392          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2393          * and tell discovery to allocate a bigger buffer.
2394          */
2395         if (ev->mlength < ev->rlength) {
2396                 if (the_lnet.ln_push_target_nbytes < infobytes)
2397                         the_lnet.ln_push_target_nbytes = infobytes;
2398                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2399                 lp->lp_state |= LNET_PEER_FORCE_PING;
2400                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2401                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2402                 goto out;
2403         }
2404
2405         /* always assume new data */
2406         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2407         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2408
2409         /* If there is data present that hasn't been processed yet,
2410          * we'll replace it if the Put contained newer data and it
2411          * fits. We're racing with a Ping or earlier Push in this
2412          * case.
2413          */
2414         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2415                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2416                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2417                     infobytes <= lp->lp_data->pb_nbytes) {
2418                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2419                                infobytes);
2420                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2421                               libcfs_nidstr(&lp->lp_primary_nid),
2422                               LNET_PING_BUFFER_SEQNO(pbuf),
2423                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2424                 }
2425                 goto out;
2426         }
2427
2428         /*
2429          * Allocate a buffer to copy the data. On a failure we drop
2430          * the Push and set FORCE_PING to force the discovery
2431          * thread to fix the problem by pinging the peer.
2432          */
2433         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2434         if (!lp->lp_data) {
2435                 lp->lp_state |= LNET_PEER_FORCE_PING;
2436                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2437                        libcfs_nidstr(&lp->lp_primary_nid),
2438                        LNET_PING_BUFFER_SEQNO(pbuf));
2439                 goto out;
2440         }
2441
2442         /* Success */
2443         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes);
2444         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2445         CDEBUG(D_NET, "Received Push %s %u\n",
2446                libcfs_nidstr(&lp->lp_primary_nid),
2447                LNET_PING_BUFFER_SEQNO(pbuf));
2448
2449 out:
2450         /* We've processed this buffer. It can be reposted */
2451         pbuf->pb_needs_post = true;
2452
2453         /*
2454          * Queue the peer for discovery if that hasn't been done yet. If
2455          * the peer was already queued, force it back onto the request queue
2456          * and wake the discovery thread, because its status changed.
2457          */
2458         spin_unlock(&lp->lp_lock);
2459         lnet_net_lock(LNET_LOCK_EX);
2460         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2461                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2462                 wake_up(&the_lnet.ln_dc_waitq);
2463         }
2464         /* Drop refcount from lookup */
2465         lnet_peer_decref_locked(lp);
2466         lnet_net_unlock(LNET_LOCK_EX);
2467 }
2468
2469 /*
2470  * Clear the discovery error state, unless we're already discovering
2471  * this peer, in which case the error is current.
2472  */
2473 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2474 {
2475         spin_lock(&lp->lp_lock);
2476         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2477                 lp->lp_dc_error = 0;
2478         spin_unlock(&lp->lp_lock);
2479 }
2480
2481 /*
2482  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2483  * dropped/retaken within this function. An lnet_peer_ni is passed in
2484  * because discovery could tear down an lnet_peer.
2485  */
2486 int
2487 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2488 {
2489         DEFINE_WAIT(wait);
2490         struct lnet_peer *lp;
2491         int rc = 0;
2492         int count = 0;
2493
2494 again:
2495         lnet_net_unlock(cpt);
2496         lnet_net_lock(LNET_LOCK_EX);
2497         lp = lpni->lpni_peer_net->lpn_peer;
2498         lnet_peer_clear_discovery_error(lp);
2499
2500         /*
2501          * We're willing to be interrupted. The lpni can become a
2502          * zombie if we race with DLC, so we must check for that.
2503          */
2504         for (;;) {
2505                 /* Keep lp alive when the lnet_net_lock is unlocked */
2506                 lnet_peer_addref_locked(lp);
2507                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2508                 if (signal_pending(current))
2509                         break;
2510                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2511                         break;
2512                 /*
2513                  * Don't repeat discovery if discovery is disabled. This is
2514                  * done so discovery can still be used as a standard ping, for
2515                  * backwards compatibility with routers that do not support
2516                  * discovery or have it disabled.
2517                  */
2518                 if (lnet_is_discovery_disabled(lp) && count > 0)
2519                         break;
2520                 if (lp->lp_dc_error)
2521                         break;
2522                 if (lnet_peer_is_uptodate(lp))
2523                         break;
2524                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2525                         break;
2526                 lnet_peer_queue_for_discovery(lp);
2527                 count++;
2528                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2529
2530                 /*
2531                  * If caller requested a non-blocking operation then
2532                  * return immediately. Once discovery is complete any
2533                  * pending messages that were stopped due to discovery
2534                  * will be transmitted.
2535                  */
2536                 if (!block)
2537                         break;
2538
2539                 lnet_net_unlock(LNET_LOCK_EX);
2540                 schedule();
2541                 finish_wait(&lp->lp_dc_waitq, &wait);
2542                 lnet_net_lock(LNET_LOCK_EX);
2543                 lnet_peer_decref_locked(lp);
2544                 /* Peer may have changed */
2545                 lp = lpni->lpni_peer_net->lpn_peer;
2546         }
2547         finish_wait(&lp->lp_dc_waitq, &wait);
2548
2549         lnet_net_unlock(LNET_LOCK_EX);
2550         lnet_net_lock(cpt);
2551         lnet_peer_decref_locked(lp);
2552         /*
2553          * The peer may have changed, so re-check and rediscover if that turns
2554          * out to have been the case. The reference count on lp ensured that
2555          * even if it was unlinked from lpni the memory could not be recycled.
2556          * Thus the check below is sufficient to determine whether the peer
2557          * changed. If the peer changed, then lp must not be dereferenced.
2558          */
2559         if (lp != lpni->lpni_peer_net->lpn_peer)
2560                 goto again;
2561
2562         if (signal_pending(current))
2563                 rc = -EINTR;
2564         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2565                 rc = -ESHUTDOWN;
2566         else if (lp->lp_dc_error)
2567                 rc = lp->lp_dc_error;
2568         else if (!block)
2569                 CDEBUG(D_NET, "non-blocking discovery\n");
2570         else if (!lnet_peer_is_uptodate(lp) &&
2571                  !(lnet_is_discovery_disabled(lp) ||
2572                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2573                 goto again;
2574
2575         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2576                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2577                libcfs_nidstr(&lpni->lpni_nid), rc,
2578                (!block) ? "pending discovery" : "discovery complete");
2579
2580         return rc;
2581 }
2582
2583 /* Handle an incoming ack for a push. */
2584 static void
2585 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2586 {
2587         struct lnet_ping_buffer *pbuf;
2588
2589         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
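             /* On failure mark the push as failed so the discovery thread
              * retries it; on success record the sequence number of the
              * data that was acked.
              */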
2590         spin_lock(&lp->lp_lock);
2591         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2592         lp->lp_push_error = ev->status;
2593         if (ev->status)
2594                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2595         else
2596                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2597         spin_unlock(&lp->lp_lock);
2598
2599         CDEBUG(D_NET, "peer %s ev->status %d\n",
2600                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2601 }
2602
2603 /* Handle a Reply message. This is the reply to a Ping message. */
2604 static void
2605 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2606 {
2607         struct lnet_ping_buffer *pbuf;
2608         int infobytes;
2609         int rc;
2610         bool ping_feat_disc;
2611
2612         spin_lock(&lp->lp_lock);
2613
2614         lp->lp_disc_src_nid = ev->target.nid;
2615         lp->lp_disc_dst_nid = ev->source.nid;
2616
2617         /*
2618          * If some kind of error happened the contents of message
2619          * cannot be used. Set PING_FAILED to trigger a retry.
2620          */
2621         if (ev->status) {
2622                 lp->lp_state |= LNET_PEER_PING_FAILED;
2623                 lp->lp_ping_error = ev->status;
2624                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2625                        ev->status,
2626                        libcfs_nidstr(&lp->lp_primary_nid),
2627                        libcfs_nidstr(&ev->source.nid));
2628                 goto out;
2629         }
2630
2631         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2632         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2633                 lnet_swap_pinginfo(pbuf);
2634
2635         /*
2636          * A reply with invalid or corrupted info. Set PING_FAILED to
2637          * trigger a retry.
2638          */
2639         rc = lnet_ping_info_validate(&pbuf->pb_info);
2640         if (rc) {
2641                 lp->lp_state |= LNET_PEER_PING_FAILED;
2642                 lp->lp_ping_error = 0;
2643                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2644                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2645                 goto out;
2646         }
2647
2648         /*
2649          * The peer may have discovery disabled at its end. Set
2650          * NO_DISCOVERY as appropriate.
2651          */
2652         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2653         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2654                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2655                        libcfs_nidstr(&lp->lp_primary_nid),
2656                        ping_feat_disc ? "enabled" : "disabled",
2657                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2658
2659                 /* Detect whether this peer has toggled discovery from on to
2660                  * off and whether we can delete and re-create the peer. Peers
2661                  * that were manually configured cannot be deleted by discovery.
2662                  * We need to delete this peer and re-create it if the peer was
2663                  * not configured manually, is currently considered DD capable,
2664                  * and either:
2665                  * 1. We've already discovered the peer (the peer has toggled
2666                  *    the discovery feature from on to off), or
2667                  * 2. The peer is considered MR, but it was not user configured
2668                  *    (this was a "temporary" peer created via the kernel APIs
2669                  *     that we're discovering for the first time)
2670                  */
2671                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2672                                       LNET_PEER_NO_DISCOVERY)) &&
2673                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2674                                      LNET_PEER_MULTI_RAIL))) {
2675                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2676                                libcfs_nidstr(&lp->lp_primary_nid),
2677                                lp->lp_state);
2678                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2679                 }
2680                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2681         } else {
2682                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2683                        libcfs_nidstr(&lp->lp_primary_nid));
2684                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2685         }
2686
2687         /*
2688          * Update the MULTI_RAIL flag based on the reply. If the peer
2689          * was configured with DLC then the setting should match what
2690          * DLC put in.
2691          */
2692         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2693                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2694                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2695                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2696                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2697                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2698                               libcfs_nidstr(&lp->lp_primary_nid));
2699                 } else if (lnet_peer_discovery_disabled) {
2700                         CDEBUG(D_NET,
2701                                "peer %s(%p) not MR: DD disabled locally\n",
2702                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2703                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2704                         CDEBUG(D_NET,
2705                                "peer %s(%p) not MR: DD disabled remotely\n",
2706                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2707                 } else {
2708                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2709                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2710                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2711                         lnet_peer_clr_non_mr_pref_nids(lp);
2712                 }
2713         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2714                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2715                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2716                               libcfs_nidstr(&lp->lp_primary_nid));
2717                 } else {
2718                         CERROR("Multi-Rail state vanished from %s\n",
2719                                libcfs_nidstr(&lp->lp_primary_nid));
2720                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2721                 }
2722         }
2723
2724         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2725         /*
2726          * Make sure we'll allocate the correct size ping buffer when
2727          * pinging the peer.
2728          */
2729         if (lp->lp_data_bytes < infobytes)
2730                 lp->lp_data_bytes = infobytes;
2731
2732         /* Check for truncation of the Reply. Clear PING_SENT and set
2733          * PING_FAILED to trigger a retry.
2734          */
2735         if (pbuf->pb_nbytes < infobytes) {
2736                 if (the_lnet.ln_push_target_nbytes < infobytes)
2737                         the_lnet.ln_push_target_nbytes = infobytes;
2738                 lp->lp_state |= LNET_PEER_PING_FAILED;
2739                 lp->lp_ping_error = 0;
2740                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2741                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2742                 goto out;
2743         }
2744
2745         /*
2746          * Check the sequence numbers in the reply. These are only
2747          * available if the reply came from a Multi-Rail peer.
2748          */
2749         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2750             pbuf->pb_info.pi_nnis > 1 &&
2751             lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2752             pbuf->pb_info.pi_ni[1].ns_nid) {
2753                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2754                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2755                                 libcfs_nidstr(&lp->lp_primary_nid),
2756                                 LNET_PING_BUFFER_SEQNO(pbuf),
2757                                 lp->lp_peer_seqno);
2758
2759                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2760         }
2761
2762         /* We're happy with the state of the data in the buffer. */
2763         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2764                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2765                lp->lp_state);
2766         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2767                 lnet_ping_buffer_decref(lp->lp_data);
2768         else
2769                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2770         lnet_ping_buffer_addref(pbuf);
2771         lp->lp_data = pbuf;
2772 out:
2773         lp->lp_state &= ~LNET_PEER_PING_SENT;
2774         spin_unlock(&lp->lp_lock);
2775 }
2776
2777 /*
2778  * Send event handling. Only matters for error cases, where we clean
2779  * up state on the peer and peer_ni that would otherwise be updated in
2780  * the REPLY event handler for a successful Ping, and the ACK event
2781  * handler for a successful Push.
2782  */
2783 static int
2784 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2785 {
2786         int rc = 0;
2787
2788         if (!ev->status)
2789                 goto out;
2790
2791         spin_lock(&lp->lp_lock);
2792         if (ev->msg_type == LNET_MSG_GET) {
2793                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2794                 lp->lp_state |= LNET_PEER_PING_FAILED;
2795                 lp->lp_ping_error = ev->status;
2796         } else { /* ev->msg_type == LNET_MSG_PUT */
2797                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2798                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2799                 lp->lp_push_error = ev->status;
2800         }
2801         spin_unlock(&lp->lp_lock);
2802         rc = LNET_REDISCOVER_PEER;
2803 out:
2804         CDEBUG(D_NET, "%s Send to %s: %d\n",
2805                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2806                 libcfs_nidstr(&ev->target.nid), rc);
2807         return rc;
2808 }
2809
2810 /*
2811  * Unlink event handling. This event is only seen if a call to
2812  * LNetMDUnlink() caused the MD to be unlinked. If this call was
2813  * made after the event was set up in LNetGet() or LNetPut() then we
2814  * assume the Ping or Push timed out.
2815  */
2816 static void
2817 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2818 {
2819         spin_lock(&lp->lp_lock);
2820         /* We've passed through LNetGet() */
2821         if (lp->lp_state & LNET_PEER_PING_SENT) {
2822                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2823                 lp->lp_state |= LNET_PEER_PING_FAILED;
2824                 lp->lp_ping_error = -ETIMEDOUT;
2825                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2826                         libcfs_nidstr(&lp->lp_primary_nid));
2827         }
2828         /* We've passed through LNetPut() */
2829         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2830                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2831                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2832                 lp->lp_push_error = -ETIMEDOUT;
2833                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2834                         libcfs_nidstr(&lp->lp_primary_nid));
2835         }
2836         spin_unlock(&lp->lp_lock);
2837 }
2838
2839 /*
2840  * Event handler for the discovery EQ.
2841  *
2842  * Called with lnet_res_lock(cpt) held. The cpt is the
2843  * lnet_cpt_of_cookie() of the md handle cookie.
2844  */
2845 static void lnet_discovery_event_handler(struct lnet_event *event)
2846 {
2847         struct lnet_peer *lp = event->md_user_ptr;
2848         struct lnet_ping_buffer *pbuf;
2849         int rc;
2850
2851         /* discovery needs to take another look */
2852         rc = LNET_REDISCOVER_PEER;
2853
2854         CDEBUG(D_NET, "Received event: %d\n", event->type);
2855
2856         switch (event->type) {
2857         case LNET_EVENT_ACK:
2858                 lnet_discovery_event_ack(lp, event);
2859                 break;
2860         case LNET_EVENT_REPLY:
2861                 lnet_discovery_event_reply(lp, event);
2862                 break;
2863         case LNET_EVENT_SEND:
2864                 /* Only send failure triggers a retry. */
2865                 rc = lnet_discovery_event_send(lp, event);
2866                 break;
2867         case LNET_EVENT_UNLINK:
2868                 /* LNetMDUnlink() was called */
2869                 lnet_discovery_event_unlink(lp, event);
2870                 break;
2871         default:
2872                 /* Invalid events. */
2873                 LBUG();
2874         }
2875         lnet_net_lock(LNET_LOCK_EX);
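             /* The unlinked flag means the MD is gone; release the ping
              * buffer and peer references held for it.
              */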
2876         if (event->unlinked) {
2877                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2878                 lnet_ping_buffer_decref(pbuf);
2879                 lnet_peer_decref_locked(lp);
2880         }
2881
2882         /* put peer back at end of request queue, if discovery not already
2883          * done */
2884         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2885             lnet_peer_queue_for_discovery(lp)) {
2886                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2887                 wake_up(&the_lnet.ln_dc_waitq);
2888         }
2889         lnet_net_unlock(LNET_LOCK_EX);
2890 }
2891
2892 /*
2893  * Build a peer from incoming data.
2894  *
2895  * The NIDs in the incoming data are supposed to be structured as follows:
2896  *  - loopback
2897  *  - primary NID
2898  *  - other NIDs in same net
2899  *  - NIDs in second net
2900  *  - NIDs in third net
2901  *  - ...
2902  * This is due to the way the list of NIDs in the data is created.
2903  *
2904  * Note that this function will mark the peer uptodate unless an
2905  * ENOMEM is encountered. All other errors are due to a conflict
2906  * between the DLC configuration and what discovery sees. We treat DLC
2907  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2908  * peer from becoming stuck in discovery.
2909  */
2910 static int lnet_peer_merge_data(struct lnet_peer *lp,
2911                                 struct lnet_ping_buffer *pbuf)
2912 {
2913         struct lnet_peer_net *lpn;
2914         struct lnet_peer_ni *lpni;
2915         lnet_nid_t *curnis = NULL;
2916         struct lnet_ni_status *addnis = NULL;
2917         lnet_nid_t *delnis = NULL;
2918         struct lnet_nid nid;
2919         unsigned int flags;
2920         int ncurnis;
2921         int naddnis;
2922         int ndelnis;
2923         int nnis = 0;
2924         int i;
2925         int j;
2926         int rc;
2927
2928         flags = LNET_PEER_DISCOVERED;
2929         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2930                 flags |= LNET_PEER_MULTI_RAIL;
2931
2932         /*
2933          * Cache the routing feature for the peer; whether it is enabled
2934          * or disabled as reported by the remote peer.
2935          */
2936         spin_lock(&lp->lp_lock);
2937         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2938                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2939         else
2940                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2941         spin_unlock(&lp->lp_lock);
2942
2943         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2944         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2945         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2946         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2947         if (!curnis || !addnis || !delnis) {
2948                 rc = -ENOMEM;
2949                 goto out;
2950         }
2951         ncurnis = 0;
2952         naddnis = 0;
2953         ndelnis = 0;
2954
2955         /* Construct the list of NIDs present in peer. */
2956         lpni = NULL;
2957         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2958                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
2959
2960         /*
2961          * Check for NIDs in pbuf not present in curnis[].
2962          * The loop starts at 1 to skip the loopback NID.
2963          */
2964         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2965                 for (j = 0; j < ncurnis; j++)
2966                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2967                                 break;
2968                 if (j == ncurnis)
2969                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2970         }
2971         /*
2972          * Check for NIDs in curnis[] not present in pbuf.
2973          * The nested loop starts at 1 to skip the loopback NID.
2974          *
2975          * But never add the loopback NID to delnis[]: if it is
2976          * present in curnis[] then this peer is for this node.
2977          */
2978         for (i = 0; i < ncurnis; i++) {
2979                 if (curnis[i] == LNET_NID_LO_0)
2980                         continue;
2981                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2982                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2983                                 /*
2984                                  * update the information we cache for the
2985                                  * peer with the latest information we
2986                                  * received
2987                                  */
2988                                 lnet_nid4_to_nid(curnis[i], &nid);
2989                                 lpni = lnet_peer_ni_find_locked(&nid);
2990                                 if (lpni) {
2991                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2992                                         lnet_peer_ni_decref_locked(lpni);
2993                                 }
2994                                 break;
2995                         }
2996                 }
2997                 if (j == pbuf->pb_info.pi_nnis)
2998                         delnis[ndelnis++] = curnis[i];
2999         }
3000
3001         /*
3002          * If we get here and discovery is disabled then we don't want
3003          * to add or delete any NIs. We just update the ones we have
3004          * information on and call it a day.
3005          */
3006         rc = 0;
3007         if (lnet_is_discovery_disabled(lp))
3008                 goto out;
3009
3010         for (i = 0; i < naddnis; i++) {
3011                 lnet_nid4_to_nid(addnis[i].ns_nid, &nid);
3012                 rc = lnet_peer_add_nid(lp, &nid, flags);
3013                 if (rc) {
3014                         CERROR("Error adding NID %s to peer %s: %d\n",
3015                                libcfs_nid2str(addnis[i].ns_nid),
3016                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3017                         if (rc == -ENOMEM)
3018                                 goto out;
3019                 }
3020                 lpni = lnet_peer_ni_find_locked(&nid);
3021                 if (lpni) {
3022                         lpni->lpni_ns_status = addnis[i].ns_status;
3023                         lnet_peer_ni_decref_locked(lpni);
3024                 }
3025         }
3026
3027         for (i = 0; i < ndelnis; i++) {
3028                 /*
3029                  * for routers it's okay to delete the primary_nid because
3030                  * the upper layers don't really rely on it. So if we're
3031                  * being told that the router changed its primary_nid
3032                  * then it's okay to delete it.
3033                  */
3034                 lnet_nid4_to_nid(delnis[i], &nid);
3035                 if (lp->lp_rtr_refcount > 0)
3036                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3037                 rc = lnet_peer_del_nid(lp, &nid, flags);
3038                 if (rc) {
3039                         CERROR("Error deleting NID %s from peer %s: %d\n",
3040                                libcfs_nid2str(delnis[i]),
3041                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3042                         if (rc == -ENOMEM)
3043                                 goto out;
3044                 }
3045         }
3046
3047         /* The peer net for the primary NID should be the first entry in the
3048          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3049          * be the first entry in its peer net's lpn_peer_nis list.
3050          */
3051         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, &nid);
3052         lpni = lnet_peer_ni_find_locked(&nid);
3053         if (!lpni) {
3054                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3055                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3056                 goto out;
3057         }
3058
3059         lnet_peer_ni_decref_locked(lpni);
3060
3061         lpn = lpni->lpni_peer_net;
3062         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3063                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3064
3065         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3066                 list_move(&lpni->lpni_peer_nis,
3067                           &lpni->lpni_peer_net->lpn_peer_nis);
3068
3069         /*
3070          * Errors other than -ENOMEM are due to peers having been
3071          * configured with DLC. Ignore these because DLC overrides
3072          * Discovery.
3073          */
3074         rc = 0;
3075 out:
3076         /* If this peer is a gateway, invoke the routing callback to update
3077          * the associated route status
3078          */
3079         if (lp->lp_rtr_refcount > 0)
3080                 lnet_router_discovery_ping_reply(lp, pbuf);
3081
3082         CFS_FREE_PTR_ARRAY(curnis, nnis);
3083         CFS_FREE_PTR_ARRAY(addnis, nnis);
3084         CFS_FREE_PTR_ARRAY(delnis, nnis);
3085         lnet_ping_buffer_decref(pbuf);
3086         CDEBUG(D_NET, "peer %s (%p): %d\n",
3087                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3088
3089         if (rc) {
3090                 spin_lock(&lp->lp_lock);
3091                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3092                 lp->lp_state |= LNET_PEER_FORCE_PING;
3093                 spin_unlock(&lp->lp_lock);
3094         }
3095         return rc;
3096 }
3097
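/*
 * A minimal sketch of the set arithmetic performed by
 * lnet_peer_merge_data() above, with the loopback handling and
 * ns_status bookkeeping stripped out. nid_in_set() is a hypothetical
 * helper used only for illustration:
 *
 *	static bool nid_in_set(lnet_nid_t nid, lnet_nid_t *set, int n)
 *	{
 *		while (n--)
 *			if (set[n] == nid)
 *				return true;
 *		return false;
 *	}
 *
 *	curnis[] = NIDs currently attached to the peer
 *	addnis[] = NIDs reported in pbuf but not found in curnis[]
 *	delnis[] = NIDs in curnis[] that pbuf no longer reports
 *
 * addnis[] is then applied with lnet_peer_add_nid() and delnis[] with
 * lnet_peer_del_nid(), unless discovery is disabled.
 */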
3098 /*
3099  * The data in pbuf says lp is its primary peer, but the data was
3100  * received by a different peer. Try to update lp with the data.
3101  */
3102 static int
3103 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3104 {
3105         struct lnet_handle_md mdh;
3106
3107         /* Queue lp for discovery, and force it on the request queue. */
3108         lnet_net_lock(LNET_LOCK_EX);
3109         if (lnet_peer_queue_for_discovery(lp))
3110                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3111         lnet_net_unlock(LNET_LOCK_EX);
3112
3113         LNetInvalidateMDHandle(&mdh);
3114
3115         /*
3116          * Decide whether we can move the peer to the DATA_PRESENT state.
3117          *
3118          * We replace stale data for a multi-rail peer, repair PING_FAILED
3119          * status, and preempt FORCE_PING.
3120          *
3121          * If after that we have DATA_PRESENT, we merge it into this peer.
3122          */
3123         spin_lock(&lp->lp_lock);
3124         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3125                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3126                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3127                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3128                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3129                         lnet_ping_buffer_decref(pbuf);
3130                         pbuf = lp->lp_data;
3131                         lp->lp_data = NULL;
3132                 }
3133         }
3134         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3135                 lnet_ping_buffer_decref(lp->lp_data);
3136                 lp->lp_data = NULL;
3137                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3138         }
3139         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3140                 mdh = lp->lp_ping_mdh;
3141                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3142                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3143                 lp->lp_ping_error = 0;
3144         }
3145         if (lp->lp_state & LNET_PEER_FORCE_PING)
3146                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3147         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3148         spin_unlock(&lp->lp_lock);
3149
3150         if (!LNetMDHandleIsInvalid(mdh))
3151                 LNetMDUnlink(mdh);
3152
3153         if (pbuf)
3154                 return lnet_peer_merge_data(lp, pbuf);
3155
3156         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3157         return 0;
3158 }
3159
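/*
 * Editorial note: for a multi-rail peer the arbitration above reduces
 * to a sequence-number comparison. A hedged summary, using the fields
 * referenced in the function:
 *
 *	if (LNET_PING_BUFFER_SEQNO(pbuf) > lp->lp_peer_seqno)
 *		accept pbuf and record its sequence number;
 *	else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
 *		drop pbuf and process the already queued lp->lp_data instead;
 */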
3160 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3161 {
3162         int i;
3163
3164         for (i = 0; i < pinfo->pi_nnis; i++) {
3165                 if (pinfo->pi_ni[i].ns_nid == nid)
3166                         return true;
3167         }
3168
3169         return false;
3170 }
3171
3172 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3173  * to the discovery queue a reference was taken that will prevent the peer from
3174  * actually being freed by this function. After this function exits the
3175  * discovery thread should call lnet_peer_discovery_complete() which will
3176  * drop that reference as well as wake any waiters that may also be holding a
3177  * ref on the peer.
3178  */
3179 static int lnet_peer_deletion(struct lnet_peer *lp)
3180 __must_hold(&lp->lp_lock)
3181 {
3182         struct list_head rlist;
3183         struct lnet_route *route, *tmp;
3184         int sensitivity = lp->lp_health_sensitivity;
3185         int rc = 0;
3186
3187         INIT_LIST_HEAD(&rlist);
3188
3189         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3190                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3191
3192         /* no-op if lnet_peer_del() has already been called on this peer */
3193         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3194                 goto clear_discovering;
3195
3196         spin_unlock(&lp->lp_lock);
3197
3198         mutex_lock(&the_lnet.ln_api_mutex);
3199         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3200             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3201                 mutex_unlock(&the_lnet.ln_api_mutex);
3202                 spin_lock(&lp->lp_lock);
3203                 rc = -ESHUTDOWN;
3204                 goto clear_discovering;
3205         }
3206
3207         lnet_peer_cancel_discovery(lp);
3208         lnet_net_lock(LNET_LOCK_EX);
3209         list_for_each_entry_safe(route, tmp,
3210                                  &lp->lp_routes,
3211                                  lr_gwlist)
3212                 lnet_move_route(route, NULL, &rlist);
3213
3214         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3215         rc = lnet_peer_del_locked(lp);
3216         if (rc)
3217                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3218                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3219
3220         lnet_net_unlock(LNET_LOCK_EX);
3221
3222         list_for_each_entry_safe(route, tmp,
3223                                  &rlist, lr_list) {
3224                 /* re-add these routes */
3225                 lnet_add_route(route->lr_net,
3226                                route->lr_hops,
3227                                &route->lr_nid,
3228                                route->lr_priority,
3229                                sensitivity);
3230                 LIBCFS_FREE(route, sizeof(*route));
3231         }
3232
3233         mutex_unlock(&the_lnet.ln_api_mutex);
3234
3235         spin_lock(&lp->lp_lock);
3236
3237         rc = 0;
3238
3239 clear_discovering:
3240         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3241                           LNET_PEER_FORCE_PUSH);
3242
3243         return rc;
3244 }
3245
3246 /*
3247  * Update a peer using the data received.
3248  */
3249 static int lnet_peer_data_present(struct lnet_peer *lp)
3250 __must_hold(&lp->lp_lock)
3251 {
3252         struct lnet_ping_buffer *pbuf;
3253         struct lnet_peer_ni *lpni;
3254         struct lnet_nid nid;
3255         unsigned int flags;
3256         int rc = 0;
3257
3258         pbuf = lp->lp_data;
3259         lp->lp_data = NULL;
3260         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3261         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3262         spin_unlock(&lp->lp_lock);
3263
3264         /*
3265          * Modifications of peer structures are done while holding the
3266          * ln_api_mutex. A global lock is required because we may be
3267          * modifying multiple peer structures, and a mutex greatly
3268          * simplifies memory management.
3269          *
3270          * The actual changes to the data structures must also protect
3271          * against concurrent lookups, for which the lnet_net_lock in
3272          * LNET_LOCK_EX mode is used.
3273          */
3274         mutex_lock(&the_lnet.ln_api_mutex);
3275         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3276                 rc = -ESHUTDOWN;
3277                 goto out;
3278         }
3279
3280         /*
3281          * If this peer is not on the peer list then it is being torn
3282          * down, and our reference count may be all that is keeping it
3283          * alive. Don't do any work on it.
3284          */
3285         if (list_empty(&lp->lp_peer_list)) {
3286                 lnet_ping_buffer_decref(pbuf);
3287                 goto out;
3288         }
3289
3290         flags = LNET_PEER_DISCOVERED;
3291         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3292                 flags |= LNET_PEER_MULTI_RAIL;
3293
3294         /*
3295          * Check whether the primary NID in the message matches the
3296          * primary NID of the peer. If it does, update the peer, if
3297          * it does not, check whether there is already a peer with
3298          * that primary NID. If no such peer exists, try to update
3299          * the primary NID of the current peer (allowed if it was
3300          * created due to message traffic) and complete the update.
3301          * If the peer did exist, hand off the data to it.
3302          *
3303          * The peer for the loopback interface is a special case: this
3304          * is the peer for the local node, and we want to set its
3305          * primary NID to the correct value here. Moreover, this peer
3306          * can show up with only the loopback NID in the ping buffer.
3307          */
3308         if (pbuf->pb_info.pi_nnis <= 1) {
3309                 lnet_ping_buffer_decref(pbuf);
3310                 goto out;
3311         }
3312         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, &nid);
3313         if (nid_is_lo0(&lp->lp_primary_nid)) {
3314                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3315                 if (rc)
3316                         lnet_ping_buffer_decref(pbuf);
3317                 else
3318                         rc = lnet_peer_merge_data(lp, pbuf);
3319         /*
3320          * If the primary NID of the peer is present in the ping info
3321          * returned from the peer, but it is not the primary NID we have
3322          * cached locally, and discovery is disabled, then we don't want to
3323          * update our local peer info by adding or removing NIDs; we just
3324          * want to update the status of the NIDs that we currently have
3325          * recorded in that peer.
3326          */
3327         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3328                    (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3329                                              &pbuf->pb_info) &&
3330                     lnet_is_discovery_disabled(lp))) {
3331                 rc = lnet_peer_merge_data(lp, pbuf);
3332         } else {
3333                 lpni = lnet_peer_ni_find_locked(&nid);
3334                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3335                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3336                         if (rc) {
3337                                 CERROR("Primary NID error %s versus %s: %d\n",
3338                                        libcfs_nidstr(&lp->lp_primary_nid),
3339                                        libcfs_nidstr(&nid), rc);
3340                                 lnet_ping_buffer_decref(pbuf);
3341                         } else {
3342                                 rc = lnet_peer_merge_data(lp, pbuf);
3343                         }
3344                         if (lpni)
3345                                 lnet_peer_ni_decref_locked(lpni);
3346                 } else {
3347                         struct lnet_peer *new_lp;
3348                         new_lp = lpni->lpni_peer_net->lpn_peer;
3349                         /*
3350                          * If lp has discovery/MR enabled, then new_lp should
3351                          * have discovery/MR enabled as well, since it's the
3352                          * same peer, which we're about to merge.
3353                          */
3354                         spin_lock(&lp->lp_lock);
3355                         spin_lock(&new_lp->lp_lock);
3356                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3357                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3358                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3359                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3360                         /* If we're processing a ping reply then we may be
3361                          * about to send a push to the peer that we ping'd.
3362                          * Since the ping reply that we're processing was
3363                          * received by lp, we need to set the discovery source
3364                          * NID for new_lp to the NID stored in lp.
3365                          */
3366                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3367                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3368                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3369                         }
3370                         spin_unlock(&new_lp->lp_lock);
3371                         spin_unlock(&lp->lp_lock);
3372
3373                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3374                         lnet_consolidate_routes_locked(lp, new_lp);
3375                         lnet_peer_ni_decref_locked(lpni);
3376                 }
3377         }
3378 out:
3379         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3380                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3381                lp->lp_state);
3382         mutex_unlock(&the_lnet.ln_api_mutex);
3383
3384         spin_lock(&lp->lp_lock);
3385         /* Tell discovery to re-check the peer immediately. */
3386         if (!rc)
3387                 rc = LNET_REDISCOVER_PEER;
3388         return rc;
3389 }
3390
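/*
 * Condensed sketch (added for clarity) of the primary-NID decision made
 * above; the full logic also handles the discovery-source NID hand-off:
 *
 *	nid = first non-loopback NID in pbuf (pi_ni[1]);
 *	if (our cached primary NID is the loopback NID)
 *		adopt nid as primary, then lnet_peer_merge_data();
 *	else if (nid matches our primary, or our primary appears in pbuf
 *		 and discovery is disabled)
 *		lnet_peer_merge_data() into this peer;
 *	else if (nid is unknown, or already belongs to this peer)
 *		try lnet_peer_set_primary_nid(), then merge;
 *	else
 *		hand the data to the peer that owns nid via
 *		lnet_peer_set_primary_data().
 */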
3391 /*
3392  * A ping failed. Clear the PING_FAILED state and set the
3393  * FORCE_PING state, to ensure a retry even if discovery is
3394  * disabled. This avoids being left with incorrect state.
3395  */
3396 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3397 __must_hold(&lp->lp_lock)
3398 {
3399         struct lnet_handle_md mdh;
3400         int rc;
3401
3402         mdh = lp->lp_ping_mdh;
3403         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3404         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3405         lp->lp_state |= LNET_PEER_FORCE_PING;
3406         rc = lp->lp_ping_error;
3407         lp->lp_ping_error = 0;
3408         spin_unlock(&lp->lp_lock);
3409
3410         if (!LNetMDHandleIsInvalid(mdh))
3411                 LNetMDUnlink(mdh);
3412
3413         CDEBUG(D_NET, "peer %s:%d\n",
3414                libcfs_nidstr(&lp->lp_primary_nid), rc);
3415
3416         spin_lock(&lp->lp_lock);
3417         return rc ? rc : LNET_REDISCOVER_PEER;
3418 }
3419
3420 /* Active side of ping. */
3421 static int lnet_peer_send_ping(struct lnet_peer *lp)
3422 __must_hold(&lp->lp_lock)
3423 {
3424         int bytes;
3425         int rc;
3426         int cpt;
3427
3428         lp->lp_state |= LNET_PEER_PING_SENT;
3429         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3430         spin_unlock(&lp->lp_lock);
3431
3432         cpt = lnet_net_lock_current();
3433         /* Refcount for MD. */
3434         lnet_peer_addref_locked(lp);
3435         lnet_net_unlock(cpt);
3436
3437         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3438
3439         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3440                             the_lnet.ln_dc_handler, false);
3441         /* If LNetMDBind() in lnet_send_ping() fails we need to decrement
3442          * the refcount on the peer ourselves; otherwise LNetMDUnlink() will
3443          * be called, which will eventually drop that refcount.
3444          */
3445         if (rc > 0) {
3446                 lnet_net_lock(cpt);
3447                 lnet_peer_decref_locked(lp);
3448                 lnet_net_unlock(cpt);
3449                 rc = -rc; /* change the rc to negative value */
3450                 goto fail_error;
3451         } else if (rc < 0) {
3452                 goto fail_error;
3453         }
3454
3455         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3456
3457         spin_lock(&lp->lp_lock);
3458         return 0;
3459
3460 fail_error:
3461         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3462         /*
3463          * The errors that get us here are considered hard errors and
3464          * cause Discovery to terminate. So we clear PING_SENT, but do
3465          * not set either PING_FAILED or FORCE_PING. In fact we need
3466          * to clear PING_FAILED, because the unlink event handler will
3467          * have set it if we called LNetMDUnlink() above.
3468          */
3469         spin_lock(&lp->lp_lock);
3470         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3471         return rc;
3472 }
3473
3474 /*
3475  * This function exists because you cannot call LNetMDUnlink() from an
3476  * event handler.
3477  */
3478 static int lnet_peer_push_failed(struct lnet_peer *lp)
3479 __must_hold(&lp->lp_lock)
3480 {
3481         struct lnet_handle_md mdh;
3482         int rc;
3483
3484         mdh = lp->lp_push_mdh;
3485         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3486         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3487         rc = lp->lp_push_error;
3488         lp->lp_push_error = 0;
3489         spin_unlock(&lp->lp_lock);
3490
3491         if (!LNetMDHandleIsInvalid(mdh))
3492                 LNetMDUnlink(mdh);
3493
3494         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3495         spin_lock(&lp->lp_lock);
3496         return rc ? rc : LNET_REDISCOVER_PEER;
3497 }
3498
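/*
 * Editorial note: lnet_peer_ping_failed() and lnet_peer_push_failed()
 * share the same deferred-unlink pattern, because LNetMDUnlink() cannot
 * be called from the event handler itself. A minimal sketch of the
 * pattern (the real functions enter and leave with lp_lock held):
 *
 *	spin_lock(&lp->lp_lock);
 *	mdh = lp->lp_push_mdh;
 *	LNetInvalidateMDHandle(&lp->lp_push_mdh);
 *	spin_unlock(&lp->lp_lock);
 *
 *	if (!LNetMDHandleIsInvalid(mdh))
 *		LNetMDUnlink(mdh);
 */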
3499 /*
3500  * Mark the peer as discovered.
3501  */
3502 static int lnet_peer_discovered(struct lnet_peer *lp)
3503 __must_hold(&lp->lp_lock)
3504 {
3505         lp->lp_state |= LNET_PEER_DISCOVERED;
3506         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3507                           LNET_PEER_REDISCOVER);
3508
3509         lp->lp_dc_error = 0;
3510
3511         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3512
3513         return 0;
3514 }
3515
3516 /* Active side of push. */
3517 static int lnet_peer_send_push(struct lnet_peer *lp)
3518 __must_hold(&lp->lp_lock)
3519 {
3520         struct lnet_ping_buffer *pbuf;
3521         struct lnet_processid id;
3522         struct lnet_md md;
3523         int cpt;
3524         int rc;
3525
3526         /* Don't push to a non-multi-rail peer. */
3527         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3528                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3529                 /* if peer's NIDs are uptodate then peer is discovered */
3530                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3531                         rc = lnet_peer_discovered(lp);
3532                         return rc;
3533                 }
3534
3535                 return 0;
3536         }
3537
3538         lp->lp_state |= LNET_PEER_PUSH_SENT;
3539         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3540         spin_unlock(&lp->lp_lock);
3541
3542         cpt = lnet_net_lock_current();
3543         pbuf = the_lnet.ln_ping_target;
3544         lnet_ping_buffer_addref(pbuf);
3545         lnet_net_unlock(cpt);
3546
3547         /* Push source MD */
3548         md.start     = &pbuf->pb_info;
3549         md.length    = pbuf->pb_nbytes;
3550         md.threshold = 2; /* Put/Ack */
3551         md.max_size  = 0;
3552         md.options   = LNET_MD_TRACK_RESPONSE;
3553         md.handler   = the_lnet.ln_dc_handler;
3554         md.user_ptr  = lp;
3555
3556         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3557         if (rc) {
3558                 lnet_ping_buffer_decref(pbuf);
3559                 CERROR("Can't bind push source MD: %d\n", rc);
3560                 goto fail_error;
3561         }
3562
3563         cpt = lnet_net_lock_current();
3564         /* Refcount for MD. */
3565         lnet_peer_addref_locked(lp);
3566         id.pid = LNET_PID_LUSTRE;
3567         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3568                 id.nid = lp->lp_disc_dst_nid;
3569         else
3570                 id.nid = lp->lp_primary_nid;
3571         lnet_net_unlock(cpt);
3572
3573         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3574                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3575                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3576
3577         /*
3578                  * Reset the discovery NIDs. There is no need to restrict
3579                  * sending from that source if we call
3580                  * lnet_push_update_to_peers(). They will get set to specific
3581                  * NIDs if we initiate discovery from scratch.
3582          */
3583         lp->lp_disc_src_nid = LNET_ANY_NID;
3584         lp->lp_disc_dst_nid = LNET_ANY_NID;
3585
3586         if (rc)
3587                 goto fail_unlink;
3588
3589         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3590
3591         spin_lock(&lp->lp_lock);
3592         return 0;
3593
3594 fail_unlink:
3595         LNetMDUnlink(lp->lp_push_mdh);
3596         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3597 fail_error:
3598         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3599                lp, rc);
3600         /*
3601          * The errors that get us here are considered hard errors and
3602          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3603          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3604          * because the unlink event handler will have set it if we
3605          * called LNetMDUnlink() above.
3606          */
3607         spin_lock(&lp->lp_lock);
3608         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3609         return rc;
3610 }
3611
3612 /*
3613  * Wait for work to be queued or some other change that must be
3614  * attended to. Returns non-zero if the discovery thread should shut
3615  * down.
3616  */
3617 static int lnet_peer_discovery_wait_for_work(void)
3618 {
3619         int cpt;
3620         int rc = 0;
3621
3622         DEFINE_WAIT(wait);
3623
3624         cpt = lnet_net_lock_current();
3625         for (;;) {
3626                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3627                                 TASK_INTERRUPTIBLE);
3628                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3629                         break;
3630                 if (lnet_push_target_resize_needed() ||
3631                     the_lnet.ln_push_target->pb_needs_post)
3632                         break;
3633                 if (!list_empty(&the_lnet.ln_dc_request))
3634                         break;
3635                 if (!list_empty(&the_lnet.ln_msg_resend))
3636                         break;
3637                 lnet_net_unlock(cpt);
3638
3639                 /*
3640                  * Wake up at most once per second to check whether any
3641                  * peers have been stuck on the working queue for longer
3642                  * than the peer timeout.
3643                  */
3644                 schedule_timeout(cfs_time_seconds(1));
3645                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3646                 cpt = lnet_net_lock_current();
3647         }
3648         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3649
3650         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3651                 rc = -ESHUTDOWN;
3652
3653         lnet_net_unlock(cpt);
3654
3655         CDEBUG(D_NET, "woken: %d\n", rc);
3656
3657         return rc;
3658 }
3659
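/*
 * Illustrative sketch (added for clarity): stripped of the LNet
 * specifics, the loop above is the standard prepare_to_wait()/
 * finish_wait() idiom with a periodic timeout. wq, stopping() and
 * work_pending() are placeholders used only for illustration:
 *
 *	DEFINE_WAIT(wait);
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (stopping() || work_pending())
 *			break;
 *		schedule_timeout(cfs_time_seconds(1));
 *		finish_wait(&wq, &wait);
 *	}
 *	finish_wait(&wq, &wait);
 */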
3660 /*
3661  * Messages that were pending on a destroyed peer will be put on a global
3662  * resend list. The resend list is checked by the discovery thread when
3663  * it wakes up, and the messages on it are resent. These messages may
3664  * still be sendable if the lpni that originally caused the re-queue was
3665  * transferred to another peer.
3666  *
3667  * It is possible that LNet could be shut down while we're iterating
3668  * through the list. lnet_shutdown_lndnets() will attempt to access the
3669  * resend list, but will have to wait until the spinlock is released, by
3670  * which time there shouldn't be any more messages on the resend list.
3671  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3672  * for the messages so they can be released. The other case is that
3673  * lnet_shutdown_lndnets() can finalize all the messages before this
3674  * function can visit the resend list, in which case this function will be
3675  * a no-op.
3676  */
3677 static void lnet_resend_msgs(void)
3678 {
3679         struct lnet_msg *msg, *tmp;
3680         LIST_HEAD(resend);
3681         int rc;
3682
3683         spin_lock(&the_lnet.ln_msg_resend_lock);
3684         list_splice(&the_lnet.ln_msg_resend, &resend);
3685         spin_unlock(&the_lnet.ln_msg_resend_lock);
3686
3687         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3688                 list_del_init(&msg->msg_list);
3689                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3690                                &msg->msg_rtr_nid_param);
3691                 if (rc < 0) {
3692                         CNETERR("Error sending %s to %s: %d\n",
3693                                lnet_msgtyp2str(msg->msg_type),
3694                                libcfs_idstr(&msg->msg_target), rc);
3695                         lnet_finalize(msg, rc);
3696                 }
3697         }
3698 }
3699
3700 /* The discovery thread. */
3701 static int lnet_peer_discovery(void *arg)
3702 {
3703         struct lnet_peer *lp;
3704         int rc;
3705
3706         wait_for_completion(&the_lnet.ln_started);
3707
3708         CDEBUG(D_NET, "started\n");
3709
3710         for (;;) {
3711                 if (lnet_peer_discovery_wait_for_work())
3712                         break;
3713
3714                 if (lnet_push_target_resize_needed())
3715                         lnet_push_target_resize();
3716                 else if (the_lnet.ln_push_target->pb_needs_post)
3717                         lnet_push_target_post(the_lnet.ln_push_target,
3718                                               &the_lnet.ln_push_target_md);
3719
3720                 lnet_resend_msgs();
3721
3722                 lnet_net_lock(LNET_LOCK_EX);
3723                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3724                         lnet_net_unlock(LNET_LOCK_EX);
3725                         break;
3726                 }
3727
3728                 /*
3729                  * Process all incoming discovery work requests.  When
3730                  * discovery must wait on a peer to change state, it
3731                  * is added to the tail of the ln_dc_working queue. A
3732                  * timestamp keeps track of when the peer was added,
3733                  * so we can time out discovery requests that take too
3734                  * long.
3735                  */
3736                 while (!list_empty(&the_lnet.ln_dc_request)) {
3737                         lp = list_first_entry(&the_lnet.ln_dc_request,
3738                                               struct lnet_peer, lp_dc_list);
3739                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3740                         /*
3741                          * set the time the peer was put on the dc_working
3742                          * queue. It shouldn't remain on the queue
3743                          * forever, in case the GET message (for ping)
3744                          * doesn't get a REPLY or the PUT message (for
3745                          * push) doesn't get an ACK.
3746                          */
3747                         lp->lp_last_queued = ktime_get_real_seconds();
3748                         lnet_net_unlock(LNET_LOCK_EX);
3749
3750                         if (lnet_push_target_resize_needed())
3751                                 lnet_push_target_resize();
3752                         else if (the_lnet.ln_push_target->pb_needs_post)
3753                                 lnet_push_target_post(the_lnet.ln_push_target,
3754                                                       &the_lnet.ln_push_target_md);
3755
3756                         /*
3757                          * Select an action depending on the state of
3758                          * the peer and whether discovery is disabled.
3759                          * The check whether discovery is disabled is
3760                          * done after the code that handles processing
3761                          * for arrived data, cleanup for failures, and
3762                          * forcing a Ping or Push.
3763                          */
3764                         spin_lock(&lp->lp_lock);
3765                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3766                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3767                                 lp->lp_state);
3768                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3769                                             LNET_PEER_MARK_DELETED))
3770                                 rc = lnet_peer_deletion(lp);
3771                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3772                                 rc = lnet_peer_data_present(lp);
3773                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3774                                 rc = lnet_peer_ping_failed(lp);
3775                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3776                                 rc = lnet_peer_push_failed(lp);
3777                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3778                                 rc = lnet_peer_send_ping(lp);
3779                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3780                                 rc = lnet_peer_send_push(lp);
3781                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3782                                 rc = lnet_peer_send_ping(lp);
3783                         else if (lnet_peer_needs_push(lp))
3784                                 rc = lnet_peer_send_push(lp);
3785                         else
3786                                 rc = lnet_peer_discovered(lp);
3787                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3788                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3789                                 lp->lp_state, rc);
3790
3791                         if (rc == LNET_REDISCOVER_PEER) {
3792                                 spin_unlock(&lp->lp_lock);
3793                                 lnet_net_lock(LNET_LOCK_EX);
3794                                 list_move(&lp->lp_dc_list,
3795                                           &the_lnet.ln_dc_request);
3796                         } else if (rc ||
3797                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
3798                                 spin_unlock(&lp->lp_lock);
3799                                 lnet_net_lock(LNET_LOCK_EX);
3800                                 lnet_peer_discovery_complete(lp, rc);
3801                         } else {
3802                                 spin_unlock(&lp->lp_lock);
3803                                 lnet_net_lock(LNET_LOCK_EX);
3804                         }
3805
3806                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3807                                 break;
3808
3809                 }
3810
3811                 lnet_net_unlock(LNET_LOCK_EX);
3812         }
3813
3814         CDEBUG(D_NET, "stopping\n");
3815         /*
3816          * Clean up before telling lnet_peer_discovery_stop() that
3817          * we're done. Use wake_up() below to somewhat reduce the
3818          * size of the thundering herd if there are multiple threads
3819          * waiting on discovery of a single peer.
3820          */
3821
3822         /* Queue cleanup 1: stop all pending pings and pushes. */
3823         lnet_net_lock(LNET_LOCK_EX);
3824         while (!list_empty(&the_lnet.ln_dc_working)) {
3825                 lp = list_first_entry(&the_lnet.ln_dc_working,
3826                                       struct lnet_peer, lp_dc_list);
3827                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3828                 lnet_net_unlock(LNET_LOCK_EX);
3829                 lnet_peer_cancel_discovery(lp);
3830                 lnet_net_lock(LNET_LOCK_EX);
3831         }
3832         lnet_net_unlock(LNET_LOCK_EX);
3833
3834         /* Queue cleanup 2: wait for the expired queue to clear. */
3835         while (!list_empty(&the_lnet.ln_dc_expired))
3836                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3837
3838         /* Queue cleanup 3: clear the request queue. */
3839         lnet_net_lock(LNET_LOCK_EX);
3840         while (!list_empty(&the_lnet.ln_dc_request)) {
3841                 lp = list_first_entry(&the_lnet.ln_dc_request,
3842                                       struct lnet_peer, lp_dc_list);
3843                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
3844         }
3845         lnet_net_unlock(LNET_LOCK_EX);
3846
3847         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3848         the_lnet.ln_dc_handler = NULL;
3849
3850         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3851         wake_up(&the_lnet.ln_dc_waitq);
3852
3853         CDEBUG(D_NET, "stopped\n");
3854
3855         return 0;
3856 }
3857
3858 /* ln_api_mutex is held on entry. */
3859 int lnet_peer_discovery_start(void)
3860 {
3861         struct task_struct *task;
3862         int rc = 0;
3863
3864         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3865                 return -EALREADY;
3866
3867         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3868         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3869         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3870         if (IS_ERR(task)) {
3871                 rc = PTR_ERR(task);
3872                 CERROR("Can't start peer discovery thread: %d\n", rc);
3873
3874                 the_lnet.ln_dc_handler = NULL;
3875
3876                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3877         }
3878
3879         CDEBUG(D_NET, "discovery start: %d\n", rc);
3880
3881         return rc;
3882 }
3883
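/*
 * Hedged usage sketch: lnet_peer_discovery_start() above and
 * lnet_peer_discovery_stop() below both expect the caller to hold
 * ln_api_mutex, e.g.:
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_peer_discovery_start();
 *	...
 *	lnet_peer_discovery_stop();
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */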
3884 /* ln_api_mutex is held on entry. */
3885 void lnet_peer_discovery_stop(void)
3886 {
3887         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3888                 return;
3889
3890         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3891         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3892
3893         /* In the LNetNIInit() path we may be stopping discovery before it
3894          * entered its work loop
3895          */
3896         if (!completion_done(&the_lnet.ln_started))
3897                 complete(&the_lnet.ln_started);
3898         else
3899                 wake_up(&the_lnet.ln_dc_waitq);
3900
3901         mutex_unlock(&the_lnet.ln_api_mutex);
3902         wait_event(the_lnet.ln_dc_waitq,
3903                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3904         mutex_lock(&the_lnet.ln_api_mutex);
3905
3906         LASSERT(list_empty(&the_lnet.ln_dc_request));
3907         LASSERT(list_empty(&the_lnet.ln_dc_working));
3908         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3909
3910         CDEBUG(D_NET, "discovery stopped\n");
3911 }
3912
3913 /* Debugging */
3914
3915 void
3916 lnet_debug_peer(lnet_nid_t nid4)
3917 {
3918         char                    *aliveness = "NA";
3919         struct lnet_peer_ni     *lp;
3920         int                     cpt;
3921         struct lnet_nid nid;
3922
3923         lnet_nid4_to_nid(nid4, &nid);
3924         cpt = lnet_nid2cpt(&nid, NULL);
3925         lnet_net_lock(cpt);
3926
3927         lp = lnet_peerni_by_nid_locked(&nid, NULL, cpt);
3928         if (IS_ERR(lp)) {
3929                 lnet_net_unlock(cpt);
3930                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(&nid));
3931                 return;
3932         }
3933
3934         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3935                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3936
3937         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3938                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3939                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3940                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3941                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3942
3943         lnet_peer_ni_decref_locked(lp);
3944
3945         lnet_net_unlock(cpt);
3946 }
3947
3948 /* Gathering information for userspace. */
3949
3950 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3951                           char aliveness[LNET_MAX_STR_LEN],
3952                           __u32 *cpt_iter, __u32 *refcount,
3953                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3954                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3955                           __u32 *peer_tx_qnob)
3956 {
3957         struct lnet_peer_table          *peer_table;
3958         struct lnet_peer_ni             *lp;
3959         int                             j;
3960         int                             lncpt;
3961         bool                            found = false;
3962
3963         /* get the number of CPTs */
3964         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3965
3966         /* if the cpt number to be examined is >= the number of cpts in
3967          * the system then indicate that there are no more cpts to examine
3968          */
3969         if (*cpt_iter >= lncpt)
3970                 return -ENOENT;
3971
3972         /* get the current table */
3973         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3974         /* if the ptable is NULL then there are no more cpts to examine */
3975         if (peer_table == NULL)
3976                 return -ENOENT;
3977
3978         lnet_net_lock(*cpt_iter);
3979
3980         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3981                 struct list_head *peers = &peer_table->pt_hash[j];
3982
3983                 list_for_each_entry(lp, peers, lpni_hashlist) {
3984                         if (!nid_is_nid4(&lp->lpni_nid))
3985                                 continue;
3986                         if (peer_index-- > 0)
3987                                 continue;
3988
3989                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3990                         if (lnet_isrouter(lp) ||
3991                                 lnet_peer_aliveness_enabled(lp))
3992                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3993                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3994
3995                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
3996                         *refcount = kref_read(&lp->lpni_kref);
3997                         *ni_peer_tx_credits =
3998                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3999                         *peer_tx_credits = lp->lpni_txcredits;
4000                         *peer_rtr_credits = lp->lpni_rtrcredits;
4001                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4002                         *peer_tx_qnob = lp->lpni_txqnob;
4003
4004                         found = true;
4005                 }
4006
4007         }
4008         lnet_net_unlock(*cpt_iter);
4009
4010         *cpt_iter = lncpt;
4011
4012         return found ? 0 : -ENOENT;
4013 }
4014
4015 /* ln_api_mutex is held, which keeps the peer list stable */
4016 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4017 {
4018         struct lnet_ioctl_element_stats *lpni_stats;
4019         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4020         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4021         struct lnet_peer_ni_credit_info *lpni_info;
4022         struct lnet_peer_ni *lpni;
4023         struct lnet_peer *lp;
4024         lnet_nid_t nid4;
4025         struct lnet_nid nid;
4026         __u32 size;
4027         int rc;
4028
4029         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4030         lp = lnet_find_peer(&nid);
4031
4032         if (!lp) {
4033                 rc = -ENOENT;
4034                 goto out;
4035         }
4036
4037         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4038                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4039         size *= lp->lp_nnis;
4040         if (size > cfg->prcfg_size) {
4041                 cfg->prcfg_size = size;
4042                 rc = -E2BIG;
4043                 goto out_lp_decref;
4044         }
4045
4046         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4047         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4048         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4049         cfg->prcfg_count = lp->lp_nnis;
4050         cfg->prcfg_size = size;
4051         cfg->prcfg_state = lp->lp_state;
4052
4053         /* Allocate helper buffers. */
4054         rc = -ENOMEM;
4055         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4056         if (!lpni_info)
4057                 goto out_lp_decref;
4058         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4059         if (!lpni_stats)
4060                 goto out_free_info;
4061         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4062         if (!lpni_msg_stats)
4063                 goto out_free_stats;
4064         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4065         if (!lpni_hstats)
4066                 goto out_free_msg_stats;
4067
4068
4069         lpni = NULL;
4070         rc = -EFAULT;
4071         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4072                 if (!nid_is_nid4(&lpni->lpni_nid))
4073                         continue;
4074                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4075                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4076                         goto out_free_hstats;
4077                 bulk += sizeof(nid4);
4078
4079                 memset(lpni_info, 0, sizeof(*lpni_info));
4080                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4081                 if (lnet_isrouter(lpni) ||
4082                         lnet_peer_aliveness_enabled(lpni))
4083                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4084                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4085
4086                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4087                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4088                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4089                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4090                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4091                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4092                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4093                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4094                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4095                         goto out_free_hstats;
4096                 bulk += sizeof(*lpni_info);
4097
4098                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4099                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4100                                                             LNET_STATS_TYPE_SEND);
4101                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4102                                                             LNET_STATS_TYPE_RECV);
4103                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4104                                                             LNET_STATS_TYPE_DROP);
4105                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4106                         goto out_free_hstats;
4107                 bulk += sizeof(*lpni_stats);
4108                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4109                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4110                         goto out_free_hstats;
4111                 bulk += sizeof(*lpni_msg_stats);
4112                 lpni_hstats->hlpni_network_timeout =
4113                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4114                 lpni_hstats->hlpni_remote_dropped =
4115                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4116                 lpni_hstats->hlpni_remote_timeout =
4117                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4118                 lpni_hstats->hlpni_remote_error =
4119                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4120                 lpni_hstats->hlpni_health_value =
4121                   atomic_read(&lpni->lpni_healthv);
4122                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4123                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4124                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4125                         goto out_free_hstats;
4126                 bulk += sizeof(*lpni_hstats);
4127         }
4128         rc = 0;
4129
4130 out_free_hstats:
4131         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4132 out_free_msg_stats:
4133         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4134 out_free_stats:
4135         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4136 out_free_info:
4137         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4138 out_lp_decref:
4139         lnet_peer_decref_locked(lp);
4140 out:
4141         return rc;
4142 }
4143
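/*
 * Editorial note on the bulk layout produced above: one fixed-size
 * record is emitted per peer NI, in this order (sizes are those of the
 * kernel structures; this describes the copy loop above, not a separate
 * userspace API definition):
 *
 *	lnet_nid_t                              nid;
 *	struct lnet_peer_ni_credit_info         info;
 *	struct lnet_ioctl_element_stats         stats;
 *	struct lnet_ioctl_element_msg_stats     msg_stats;
 *	struct lnet_ioctl_peer_ni_hstats        hstats;
 *
 * repeated cfg->prcfg_count times, cfg->prcfg_size bytes in total.
 */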
4144 /* must hold net_lock/0 */
4145 void
4146 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4147                                      struct list_head *recovery_queue,
4148                                      time64_t now)
4149 {
4150         /* the mt could've shut down and cleaned up the queues */
4151         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4152                 return;
4153
4154         if (!list_empty(&lpni->lpni_recovery))
4155                 return;
4156
4157         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4158                 return;
4159
4160         if (!lpni->lpni_last_alive) {
4161                 CDEBUG(D_NET,
4162                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4163                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4164                        lpni->lpni_last_alive);
4165                 return;
4166         }
4167
4168         if (lnet_recovery_limit &&
4169             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4170                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4171                        libcfs_nidstr(&lpni->lpni_nid),
4172                        lpni->lpni_last_alive);
4173                 /* Reset the ping count so that if this peer NI is added back to
4174                  * the recovery queue we will send the first ping right away.
4175                  */
4176                 lpni->lpni_ping_count = 0;
4177                 return;
4178         }
4179
4180         /* This peer NI is going on the recovery queue, so take a ref on it */
4181         lnet_peer_ni_addref_locked(lpni);
4182
4183         lnet_peer_ni_set_next_ping(lpni, now);
4184
4185         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4186                libcfs_nidstr(&lpni->lpni_nid),
4187                lpni->lpni_ping_count,
4188                lpni->lpni_next_ping,
4189                lpni->lpni_last_alive,
4190                atomic_read(&lpni->lpni_healthv));
4191
4192         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4193 }
4194
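/*
 * The eligibility checks above can be read as a single predicate; a
 * sketch with a hypothetical helper name, same conditions as the code:
 *
 *	static bool lpni_recovery_eligible(struct lnet_peer_ni *lpni,
 *					   time64_t now)
 *	{
 *		return the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING &&
 *		       list_empty(&lpni->lpni_recovery) &&
 *		       atomic_read(&lpni->lpni_healthv) != LNET_MAX_HEALTH_VALUE &&
 *		       lpni->lpni_last_alive &&
 *		       (!lnet_recovery_limit ||
 *			now <= lpni->lpni_last_alive + lnet_recovery_limit);
 *	}
 */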
4195 /* Call with the ln_api_mutex held */
4196 void
4197 lnet_peer_ni_set_healthv(lnet_nid_t nid4, int value, bool all)
4198 {
4199         struct lnet_peer_table *ptable;
4200         struct lnet_peer *lp;
4201         struct lnet_peer_net *lpn;
4202         struct lnet_peer_ni *lpni;
4203         struct lnet_nid nid;
4204         int lncpt;
4205         int cpt;
4206         time64_t now;
4207
4208         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4209                 return;
4210
4211         lnet_nid4_to_nid(nid4, &nid);
4212         now = ktime_get_seconds();
4213
4214         if (!all) {
4215                 lnet_net_lock(LNET_LOCK_EX);
4216                 lpni = lnet_peer_ni_find_locked(&nid);
4217                 if (!lpni) {
4218                         lnet_net_unlock(LNET_LOCK_EX);
4219                         return;
4220                 }
4221                 lnet_set_lpni_healthv_locked(lpni, value);
4222                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4223                                              &the_lnet.ln_mt_peerNIRecovq, now);
4224                 lnet_peer_ni_decref_locked(lpni);
4225                 lnet_net_unlock(LNET_LOCK_EX);
4226                 return;
4227         }
4228
4229         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4230
4231         /*
4232          * Walk all the peers and reset the health value for each one to the
4233          * specified value.
4234          */
4235         lnet_net_lock(LNET_LOCK_EX);
4236         for (cpt = 0; cpt < lncpt; cpt++) {
4237                 ptable = the_lnet.ln_peer_tables[cpt];
4238                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4239                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4240                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4241                                                     lpni_peer_nis) {
4242                                         lnet_set_lpni_healthv_locked(lpni,
4243                                                                      value);
4244                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4245                                              &the_lnet.ln_mt_peerNIRecovq, now);
4246                                 }
4247                         }
4248                 }
4249         }
4250         lnet_net_unlock(LNET_LOCK_EX);
4251 }
4252
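/*
 * Hedged usage sketch: callers hold ln_api_mutex. Restoring a single
 * peer NI to full health, or resetting every peer NI in the system
 * (the nid argument is ignored when all is true):
 *
 *	lnet_peer_ni_set_healthv(nid4, LNET_MAX_HEALTH_VALUE, false);
 *	lnet_peer_ni_set_healthv(LNET_NID_ANY, LNET_MAX_HEALTH_VALUE, true);
 */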