1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50
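/*
 * Remove a peer_ni from the list of peer_nis on remote (not locally
 * reachable) nets and drop the reference that list membership held.
 * No-op if the peer_ni is not on that list.
 */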
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
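/*
 * A local net was added. Walk the remote peer_ni list and, for each
 * peer_ni that lives on the new net, attach it to the net, set its
 * credits from the net tunables, and take it off the remote list.
 * The *_locked helpers used here assume the caller holds the
 * lnet_net_lock.
 */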
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
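/*
 * Allocate the per-CPT peer tables and their hash lists. Returns 0 on
 * success or -ENOMEM on allocation failure, in which case any tables
 * created so far are torn down again.
 */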
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
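/*
 * Allocate and initialize a peer_ni for the given NID on the CPT
 * derived from the NID hash. If the NID is on a local net, credits are
 * taken from that net's tunables; otherwise the peer_ni is parked on
 * the remote peer_ni list until the net is added later.
 * Returns NULL on allocation failure.
 */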
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(struct lnet_nid *nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_hashlist);
166         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
167         INIT_LIST_HEAD(&lpni->lpni_recovery);
168         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
169         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
170         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
171         kref_init(&lpni->lpni_kref);
172         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
173
174         spin_lock_init(&lpni->lpni_lock);
175
176         if (lnet_peers_start_down())
177                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
178         else
179                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
180         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
181         lpni->lpni_nid = *nid;
182         lpni->lpni_cpt = cpt;
183         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
184
185         net = lnet_get_net_locked(LNET_NID_NET(nid));
186         lpni->lpni_net = net;
187         if (net) {
188                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
189                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
190                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
191                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
192         } else {
193                 /*
194                  * This peer_ni is not on a local network, so we
195                  * cannot add the credits here. In case the net is
196                  * added later, add the peer_ni to the remote peer ni
197                  * list so it can be easily found and revisited.
198                  */
199                 /* FIXME: per-net implementation instead? */
200                 lnet_peer_ni_addref_locked(lpni);
201                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
202                               &the_lnet.ln_remote_peer_ni_list);
203         }
204
205         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
206
207         return lpni;
208 }
209
210 static struct lnet_peer_net *
211 lnet_peer_net_alloc(__u32 net_id)
212 {
213         struct lnet_peer_net *lpn;
214
215         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
216         if (!lpn)
217                 return NULL;
218
219         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
220         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
221         lpn->lpn_net_id = net_id;
222         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
223
224         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
225
226         return lpn;
227 }
228
229 void
230 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
231 {
232         struct lnet_peer *lp;
233
234         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
235
236         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
237         LASSERT(list_empty(&lpn->lpn_peer_nis));
238         LASSERT(list_empty(&lpn->lpn_peer_nets));
239         lp = lpn->lpn_peer;
240         lpn->lpn_peer = NULL;
241         LIBCFS_FREE(lpn, sizeof(*lpn));
242
243         lnet_peer_decref_locked(lp);
244 }
245
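/*
 * Allocate a peer with the given NID as its primary NID. Returns NULL
 * on allocation failure.
 */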
246 static struct lnet_peer *
247 lnet_peer_alloc(struct lnet_nid *nid)
248 {
249         struct lnet_peer *lp;
250
251         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
252         if (!lp)
253                 return NULL;
254
255         INIT_LIST_HEAD(&lp->lp_rtrq);
256         INIT_LIST_HEAD(&lp->lp_routes);
257         INIT_LIST_HEAD(&lp->lp_peer_list);
258         INIT_LIST_HEAD(&lp->lp_peer_nets);
259         INIT_LIST_HEAD(&lp->lp_dc_list);
260         INIT_LIST_HEAD(&lp->lp_dc_pendq);
261         INIT_LIST_HEAD(&lp->lp_rtr_list);
262         init_waitqueue_head(&lp->lp_dc_waitq);
263         spin_lock_init(&lp->lp_lock);
264         lp->lp_primary_nid = *nid;
265         lp->lp_disc_src_nid = LNET_ANY_NID;
266         lp->lp_disc_dst_nid = LNET_ANY_NID;
267         if (lnet_peers_start_down())
268                 lp->lp_alive = false;
269         else
270                 lp->lp_alive = true;
271
272         /*
273          * All peers created on a router should have health checking
274          * enabled if it is not already on.
275          */
276         if (the_lnet.ln_routing && !lnet_health_sensitivity)
277                 lp->lp_health_sensitivity = 1;
278
279         /*
280          * Turn off discovery for loopback peer. If you're creating a peer
281          * for the loopback interface then that was initiated when we
282          * attempted to send a message over the loopback. There is no need
283          * to ever use a different interface when sending messages to
284          * myself.
285          */
286         if (nid_is_lo0(nid))
287                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
288         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
289
290         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
291
292         return lp;
293 }
294
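/*
 * Free a peer whose reference count has dropped to zero, handing any
 * messages still pending on discovery over to the resend list. Called
 * with the lnet_net_lock (cpt lock) held; see the comment below.
 */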
295 void
296 lnet_destroy_peer_locked(struct lnet_peer *lp)
297 {
298         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
299
300         LASSERT(atomic_read(&lp->lp_refcount) == 0);
301         LASSERT(lp->lp_rtr_refcount == 0);
302         LASSERT(list_empty(&lp->lp_peer_nets));
303         LASSERT(list_empty(&lp->lp_peer_list));
304         LASSERT(list_empty(&lp->lp_dc_list));
305
306         if (lp->lp_data)
307                 lnet_ping_buffer_decref(lp->lp_data);
308
309         /*
310          * if there are messages still on the pending queue, then make
311          * sure to queue them on the ln_msg_resend list so they can be
312          * resent at a later point if the discovery thread is still
313          * running.
314          * If the discovery thread has stopped, then the wakeup will be a
315          * no-op, and it is expected the lnet_shutdown_lndnets() will
316          * eventually be called, which will traverse this list and
317          * finalize the messages on the list.
318          * We cannot resend them now because we're holding the cpt lock.
319          * Releasing the lock can cause an inconsistent state.
320          */
321         spin_lock(&the_lnet.ln_msg_resend_lock);
322         spin_lock(&lp->lp_lock);
323         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
324         spin_unlock(&lp->lp_lock);
325         spin_unlock(&the_lnet.ln_msg_resend_lock);
326         wake_up(&the_lnet.ln_dc_waitq);
327
328         LIBCFS_FREE(lp, sizeof(*lp));
329 }
330
331 /*
332  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
333  * that peer_net, detach the peer_net from the peer.
334  *
335  * Call with lnet_net_lock/EX held
336  */
337 static void
338 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
339 {
340         struct lnet_peer_table *ptable;
341         struct lnet_peer_net *lpn;
342         struct lnet_peer *lp;
343
344         /*
345          * Belts and suspenders: gracefully handle teardown of a
346          * partially connected peer_ni.
347          */
348         lpn = lpni->lpni_peer_net;
349
350         list_del_init(&lpni->lpni_peer_nis);
351         /*
352          * If there are no lpni's left, we detach lpn from
353          * lp_peer_nets, so it cannot be found anymore.
354          */
355         if (list_empty(&lpn->lpn_peer_nis))
356                 list_del_init(&lpn->lpn_peer_nets);
357
358         /* Update peer NID count. */
359         lp = lpn->lpn_peer;
360         lp->lp_nnis--;
361
362         /*
363          * If there are no more peer nets, make the peer unfindable
364          * via the peer_tables.
365          *
366          * Otherwise, if the peer is DISCOVERED, tell discovery to
367          * take another look at it. This is a no-op if discovery for
368          * this peer did the detaching.
369          */
370         if (list_empty(&lp->lp_peer_nets)) {
371                 list_del_init(&lp->lp_peer_list);
372                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
373                 ptable->pt_peers--;
374         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
375                 /* Discovery isn't running, nothing to do here. */
376         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
377                 lnet_peer_queue_for_discovery(lp);
378                 wake_up(&the_lnet.ln_dc_waitq);
379         }
380         CDEBUG(D_NET, "peer %s NID %s\n",
381                 libcfs_nidstr(&lp->lp_primary_nid),
382                 libcfs_nidstr(&lpni->lpni_nid));
383 }
384
385 /* called with lnet_net_lock LNET_LOCK_EX held */
386 static int
387 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
388 {
389         struct lnet_peer_table *ptable = NULL;
390
391         /* don't remove a peer_ni if it's also a gateway */
392         if (lnet_isrouter(lpni) && !force) {
393                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
394                        libcfs_nidstr(&lpni->lpni_nid));
395                 return -EBUSY;
396         }
397
398         lnet_peer_remove_from_remote_list(lpni);
399
400         /* remove peer ni from the hash list. */
401         list_del_init(&lpni->lpni_hashlist);
402
403         /*
404          * indicate the peer is being deleted so the monitor thread can
405          * remove it from the recovery queue.
406          */
407         spin_lock(&lpni->lpni_lock);
408         lpni->lpni_state |= LNET_PEER_NI_DELETING;
409         spin_unlock(&lpni->lpni_lock);
410
411         /* decrement the ref count on the peer table */
412         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
413
414         /*
415          * The peer_ni can no longer be found with a lookup. But there
416          * can be current users, so keep track of it on the zombie
417          * list until the reference count has gone to zero.
418          *
419          * The last reference may be lost in a place where the
420          * lnet_net_lock locks only a single cpt, and that cpt may not
421          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
422          * has its own lock.
423          */
424         spin_lock(&ptable->pt_zombie_lock);
425         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
426         ptable->pt_zombies++;
427         spin_unlock(&ptable->pt_zombie_lock);
428
429         /* no need to keep this peer_ni on the hierarchy anymore */
430         lnet_peer_detach_peer_ni_locked(lpni);
431
432         /* remove hashlist reference on peer_ni */
433         lnet_peer_ni_decref_locked(lpni);
434
435         return 0;
436 }
437
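/*
 * Final teardown of the peer module: delete any peer_nis still on the
 * remote peer_ni list and destroy the peer tables.
 */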
438 void lnet_peer_uninit(void)
439 {
440         struct lnet_peer_ni *lpni, *tmp;
441
442         lnet_net_lock(LNET_LOCK_EX);
443
444         /* remove all peer_nis from the remote peer list and the hash list */
445         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
446                                  lpni_on_remote_peer_ni_list)
447                 lnet_peer_ni_del_locked(lpni, false);
448
449         lnet_peer_tables_destroy();
450
451         lnet_net_unlock(LNET_LOCK_EX);
452 }
453
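/*
 * Delete all peer_nis of a peer. The peer is marked deleted first,
 * then each peer_ni is removed in turn. Returns the last error seen
 * while deleting an individual peer_ni, or 0 on success.
 * Call with lnet_net_lock/EX held.
 */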
454 static int
455 lnet_peer_del_locked(struct lnet_peer *peer)
456 {
457         struct lnet_peer_ni *lpni = NULL, *lpni2;
458         int rc = 0, rc2 = 0;
459
460         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
461
462         spin_lock(&peer->lp_lock);
463         peer->lp_state |= LNET_PEER_MARK_DELETED;
464         spin_unlock(&peer->lp_lock);
465
466         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
467         while (lpni != NULL) {
468                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469                 rc = lnet_peer_ni_del_locked(lpni, false);
470                 if (rc != 0)
471                         rc2 = rc;
472                 lpni = lpni2;
473         }
474
475         return rc2;
476 }
477
478 /*
479  * Discovering this peer is taking too long. Cancel any Ping or Push
480  * that discovery is waiting on by unlinking the relevant MDs. The
481  * lnet_discovery_event_handler() will proceed from here and complete
482  * the cleanup.
483  */
484 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
485 {
486         struct lnet_handle_md ping_mdh;
487         struct lnet_handle_md push_mdh;
488
489         LNetInvalidateMDHandle(&ping_mdh);
490         LNetInvalidateMDHandle(&push_mdh);
491
492         spin_lock(&lp->lp_lock);
493         if (lp->lp_state & LNET_PEER_PING_SENT) {
494                 ping_mdh = lp->lp_ping_mdh;
495                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
496         }
497         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
498                 push_mdh = lp->lp_push_mdh;
499                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
500         }
501         spin_unlock(&lp->lp_lock);
502
503         if (!LNetMDHandleIsInvalid(ping_mdh))
504                 LNetMDUnlink(ping_mdh);
505         if (!LNetMDHandleIsInvalid(push_mdh))
506                 LNetMDUnlink(push_mdh);
507 }
508
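/*
 * Cancel any outstanding discovery ping or push for the peer, then
 * delete it under lnet_net_lock/EX.
 */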
509 static int
510 lnet_peer_del(struct lnet_peer *peer)
511 {
512         int rc;
513
514         lnet_peer_cancel_discovery(peer);
515         lnet_net_lock(LNET_LOCK_EX);
516         rc = lnet_peer_del_locked(peer);
517         lnet_net_unlock(LNET_LOCK_EX);
518
519         return rc;
520 }
521
522 /*
523  * Delete a NID from a peer. Call with ln_api_mutex held.
524  *
525  * Error codes:
526  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
527  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
528  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
529  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
530  */
531 static int
532 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
533 {
534         struct lnet_peer_ni *lpni;
535         struct lnet_nid primary_nid = lp->lp_primary_nid;
536         struct lnet_nid nid;
537         int rc = 0;
538         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
539
540         lnet_nid4_to_nid(nid4, &nid);
541         if (!(flags & LNET_PEER_CONFIGURED)) {
542                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
543                         rc = -EPERM;
544                         goto out;
545                 }
546         }
547
548         lpni = lnet_peer_ni_find_locked(&nid);
549         if (!lpni) {
550                 rc = -ENOENT;
551                 goto out;
552         }
553         lnet_peer_ni_decref_locked(lpni);
554         if (lp != lpni->lpni_peer_net->lpn_peer) {
555                 rc = -ECHILD;
556                 goto out;
557         }
558
559         /*
560          * This function only allows deletion of the primary NID if it
561          * is the only NID.
562          */
563         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
564                 rc = -EBUSY;
565                 goto out;
566         }
567
568         lnet_net_lock(LNET_LOCK_EX);
569
570         if (nid_same(&nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
571                 struct lnet_peer_ni *lpni2;
572                 /* assign the next peer_ni to be the primary */
573                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
574                 LASSERT(lpni2);
575                 lp->lp_primary_nid = lpni2->lpni_nid;
576         }
577         rc = lnet_peer_ni_del_locked(lpni, force);
578
579         lnet_net_unlock(LNET_LOCK_EX);
580
581 out:
582         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
583                libcfs_nidstr(&primary_nid), libcfs_nidstr(&nid),
584                flags, rc);
585
586         return rc;
587 }
588
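/*
 * Delete the peer_nis in this peer table that belong to the given net,
 * or all peer_nis if net is NULL. Deleting a primary NID deletes the
 * whole peer, so the iteration skips past the remaining NIDs of that
 * peer. Call with lnet_net_lock/EX held.
 */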
589 static void
590 lnet_peer_table_cleanup_locked(struct lnet_net *net,
591                                struct lnet_peer_table *ptable)
592 {
593         int                      i;
594         struct lnet_peer_ni     *next;
595         struct lnet_peer_ni     *lpni;
596         struct lnet_peer        *peer;
597
598         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
599                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
600                                          lpni_hashlist) {
601                         if (net != NULL && net != lpni->lpni_net)
602                                 continue;
603
604                         peer = lpni->lpni_peer_net->lpn_peer;
605                         if (!nid_same(&peer->lp_primary_nid,
606                                        &lpni->lpni_nid)) {
607                                 lnet_peer_ni_del_locked(lpni, false);
608                                 continue;
609                         }
610                         /*
611                          * Removing the primary NID implies removing
612                          * the entire peer. Advance next beyond any
613                          * peer_ni that belongs to the same peer.
614                          */
615                         list_for_each_entry_from(next, &ptable->pt_hash[i],
616                                                  lpni_hashlist) {
617                                 if (next->lpni_peer_net->lpn_peer != peer)
618                                         break;
619                         }
620                         lnet_peer_del_locked(peer);
621                 }
622         }
623 }
624
625 static void
626 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
627 {
628         wait_var_event_warning(&ptable->pt_zombies,
629                                ptable->pt_zombies == 0,
630                                "Waiting for %d zombies on peer table\n",
631                                ptable->pt_zombies);
632 }
633
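/*
 * Delete any routes whose gateway has a peer_ni on the given net. The
 * lnet_net_lock is dropped around each lnet_del_route() call.
 * Call with lnet_net_lock/EX held.
 */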
634 static void
635 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
636                                 struct lnet_peer_table *ptable)
637 {
638         struct lnet_peer_ni     *lp;
639         struct lnet_peer_ni     *tmp;
640         struct lnet_nid         gw_nid;
641         int                     i;
642
643         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
644                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
645                                          lpni_hashlist) {
646                         if (net != lp->lpni_net)
647                                 continue;
648
649                         if (!lnet_isrouter(lp))
650                                 continue;
651
652                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
653
654                         lnet_net_unlock(LNET_LOCK_EX);
655                         lnet_del_route(LNET_NET_ANY, &gw_nid);
656                         lnet_net_lock(LNET_LOCK_EX);
657                 }
658         }
659 }
660
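/*
 * Clean up all peers on the given net, or all peers if net is NULL
 * (shutdown). Routes through affected gateways are deleted first, then
 * the peers themselves, and finally we wait for the zombie peer_nis to
 * be finalized.
 */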
661 void
662 lnet_peer_tables_cleanup(struct lnet_net *net)
663 {
664         int i;
665         struct lnet_peer_table *ptable;
666
667         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
668         /* If just deleting the peers for a NI, get rid of any routes these
669          * peers are gateways for. */
670         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
671                 lnet_net_lock(LNET_LOCK_EX);
672                 lnet_peer_table_del_rtrs_locked(net, ptable);
673                 lnet_net_unlock(LNET_LOCK_EX);
674         }
675
676         /* Start the cleanup process */
677         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
678                 lnet_net_lock(LNET_LOCK_EX);
679                 lnet_peer_table_cleanup_locked(net, ptable);
680                 lnet_net_unlock(LNET_LOCK_EX);
681         }
682
683         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
684                 lnet_peer_ni_finalize_wait(ptable);
685 }
686
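/*
 * Look up a peer_ni by NID in the given peer table and take a
 * reference on it. Returns NULL if LNet is not running or the NID is
 * not found.
 */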
687 static struct lnet_peer_ni *
688 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
689 {
690         struct list_head        *peers;
691         struct lnet_peer_ni     *lp;
692
693         if (the_lnet.ln_state != LNET_STATE_RUNNING)
694                 return NULL;
695
696         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
697         list_for_each_entry(lp, peers, lpni_hashlist) {
698                 if (nid_same(&lp->lpni_nid, nid)) {
699                         lnet_peer_ni_addref_locked(lp);
700                         return lp;
701                 }
702         }
703
704         return NULL;
705 }
706
707 struct lnet_peer_ni *
708 lnet_find_peer_ni_locked(lnet_nid_t nid4)
709 {
710         struct lnet_peer_ni *lpni;
711         struct lnet_peer_table *ptable;
712         int cpt;
713         struct lnet_nid nid;
714
715         lnet_nid4_to_nid(nid4, &nid);
716
717         cpt = lnet_nid_cpt_hash(&nid, LNET_CPT_NUMBER);
718
719         ptable = the_lnet.ln_peer_tables[cpt];
720         lpni = lnet_get_peer_ni_locked(ptable, &nid);
721
722         return lpni;
723 }
724
725 struct lnet_peer_ni *
726 lnet_peer_ni_find_locked(struct lnet_nid *nid)
727 {
728         struct lnet_peer_ni *lpni;
729         struct lnet_peer_table *ptable;
730         int cpt;
731
732         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
733
734         ptable = the_lnet.ln_peer_tables[cpt];
735         lpni = lnet_get_peer_ni_locked(ptable, nid);
736
737         return lpni;
738 }
739
740 struct lnet_peer_ni *
741 lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
742 {
743         struct lnet_peer_net *lpn;
744         struct lnet_peer_ni *lpni;
745
746         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
747         if (!lpn)
748                 return NULL;
749
750         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
751                 if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
752                         return lpni;
753         }
754
755         return NULL;
756 }
757
758 struct lnet_peer_ni *
759 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
760 {
761         struct lnet_peer_net *lpn;
762         struct lnet_peer_ni *lpni;
763
764         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
765         if (!lpn)
766                 return NULL;
767
768         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
769                 if (nid_same(&lpni->lpni_nid, nid))
770                         return lpni;
771         }
772
773         return NULL;
774 }
775
776 struct lnet_peer *
777 lnet_find_peer4(lnet_nid_t nid)
778 {
779         struct lnet_peer_ni *lpni;
780         struct lnet_peer *lp = NULL;
781         int cpt;
782
783         cpt = lnet_net_lock_current();
784         lpni = lnet_find_peer_ni_locked(nid);
785         if (lpni) {
786                 lp = lpni->lpni_peer_net->lpn_peer;
787                 lnet_peer_addref_locked(lp);
788                 lnet_peer_ni_decref_locked(lpni);
789         }
790         lnet_net_unlock(cpt);
791
792         return lp;
793 }
794
795 struct lnet_peer *
796 lnet_find_peer(struct lnet_nid *nid)
797 {
798         struct lnet_peer_ni *lpni;
799         struct lnet_peer *lp = NULL;
800         int cpt;
801
802         cpt = lnet_net_lock_current();
803         lpni = lnet_peer_ni_find_locked(nid);
804         if (lpni) {
805                 lp = lpni->lpni_peer_net->lpn_peer;
806                 lnet_peer_addref_locked(lp);
807                 lnet_peer_ni_decref_locked(lpni);
808         }
809         lnet_net_unlock(cpt);
810
811         return lp;
812 }
813
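/*
 * Return the first peer_net of the peer if prev_lpn_id is 0, otherwise
 * the peer_net following the one with prev_lpn_id, wrapping around to
 * the start of the list. Returns NULL if prev_lpn_id is not found.
 */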
814 struct lnet_peer_net *
815 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
816 {
817         struct lnet_peer_net *net;
818
819         if (!prev_lpn_id) {
820                 /* no net id provided, return the first net */
821                 net = list_first_entry_or_null(&lp->lp_peer_nets,
822                                                struct lnet_peer_net,
823                                                lpn_peer_nets);
824
825                 return net;
826         }
827
828         /* find the net after the one provided */
829         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
830                 if (net->lpn_net_id == prev_lpn_id) {
831                         /*
832                          * if we reached the end of the list, loop back to
833                          * the beginning.
834                          */
835                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
836                                 return list_first_entry_or_null(&lp->lp_peer_nets,
837                                                                 struct lnet_peer_net,
838                                                                 lpn_peer_nets);
839                         else
840                                 return list_next_entry(net, lpn_peer_nets);
841                 }
842         }
843
844         return NULL;
845 }
846
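/*
 * Iterate over the peer_nis of a peer. If peer_net is given, the
 * iteration is restricted to that net. Pass prev == NULL to get the
 * first peer_ni; NULL is returned when the iteration is exhausted.
 */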
847 struct lnet_peer_ni *
848 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
849                              struct lnet_peer_net *peer_net,
850                              struct lnet_peer_ni *prev)
851 {
852         struct lnet_peer_ni *lpni;
853         struct lnet_peer_net *net = peer_net;
854
855         if (!prev) {
856                 if (!net) {
857                         if (list_empty(&peer->lp_peer_nets))
858                                 return NULL;
859
860                         net = list_entry(peer->lp_peer_nets.next,
861                                          struct lnet_peer_net,
862                                          lpn_peer_nets);
863                 }
864                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
865                                   lpni_peer_nis);
866
867                 return lpni;
868         }
869
870         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
871                 /*
872                  * if we reached the end of the peer ni list and a peer
873                  * net was specified, then there are no more peer nis in
874                  * that net.
875                  */
876                 if (net)
877                         return NULL;
878
879                 /*
880                  * we reached the end of this net ni list. move to the
881                  * next net
882                  */
883                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
884                     &peer->lp_peer_nets)
885                         /* no more nets and no more NIs. */
886                         return NULL;
887
888                 /* get the next net */
889                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
890                                  struct lnet_peer_net,
891                                  lpn_peer_nets);
892                 /* get the ni on it */
893                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
894                                   lpni_peer_nis);
895
896                 return lpni;
897         }
898
899         /* there are more nis left */
900         lpni = list_entry(prev->lpni_peer_nis.next,
901                           struct lnet_peer_ni, lpni_peer_nis);
902
903         return lpni;
904 }
905
906 /* Call with the ln_api_mutex held */
907 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
908 {
909         struct lnet_process_id id;
910         struct lnet_peer_table *ptable;
911         struct lnet_peer *lp;
912         __u32 count = 0;
913         __u32 size = 0;
914         int lncpt;
915         int cpt;
916         __u32 i;
917         int rc;
918
919         rc = -ESHUTDOWN;
920         if (the_lnet.ln_state != LNET_STATE_RUNNING)
921                 goto done;
922
923         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
924
925         /*
926          * Count the number of peers, and return E2BIG if the buffer
927          * is too small. We'll also return the desired size.
928          */
929         rc = -E2BIG;
930         for (cpt = 0; cpt < lncpt; cpt++) {
931                 ptable = the_lnet.ln_peer_tables[cpt];
932                 count += ptable->pt_peers;
933         }
934         size = count * sizeof(*ids);
935         if (size > *sizep)
936                 goto done;
937
938         /*
939          * Walk the peer lists and copy out the primary nids.
940          * This is safe because the peer lists are only modified
941          * while the ln_api_mutex is held. So we don't need to
942          * hold the lnet_net_lock as well, and can therefore
943          * directly call copy_to_user().
944          */
945         rc = -EFAULT;
946         memset(&id, 0, sizeof(id));
947         id.pid = LNET_PID_LUSTRE;
948         i = 0;
949         for (cpt = 0; cpt < lncpt; cpt++) {
950                 ptable = the_lnet.ln_peer_tables[cpt];
951                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
952                         if (!nid_is_nid4(&lp->lp_primary_nid))
953                                 continue;
954                         if (i >= count)
955                                 goto done;
956                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
957                         if (copy_to_user(&ids[i], &id, sizeof(id)))
958                                 goto done;
959                         i++;
960                 }
961         }
962         rc = 0;
963 done:
964         *countp = count;
965         *sizep = size;
966         return rc;
967 }
968
969 /*
970  * Start pushes to peers that need to be updated for a configuration
971  * change on this node.
972  */
973 void
974 lnet_push_update_to_peers(int force)
975 {
976         struct lnet_peer_table *ptable;
977         struct lnet_peer *lp;
978         int lncpt;
979         int cpt;
980
981         lnet_net_lock(LNET_LOCK_EX);
982         if (lnet_peer_discovery_disabled)
983                 force = 0;
984         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
985         for (cpt = 0; cpt < lncpt; cpt++) {
986                 ptable = the_lnet.ln_peer_tables[cpt];
987                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
988                         if (force) {
989                                 spin_lock(&lp->lp_lock);
990                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
991                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
992                                 spin_unlock(&lp->lp_lock);
993                         }
994                         if (lnet_peer_needs_push(lp))
995                                 lnet_peer_queue_for_discovery(lp);
996                 }
997         }
998         lnet_net_unlock(LNET_LOCK_EX);
999         wake_up(&the_lnet.ln_dc_waitq);
1000 }
1001
1002 /* find the NID in the preferred gateways for the remote peer
1003  * return:
1004  *      false: list is not empty and NID is not preferred
1005  *      false: list is empty
1006  *      true: nid is found in the list
1007  */
1008 bool
1009 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
1010                              struct lnet_nid *gw_nid)
1011 {
1012         struct lnet_nid_list *ne;
1013
1014         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
1015                libcfs_nidstr(&lpni->lpni_nid),
1016                list_empty(&lpni->lpni_rtr_pref_nids));
1017
1018         if (list_empty(&lpni->lpni_rtr_pref_nids))
1019                 return false;
1020
1021         /* iterate through all the preferred NIDs and see if any of them
1022          * matches the provided gw_nid
1023          */
1024         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1025                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
1026                        libcfs_nidstr(&ne->nl_nid),
1027                        libcfs_nidstr(gw_nid));
1028                 if (nid_same(&ne->nl_nid, gw_nid))
1029                         return true;
1030         }
1031
1032         return false;
1033 }
1034
1035 void
1036 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
1037 {
1038         struct list_head zombies;
1039         struct lnet_nid_list *ne;
1040         struct lnet_nid_list *tmp;
1041         int cpt = lpni->lpni_cpt;
1042
1043         INIT_LIST_HEAD(&zombies);
1044
1045         lnet_net_lock(cpt);
1046         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
1047         lnet_net_unlock(cpt);
1048
1049         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1050                 list_del(&ne->nl_list);
1051                 LIBCFS_FREE(ne, sizeof(*ne));
1052         }
1053 }
1054
1055 int
1056 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
1057                        struct lnet_nid *gw_nid)
1058 {
1059         int cpt = lpni->lpni_cpt;
1060         struct lnet_nid_list *ne = NULL;
1061
1062         /* This function is called with the api_mutex held. While the
1063          * api_mutex is held the list cannot be modified, as it is only
1064          * modified as a result of applying a UDSP, and that also happens
1065          * under the api_mutex lock.
1066          */
1067         __must_hold(&the_lnet.ln_api_mutex);
1068
1069         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1070                 if (nid_same(&ne->nl_nid, gw_nid))
1071                         return -EEXIST;
1072         }
1073
1074         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1075         if (!ne)
1076                 return -ENOMEM;
1077
1078         ne->nl_nid = *gw_nid;
1079
1080         /* Lock the cpt to protect against addition and checks in the
1081          * selection algorithm
1082          */
1083         lnet_net_lock(cpt);
1084         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1085         lnet_net_unlock(cpt);
1086
1087         return 0;
1088 }
1089
1090 /*
1091  * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
1092  * this is a preferred point-to-point path. Call with lnet_net_lock in
1093  * shared mode.
1094  */
1095 bool
1096 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1097 {
1098         struct lnet_nid_list *ne;
1099
1100         if (lpni->lpni_pref_nnids == 0)
1101                 return false;
1102         if (lpni->lpni_pref_nnids == 1)
1103                 return nid_same(&lpni->lpni_pref.nid, nid);
1104         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1105                 if (nid_same(&ne->nl_nid, nid))
1106                         return true;
1107         }
1108         return false;
1109 }
1110
1111 /*
1112  * Set a single ni as preferred, provided no preferred ni is already
1113  * defined. Only to be used for non-multi-rail peer_ni.
1114  */
1115 int
1116 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1117                                   struct lnet_nid *nid)
1118 {
1119         int rc = 0;
1120
1121         if (!nid)
1122                 return -EINVAL;
1123         spin_lock(&lpni->lpni_lock);
1124         if (LNET_NID_IS_ANY(nid)) {
1125                 rc = -EINVAL;
1126         } else if (lpni->lpni_pref_nnids > 0) {
1127                 rc = -EPERM;
1128         } else if (lpni->lpni_pref_nnids == 0) {
1129                 lpni->lpni_pref.nid = *nid;
1130                 lpni->lpni_pref_nnids = 1;
1131                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1132         }
1133         spin_unlock(&lpni->lpni_lock);
1134
1135         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1136                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1137         return rc;
1138 }
1139
1140 /*
1141  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1142  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1143  */
1144 int
1145 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1146 {
1147         int rc = 0;
1148
1149         spin_lock(&lpni->lpni_lock);
1150         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1151                 lpni->lpni_pref_nnids = 0;
1152                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1153         } else if (lpni->lpni_pref_nnids == 0) {
1154                 rc = -ENOENT;
1155         } else {
1156                 rc = -EPERM;
1157         }
1158         spin_unlock(&lpni->lpni_lock);
1159
1160         CDEBUG(D_NET, "peer %s: %d\n",
1161                libcfs_nidstr(&lpni->lpni_nid), rc);
1162         return rc;
1163 }
1164
1165 void
1166 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1167 {
1168         lpni->lpni_sel_priority = priority;
1169 }
1170
1171 /*
1172  * Clear the preferred NIDs from a non-multi-rail peer.
1173  */
1174 void
1175 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1176 {
1177         struct lnet_peer_ni *lpni = NULL;
1178
1179         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1180                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1181 }
1182
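/*
 * Add a preferred (local) NID to this peer_ni. A non-MR peer may have
 * only one preferred NID (-EPERM otherwise), duplicates return -EEXIST,
 * and once a second NID is added the single inline NID is converted
 * into a list.
 */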
1183 int
1184 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1185 {
1186         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1187         struct lnet_nid_list *ne1 = NULL;
1188         struct lnet_nid_list *ne2 = NULL;
1189         struct lnet_nid *tmp_nid = NULL;
1190         int rc = 0;
1191
1192         if (LNET_NID_IS_ANY(nid)) {
1193                 rc = -EINVAL;
1194                 goto out;
1195         }
1196
1197         if (lpni->lpni_pref_nnids == 1 &&
1198             nid_same(&lpni->lpni_pref.nid, nid)) {
1199                 rc = -EEXIST;
1200                 goto out;
1201         }
1202
1203         /* A non-MR node may have only one preferred NI per peer_ni */
1204         if (lpni->lpni_pref_nnids > 0 &&
1205             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1206                 rc = -EPERM;
1207                 goto out;
1208         }
1209
1210         /* add the new preferred nid to the list of preferred nids */
1211         if (lpni->lpni_pref_nnids != 0) {
1212                 size_t alloc_size = sizeof(*ne1);
1213
1214                 if (lpni->lpni_pref_nnids == 1) {
1215                         tmp_nid = &lpni->lpni_pref.nid;
1216                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1217                 }
1218
1219                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1220                         if (nid_same(&ne1->nl_nid, nid)) {
1221                                 rc = -EEXIST;
1222                                 goto out;
1223                         }
1224                 }
1225
1226                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1227                                  alloc_size);
1228                 if (!ne1) {
1229                         rc = -ENOMEM;
1230                         goto out;
1231                 }
1232
1233                 /* move the originally stored nid to the list */
1234                 if (lpni->lpni_pref_nnids == 1) {
1235                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1236                                 lpni->lpni_cpt, alloc_size);
1237                         if (!ne2) {
1238                                 rc = -ENOMEM;
1239                                 goto out;
1240                         }
1241                         INIT_LIST_HEAD(&ne2->nl_list);
1242                         ne2->nl_nid = *tmp_nid;
1243                 }
1244                 ne1->nl_nid = *nid;
1245         }
1246
1247         lnet_net_lock(LNET_LOCK_EX);
1248         spin_lock(&lpni->lpni_lock);
1249         if (lpni->lpni_pref_nnids == 0) {
1250                 lpni->lpni_pref.nid = *nid;
1251         } else {
1252                 if (ne2)
1253                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1254                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1255         }
1256         lpni->lpni_pref_nnids++;
1257         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1258         spin_unlock(&lpni->lpni_lock);
1259         lnet_net_unlock(LNET_LOCK_EX);
1260
1261 out:
1262         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1263                 spin_lock(&lpni->lpni_lock);
1264                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1265                 spin_unlock(&lpni->lpni_lock);
1266         }
1267         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1268                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1269         return rc;
1270 }
1271
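/*
 * Remove a preferred NID from this peer_ni. Returns -ENOENT if the NID
 * is not preferred. When the list drops back to a single entry the
 * remaining NID is stored inline again.
 */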
1272 int
1273 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1274 {
1275         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1276         struct lnet_nid_list *ne = NULL;
1277         int rc = 0;
1278
1279         if (lpni->lpni_pref_nnids == 0) {
1280                 rc = -ENOENT;
1281                 goto out;
1282         }
1283
1284         if (lpni->lpni_pref_nnids == 1) {
1285                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1286                         rc = -ENOENT;
1287                         goto out;
1288                 }
1289         } else {
1290                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1291                         if (nid_same(&ne->nl_nid, nid))
1292                                 goto remove_nid_entry;
1293                 }
1294                 rc = -ENOENT;
1295                 ne = NULL;
1296                 goto out;
1297         }
1298
1299 remove_nid_entry:
1300         lnet_net_lock(LNET_LOCK_EX);
1301         spin_lock(&lpni->lpni_lock);
1302         if (lpni->lpni_pref_nnids == 1)
1303                 lpni->lpni_pref.nid = LNET_ANY_NID;
1304         else {
1305                 list_del_init(&ne->nl_list);
1306                 if (lpni->lpni_pref_nnids == 2) {
1307                         struct lnet_nid_list *ne, *tmp;
1308
1309                         list_for_each_entry_safe(ne, tmp,
1310                                                  &lpni->lpni_pref.nids,
1311                                                  nl_list) {
1312                                 lpni->lpni_pref.nid = ne->nl_nid;
1313                                 list_del_init(&ne->nl_list);
1314                                 LIBCFS_FREE(ne, sizeof(*ne));
1315                         }
1316                 }
1317         }
1318         lpni->lpni_pref_nnids--;
1319         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1320         spin_unlock(&lpni->lpni_lock);
1321         lnet_net_unlock(LNET_LOCK_EX);
1322
1323         if (ne)
1324                 LIBCFS_FREE(ne, sizeof(*ne));
1325 out:
1326         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1327                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1328         return rc;
1329 }
1330
1331 void
1332 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1333 {
1334         struct list_head zombies;
1335         struct lnet_nid_list *ne;
1336         struct lnet_nid_list *tmp;
1337
1338         INIT_LIST_HEAD(&zombies);
1339
1340         lnet_net_lock(LNET_LOCK_EX);
1341         if (lpni->lpni_pref_nnids == 1)
1342                 lpni->lpni_pref.nid = LNET_ANY_NID;
1343         else if (lpni->lpni_pref_nnids > 1)
1344                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1345         lpni->lpni_pref_nnids = 0;
1346         lnet_net_unlock(LNET_LOCK_EX);
1347
1348         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1349                 list_del_init(&ne->nl_list);
1350                 LIBCFS_FREE(ne, sizeof(*ne));
1351         }
1352 }
1353
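/*
 * Store in result the primary NID of the peer owning the given NID, or
 * the NID itself if no such peer_ni exists.
 */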
1354 void
1355 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1356 {
1357         struct lnet_peer_ni *lpni;
1358
1359         *result = *nid;
1360         lpni = lnet_peer_ni_find_locked(nid);
1361         if (lpni) {
1362                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1363                 lnet_peer_ni_decref_locked(lpni);
1364         }
1365 }
1366
1367 bool
1368 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1369 __must_hold(&lp->lp_lock)
1370 {
1371         if (lnet_peer_discovery_disabled)
1372                 return true;
1373
1374         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1375             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1376                 return true;
1377         }
1378
1379         return false;
1380 }
1381
1382 /*
1383  * Peer Discovery
1384  */
1385 bool
1386 lnet_is_discovery_disabled(struct lnet_peer *lp)
1387 {
1388         bool rc = false;
1389
1390         spin_lock(&lp->lp_lock);
1391         rc = lnet_is_discovery_disabled_locked(lp);
1392         spin_unlock(&lp->lp_lock);
1393
1394         return rc;
1395 }
1396
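/*
 * Configure a peer from an array of NIDs. The first non-loopback NID
 * becomes the primary NID; the remaining NIDs are added to the same
 * peer unless discovery is disabled, in which case each NID becomes
 * its own peer. -EEXIST from an individual add is not treated as an
 * error.
 */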
1397 int
1398 LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
1399 {
1400         lnet_nid_t pnid = 0;
1401         bool mr;
1402         int i, rc;
1403
1404         if (!nids || num_nids < 1)
1405                 return -EINVAL;
1406
1407         rc = LNetNIInit(LNET_PID_ANY);
1408         if (rc < 0)
1409                 return rc;
1410
1411         mutex_lock(&the_lnet.ln_api_mutex);
1412
1413         mr = lnet_peer_discovery_disabled == 0;
1414
1415         rc = 0;
1416         for (i = 0; i < num_nids; i++) {
1417                 if (nids[i] == LNET_NID_LO_0)
1418                         continue;
1419
1420                 if (!pnid) {
1421                         pnid = nids[i];
1422                         rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
1423                 } else if (lnet_peer_discovery_disabled) {
1424                         rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
1425                 } else {
1426                         rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
1427                 }
1428
1429                 if (rc && rc != -EEXIST)
1430                         goto unlock;
1431         }
1432
1433 unlock:
1434         mutex_unlock(&the_lnet.ln_api_mutex);
1435
1436         LNetNIFini();
1437
1438         return rc == -EEXIST ? 0 : rc;
1439 }
1440 EXPORT_SYMBOL(LNetAddPeer);
1441
1442 /* FIXME support large-addr nid */
1443 lnet_nid_t
1444 LNetPrimaryNID(lnet_nid_t nid)
1445 {
1446         struct lnet_peer *lp;
1447         struct lnet_peer_ni *lpni;
1448         lnet_nid_t primary_nid = nid;
1449         int rc = 0;
1450         int cpt;
1451
1452         if (nid == LNET_NID_LO_0)
1453                 return LNET_NID_LO_0;
1454
1455         cpt = lnet_net_lock_current();
1456         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1457         if (IS_ERR(lpni)) {
1458                 rc = PTR_ERR(lpni);
1459                 goto out_unlock;
1460         }
1461         lp = lpni->lpni_peer_net->lpn_peer;
1462
1463         /* If discovery is disabled locally then we needn't bother running
1464          * discovery here because discovery will not modify whatever
1465          * primary NID is currently set for this peer. If the specified peer is
1466          * down then this discovery can introduce long delays into the mount
1467          * process, so skip it if it isn't necessary.
1468          */
1469         while (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
1470                 spin_lock(&lp->lp_lock);
1471                 /* force a full discovery cycle */
1472                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1473                 spin_unlock(&lp->lp_lock);
1474
1475                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1476                 if (rc)
1477                         goto out_decref;
1478                 /* The lpni (or lp) for this NID may have changed and our ref is
1479                  * the only thing keeping the old one around. Release the ref
1480                  * and look up the lpni again.
1481                  */
1482                 lnet_peer_ni_decref_locked(lpni);
1483                 lpni = lnet_find_peer_ni_locked(nid);
1484                 if (!lpni) {
1485                         rc = -ENOENT;
1486                         goto out_unlock;
1487                 }
1488                 lp = lpni->lpni_peer_net->lpn_peer;
1489
1490                 /* If we find that the peer has discovery disabled then we will
1491                  * not modify whatever primary NID is currently set for this
1492                  * peer. Thus, we can break out of this loop even if the peer
1493                  * is not fully up to date.
1494                  */
1495                 if (lnet_is_discovery_disabled(lp))
1496                         break;
1497         }
1498         primary_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
1499 out_decref:
1500         lnet_peer_ni_decref_locked(lpni);
1501 out_unlock:
1502         lnet_net_unlock(cpt);
1503
1504         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1505                libcfs_nid2str(primary_nid), rc);
1506         return primary_nid;
1507 }
1508 EXPORT_SYMBOL(LNetPrimaryNID);
1509
1510 struct lnet_peer_net *
1511 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1512 {
1513         struct lnet_peer_net *peer_net;
1514         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1515                 if (peer_net->lpn_net_id == net_id)
1516                         return peer_net;
1517         }
1518         return NULL;
1519 }
1520
1521 /*
1522  * Attach a peer_ni to a peer_net and peer. This function assumes
1523  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1524  * may be attached to a different peer, in which case it will be
1525  * properly detached first. The whole operation is done atomically.
1526  *
1527  * This function consumes the reference on lpni and always returns 0.
1528  * This is the last function called from functions that do return an
1529  * int, so returning 0 here allows the compiler to do a tail call.
1530  */
1531 static int
1532 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1533                                 struct lnet_peer_net *lpn,
1534                                 struct lnet_peer_ni *lpni,
1535                                 unsigned flags)
1536 {
1537         struct lnet_peer_table *ptable;
1538         bool new_lpn = false;
1539         int rc;
1540
1541         /* Install the new peer_ni */
1542         lnet_net_lock(LNET_LOCK_EX);
1543         /* Add peer_ni to global peer table hash, if necessary. */
1544         if (list_empty(&lpni->lpni_hashlist)) {
1545                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1546
1547                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1548                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1549                 ptable->pt_version++;
1550                 lnet_peer_ni_addref_locked(lpni);
1551         }
1552
1553         /* Detach the peer_ni from an existing peer, if necessary. */
1554         if (lpni->lpni_peer_net) {
1555                 LASSERT(lpni->lpni_peer_net != lpn);
1556                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1557                 lnet_peer_detach_peer_ni_locked(lpni);
1558                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1559                 lpni->lpni_peer_net = NULL;
1560         }
1561
1562         /* Add peer_ni to peer_net */
1563         lpni->lpni_peer_net = lpn;
1564         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1565                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1566         else
1567                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1568         lnet_update_peer_net_healthv(lpni);
1569         lnet_peer_net_addref_locked(lpn);
1570
1571         /* Add peer_net to peer */
1572         if (!lpn->lpn_peer) {
1573                 new_lpn = true;
1574                 lpn->lpn_peer = lp;
1575                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1576                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1577                 else
1578                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1579                 lnet_peer_addref_locked(lp);
1580         }
1581
1582         /* Add peer to global peer list, if necessary */
1583         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1584         if (list_empty(&lp->lp_peer_list)) {
1585                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1586                 ptable->pt_peers++;
1587         }
1588
1589
1590         /* Update peer state */
1591         spin_lock(&lp->lp_lock);
1592         if (flags & LNET_PEER_CONFIGURED) {
1593                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1594                         lp->lp_state |= LNET_PEER_CONFIGURED;
1595         }
1596         if (flags & LNET_PEER_MULTI_RAIL) {
1597                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1598                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1599                         lnet_peer_clr_non_mr_pref_nids(lp);
1600                 }
1601         }
1602         spin_unlock(&lp->lp_lock);
1603
1604         lp->lp_nnis++;
1605
1606         /* apply UDSPs */
1607         if (new_lpn) {
1608                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1609                 if (rc)
1610                         CERROR("Failed to apply UDSPs on lpn %s\n",
1611                                libcfs_net2str(lpn->lpn_net_id));
1612         }
1613         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1614         if (rc)
1615                 CERROR("Failed to apply UDSPs on lpni %s\n",
1616                        libcfs_nidstr(&lpni->lpni_nid));
1617
1618         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1619                libcfs_nidstr(&lp->lp_primary_nid),
1620                libcfs_nidstr(&lpni->lpni_nid), flags);
1621         lnet_peer_ni_decref_locked(lpni);
1622         lnet_net_unlock(LNET_LOCK_EX);
1623
1624         return 0;
1625 }
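
/*
 * Calling-pattern sketch: callers such as lnet_peer_add() below allocate
 * all three objects first and hand the lpni reference to this function as
 * the final step (allocation error handling omitted here):
 *
 *	lp = lnet_peer_alloc(&nid);
 *	lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
 *	lpni = lnet_peer_ni_alloc(&nid);
 *	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
 */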
1626
1627 /*
1628  * Create a new peer, with nid as its primary nid.
1629  *
1630  * Call with the lnet_api_mutex held.
1631  */
1632 static int
1633 lnet_peer_add(lnet_nid_t nid4, unsigned int flags)
1634 {
1635         struct lnet_nid nid;
1636         struct lnet_peer *lp;
1637         struct lnet_peer_net *lpn;
1638         struct lnet_peer_ni *lpni;
1639         int rc = 0;
1640
1641         LASSERT(nid4 != LNET_NID_ANY);
1642
1643         /*
1644          * No need for the lnet_net_lock here, because the
1645          * lnet_api_mutex is held.
1646          */
1647         lpni = lnet_find_peer_ni_locked(nid4);
1648         if (lpni) {
1649                 /* A peer with this NID already exists. */
1650                 lp = lpni->lpni_peer_net->lpn_peer;
1651                 lnet_peer_ni_decref_locked(lpni);
1652                 /*
1653                  * This is an error if the peer was configured and the
1654                  * primary NID differs or an attempt is made to change
1655                  * the Multi-Rail flag. Otherwise the assumption is
1656                  * that an existing peer is being modified.
1657                  */
1658                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1659                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != nid4)
1660                                 rc = -EEXIST;
1661                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1662                                 rc = -EPERM;
1663                         goto out;
1664                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1665                         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid4) {
1666                                 rc = -EEXIST;
1667                                 goto out;
1668                         }
1669                 }
1670                 /* Delete and recreate as a configured peer. */
1671                 rc = lnet_peer_del(lp);
1672                 if (rc)
1673                         goto out;
1674         }
1675
1676         /* Create peer, peer_net, and peer_ni. */
1677         rc = -ENOMEM;
1678         lnet_nid4_to_nid(nid4, &nid);
1679         lp = lnet_peer_alloc(&nid);
1680         if (!lp)
1681                 goto out;
1682         lpn = lnet_peer_net_alloc(LNET_NID_NET(&nid));
1683         if (!lpn)
1684                 goto out_free_lp;
1685         lpni = lnet_peer_ni_alloc(&nid);
1686         if (!lpni)
1687                 goto out_free_lpn;
1688
1689         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1690
1691 out_free_lpn:
1692         LIBCFS_FREE(lpn, sizeof(*lpn));
1693 out_free_lp:
1694         LIBCFS_FREE(lp, sizeof(*lp));
1695 out:
1696         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1697                libcfs_nid2str(nid4), flags, rc);
1698         return rc;
1699 }
1700
1701 /*
1702  * Add a NID to a peer. Call with ln_api_mutex held.
1703  *
1704  * Error codes:
1705  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1706  *  -EEXIST:   The NID was configured by DLC for a different peer.
1707  *  -ENOMEM:   Out of memory.
1708  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1709  *             non-multi-rail peer.
1710  */
1711 static int
1712 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid4, unsigned int flags)
1713 {
1714         struct lnet_peer_net *lpn;
1715         struct lnet_peer_ni *lpni;
1716         struct lnet_nid nid;
1717         int rc = 0;
1718
1719         LASSERT(lp);
1720         LASSERT(nid4 != LNET_NID_ANY);
1721
1722         lnet_nid4_to_nid(nid4, &nid);
1723
1724         /* A configured peer can only be updated through configuration. */
1725         if (!(flags & LNET_PEER_CONFIGURED)) {
1726                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1727                         rc = -EPERM;
1728                         goto out;
1729                 }
1730         }
1731
1732         /*
1733          * The MULTI_RAIL flag can be set but not cleared, because
1734          * that would leave the peer struct in an invalid state.
1735          */
1736         if (flags & LNET_PEER_MULTI_RAIL) {
1737                 spin_lock(&lp->lp_lock);
1738                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1739                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1740                         lnet_peer_clr_non_mr_pref_nids(lp);
1741                 }
1742                 spin_unlock(&lp->lp_lock);
1743         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1744                 rc = -EPERM;
1745                 goto out;
1746         }
1747
1748         lpni = lnet_find_peer_ni_locked(nid4);
1749         if (lpni) {
1750                 /*
1751                  * A peer_ni already exists. This is only a problem if
1752                  * it is not connected to this peer and was configured
1753                  * by DLC.
1754                  */
1755                 if (lpni->lpni_peer_net->lpn_peer == lp)
1756                         goto out_free_lpni;
1757                 if (lnet_peer_ni_is_configured(lpni)) {
1758                         rc = -EEXIST;
1759                         goto out_free_lpni;
1760                 }
1761                 /* If this is the primary NID, destroy the peer. */
1762                 if (lnet_peer_ni_is_primary(lpni)) {
1763                         struct lnet_peer *rtr_lp =
1764                                 lpni->lpni_peer_net->lpn_peer;
1765                         int rtr_refcount = rtr_lp->lp_rtr_refcount;
1766                         /*
1767                          * If we're trying to delete a router it means
1768                          * we're moving this peer NI to a new peer, so we
1769                          * must transfer the router properties to the new peer.
1770                          */
1771                         if (rtr_refcount > 0) {
1772                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1773                                 lnet_rtr_transfer_to_peer(rtr_lp, lp);
1774                         }
1775                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1776                         lnet_peer_ni_decref_locked(lpni);
1777                         lpni = lnet_peer_ni_alloc(&nid);
1778                         if (!lpni) {
1779                                 rc = -ENOMEM;
1780                                 goto out_free_lpni;
1781                         }
1782                 }
1783         } else {
1784                 lpni = lnet_peer_ni_alloc(&nid);
1785                 if (!lpni) {
1786                         rc = -ENOMEM;
1787                         goto out_free_lpni;
1788                 }
1789         }
1790
1791         /*
1792          * Get the peer_net. Check that we're not adding a second
1793          * peer_ni on a peer_net of a non-multi-rail peer.
1794          */
1795         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid4));
1796         if (!lpn) {
1797                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid4));
1798                 if (!lpn) {
1799                         rc = -ENOMEM;
1800                         goto out_free_lpni;
1801                 }
1802         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1803                 rc = -ENOTUNIQ;
1804                 goto out_free_lpni;
1805         }
1806
1807         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1808
1809 out_free_lpni:
1810         lnet_peer_ni_decref_locked(lpni);
1811 out:
1812         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1813                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nid2str(nid4),
1814                flags, rc);
1815         return rc;
1816 }
1817
1818 /*
1819  * Update the primary NID of a peer, if possible.
1820  *
1821  * Call with the lnet_api_mutex held.
1822  */
1823 static int
1824 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid,
1825                           unsigned int flags)
1826 {
1827         struct lnet_nid old = lp->lp_primary_nid;
1828         int rc = 0;
1829
1830         if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid)
1831                 goto out;
1832
1833         lnet_nid4_to_nid(nid, &lp->lp_primary_nid);
1834
1835         rc = lnet_peer_add_nid(lp, nid, flags);
1836         if (rc) {
1837                 lp->lp_primary_nid = old;
1838                 goto out;
1839         }
1840 out:
1841         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1842                libcfs_nidstr(&old), libcfs_nid2str(nid), rc);
1843
1844         return rc;
1845 }
1846
1847 /*
1848  * lpni creation initiated due to traffic either sending or receiving.
1849  */
1850 static int
1851 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1852 {
1853         struct lnet_peer *lp;
1854         struct lnet_peer_net *lpn;
1855         struct lnet_peer_ni *lpni;
1856         unsigned flags = 0;
1857         int rc = 0;
1858
1859         if (LNET_NID_IS_ANY(nid)) {
1860                 rc = -EINVAL;
1861                 goto out;
1862         }
1863
1864         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1865         lpni = lnet_peer_ni_find_locked(nid);
1866         if (lpni) {
1867                 /*
1868                  * We must have raced with another thread. Since we
1869                  * know next to nothing about a peer_ni created by
1870                  * traffic, we just assume everything is ok and
1871                  * return.
1872                  */
1873                 lnet_peer_ni_decref_locked(lpni);
1874                 goto out;
1875         }
1876
1877         /* Create peer, peer_net, and peer_ni. */
1878         rc = -ENOMEM;
1879         lp = lnet_peer_alloc(nid);
1880         if (!lp)
1881                 goto out;
1882         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1883         if (!lpn)
1884                 goto out_free_lp;
1885         lpni = lnet_peer_ni_alloc(nid);
1886         if (!lpni)
1887                 goto out_free_lpn;
1888         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1889
1890         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1891
1892 out_free_lpn:
1893         LIBCFS_FREE(lpn, sizeof(*lpn));
1894 out_free_lp:
1895         LIBCFS_FREE(lp, sizeof(*lp));
1896 out:
1897         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1898         return rc;
1899 }
1900
1901 /*
1902  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1903  *
1904  * This API handles the following combinations:
1905  *   Create a peer with its primary NI if only the prim_nid is provided
1906  *   Add a NID to a peer identified by the prim_nid. The peer identified
1907  *   by the prim_nid must already exist.
1908  *   The peer being created may be non-MR.
1909  *
1910  * The caller must hold ln_api_mutex. This prevents the peer from
1911  * being created/modified/deleted by a different thread.
1912  */
1913 int
1914 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
1915 {
1916         struct lnet_peer *lp = NULL;
1917         struct lnet_peer_ni *lpni;
1918         unsigned int flags = 0;
1919
1920         /* The prim_nid must always be specified */
1921         if (prim_nid == LNET_NID_ANY)
1922                 return -EINVAL;
1923
1924         if (!temp)
1925                 flags = LNET_PEER_CONFIGURED;
1926
1927         if (mr)
1928                 flags |= LNET_PEER_MULTI_RAIL;
1929
1930         /*
1931          * If nid isn't specified, we must create a new peer with
1932          * prim_nid as its primary nid.
1933          */
1934         if (nid == LNET_NID_ANY)
1935                 return lnet_peer_add(prim_nid, flags);
1936
1937         /* Look up the prim_nid, which must exist. */
1938         lpni = lnet_find_peer_ni_locked(prim_nid);
1939         if (!lpni)
1940                 return -ENOENT;
1941         lnet_peer_ni_decref_locked(lpni);
1942         lp = lpni->lpni_peer_net->lpn_peer;
1943
1944         /* Peer must have been configured. */
1945         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1946                 CDEBUG(D_NET, "peer %s was not configured\n",
1947                        libcfs_nid2str(prim_nid));
1948                 return -ENOENT;
1949         }
1950
1951         /* Primary NID must match */
1952         if (lnet_nid_to_nid4(&lp->lp_primary_nid) != prim_nid) {
1953                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1954                        libcfs_nid2str(prim_nid),
1955                        libcfs_nidstr(&lp->lp_primary_nid));
1956                 return -ENODEV;
1957         }
1958
1959         /* Multi-Rail flag must match. */
1960         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1961                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1962                        libcfs_nid2str(prim_nid));
1963                 return -EPERM;
1964         }
1965
1966         return lnet_peer_add_nid(lp, nid, flags);
1967 }
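
/*
 * Example (sketch): this is the kernel side of "lnetctl peer add". A DLC
 * caller holding the ln_api_mutex adds a multi-rail peer and then one of
 * its NIDs roughly as follows (prim_nid/nid are caller-supplied):
 *
 *	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true, false);
 *	if (!rc)
 *		rc = lnet_add_peer_ni(prim_nid, nid, true, false);
 */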
1968
1969 /*
1970  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1971  *
1972  * This API handles the following combinations:
1973  *   Delete a NI from a peer if both prim_nid and nid are provided.
1974  *   Delete a peer if only prim_nid is provided.
1975  *   Delete a peer if its primary nid is provided.
1976  *
1977  * The caller must hold ln_api_mutex. This prevents the peer from
1978  * being modified/deleted by a different thread.
1979  */
1980 int
1981 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1982 {
1983         struct lnet_peer *lp;
1984         struct lnet_peer_ni *lpni;
1985         unsigned flags;
1986
1987         if (prim_nid == LNET_NID_ANY)
1988                 return -EINVAL;
1989
1990         lpni = lnet_find_peer_ni_locked(prim_nid);
1991         if (!lpni)
1992                 return -ENOENT;
1993         lnet_peer_ni_decref_locked(lpni);
1994         lp = lpni->lpni_peer_net->lpn_peer;
1995
1996         if (prim_nid != lnet_nid_to_nid4(&lp->lp_primary_nid)) {
1997                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1998                        libcfs_nid2str(prim_nid),
1999                        libcfs_nidstr(&lp->lp_primary_nid));
2000                 return -ENODEV;
2001         }
2002
2003         lnet_net_lock(LNET_LOCK_EX);
2004         if (lp->lp_rtr_refcount > 0) {
2005                 lnet_net_unlock(LNET_LOCK_EX);
2006                 CERROR("%s is a router. Cannot be deleted\n",
2007                        libcfs_nid2str(prim_nid));
2008                 return -EBUSY;
2009         }
2010         lnet_net_unlock(LNET_LOCK_EX);
2011
2012         if (nid == LNET_NID_ANY || nid == lnet_nid_to_nid4(&lp->lp_primary_nid))
2013                 return lnet_peer_del(lp);
2014
2015         flags = LNET_PEER_CONFIGURED;
2016         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2017                 flags |= LNET_PEER_MULTI_RAIL;
2018
2019         return lnet_peer_del_nid(lp, nid, flags);
2020 }
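
/*
 * Example (sketch): with the ln_api_mutex held, deleting a single NID
 * versus the whole peer through this entry point:
 *
 *	rc = lnet_del_peer_ni(prim_nid, nid);		(delete one NID)
 *	rc = lnet_del_peer_ni(prim_nid, LNET_NID_ANY);	(delete the peer)
 */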
2021
2022 void
2023 lnet_destroy_peer_ni_locked(struct kref *ref)
2024 {
2025         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2026                                                  lpni_kref);
2027         struct lnet_peer_table *ptable;
2028         struct lnet_peer_net *lpn;
2029
2030         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2031
2032         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2033         LASSERT(list_empty(&lpni->lpni_txq));
2034         LASSERT(lpni->lpni_txqnob == 0);
2035         LASSERT(list_empty(&lpni->lpni_peer_nis));
2036         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2037
2038         lpn = lpni->lpni_peer_net;
2039         lpni->lpni_peer_net = NULL;
2040         lpni->lpni_net = NULL;
2041
2042         if (!list_empty(&lpni->lpni_hashlist)) {
2043                 /* remove the peer ni from the zombie list */
2044                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2045                 spin_lock(&ptable->pt_zombie_lock);
2046                 list_del_init(&lpni->lpni_hashlist);
2047                 ptable->pt_zombies--;
2048                 spin_unlock(&ptable->pt_zombie_lock);
2049         }
2050
2051         if (lpni->lpni_pref_nnids > 1) {
2052                 struct lnet_nid_list *ne, *tmp;
2053
2054                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2055                                          nl_list) {
2056                         list_del_init(&ne->nl_list);
2057                         LIBCFS_FREE(ne, sizeof(*ne));
2058                 }
2059         }
2060         LIBCFS_FREE(lpni, sizeof(*lpni));
2061
2062         if (lpn)
2063                 lnet_peer_net_decref_locked(lpn);
2064 }
2065
2066 struct lnet_peer_ni *
2067 lnet_nid2peerni_ex(struct lnet_nid *nid, int cpt)
2068 {
2069         struct lnet_peer_ni *lpni = NULL;
2070         int rc;
2071
2072         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2073                 return ERR_PTR(-ESHUTDOWN);
2074
2075         /*
2076          * find if a peer_ni already exists.
2077          * If so then just return that.
2078          */
2079         lpni = lnet_peer_ni_find_locked(nid);
2080         if (lpni)
2081                 return lpni;
2082
2083         lnet_net_unlock(cpt);
2084
2085         rc = lnet_peer_ni_traffic_add(nid, NULL);
2086         if (rc) {
2087                 lpni = ERR_PTR(rc);
2088                 goto out_net_relock;
2089         }
2090
2091         lpni = lnet_peer_ni_find_locked(nid);
2092         LASSERT(lpni);
2093
2094 out_net_relock:
2095         lnet_net_lock(cpt);
2096
2097         return lpni;
2098 }
2099
2100 /*
2101  * Get a peer_ni for the given nid, create it if necessary. Takes a
2102  * hold on the peer_ni.
2103  */
2104 struct lnet_peer_ni *
2105 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2106                         struct lnet_nid *pref, int cpt)
2107 {
2108         struct lnet_peer_ni *lpni = NULL;
2109         int rc;
2110
2111         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2112                 return ERR_PTR(-ESHUTDOWN);
2113
2114         /*
2115          * find if a peer_ni already exists.
2116          * If so then just return that.
2117          */
2118         lpni = lnet_peer_ni_find_locked(nid);
2119         if (lpni)
2120                 return lpni;
2121
2122         /*
2123          * Slow path:
2124          * use the lnet_api_mutex to serialize the creation of the peer_ni
2125          * and the creation/deletion of the local ni/net. When a local ni is
2126          * created, if there exists a set of peer_nis on that network,
2127          * they need to be traversed and updated. When a local NI is
2128          * deleted, which could result in a network being deleted, then
2129          * all peer nis on that network need to be removed as well.
2130          *
2131          * Creation through traffic should also be serialized with
2132          * creation through DLC.
2133          */
2134         lnet_net_unlock(cpt);
2135         mutex_lock(&the_lnet.ln_api_mutex);
2136         /*
2137          * Shutdown is only set under the ln_api_lock, so a single
2138          * Shutdown is only set while holding the ln_api_mutex, so a single
2139          * check here is sufficient.
2140         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2141                 lpni = ERR_PTR(-ESHUTDOWN);
2142                 goto out_mutex_unlock;
2143         }
2144
2145         rc = lnet_peer_ni_traffic_add(nid, pref);
2146         if (rc) {
2147                 lpni = ERR_PTR(rc);
2148                 goto out_mutex_unlock;
2149         }
2150
2151         lpni = lnet_peer_ni_find_locked(nid);
2152         LASSERT(lpni);
2153
2154 out_mutex_unlock:
2155         mutex_unlock(&the_lnet.ln_api_mutex);
2156         lnet_net_lock(cpt);
2157
2158         /* Lock has been dropped, check again for shutdown. */
2159         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2160                 if (!IS_ERR(lpni))
2161                         lnet_peer_ni_decref_locked(lpni);
2162                 lpni = ERR_PTR(-ESHUTDOWN);
2163         }
2164
2165         return lpni;
2166 }
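
/*
 * Usage sketch: a caller on the data path already holds lnet_net_lock(cpt)
 * and must handle the ERR_PTR() return; the hold taken here is dropped with
 * lnet_peer_ni_decref_locked() when the caller is done:
 *
 *	lpni = lnet_peerni_by_nid_locked(&nid, NULL, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);
 *	...
 *	lnet_peer_ni_decref_locked(lpni);
 */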
2167
2168 struct lnet_peer_ni *
2169 lnet_nid2peerni_locked(lnet_nid_t nid4, lnet_nid_t pref4, int cpt)
2170 {
2171         struct lnet_nid nid, pref;
2172
2173         lnet_nid4_to_nid(nid4, &nid);
2174         lnet_nid4_to_nid(pref4, &pref);
2175         if (pref4 == LNET_NID_ANY)
2176                 return lnet_peerni_by_nid_locked(&nid, NULL, cpt);
2177         else
2178                 return lnet_peerni_by_nid_locked(&nid, &pref, cpt);
2179 }
2180
2181 bool
2182 lnet_peer_gw_discovery(struct lnet_peer *lp)
2183 {
2184         bool rc = false;
2185
2186         spin_lock(&lp->lp_lock);
2187         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2188                 rc = true;
2189         spin_unlock(&lp->lp_lock);
2190
2191         return rc;
2192 }
2193
2194 bool
2195 lnet_peer_is_uptodate(struct lnet_peer *lp)
2196 {
2197         bool rc;
2198
2199         spin_lock(&lp->lp_lock);
2200         rc = lnet_peer_is_uptodate_locked(lp);
2201         spin_unlock(&lp->lp_lock);
2202         return rc;
2203 }
2204
2205 /*
2206  * Is a peer uptodate from the point of view of discovery?
2207  *
2208  * If it is currently being processed, obviously not.
2209  * A forced Ping or Push is also handled by the discovery thread.
2210  *
2211  * Otherwise look at whether the peer needs rediscovering.
2212  */
2213 bool
2214 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2215 __must_hold(&lp->lp_lock)
2216 {
2217         bool rc;
2218
2219         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2220                             LNET_PEER_FORCE_PING |
2221                             LNET_PEER_FORCE_PUSH)) {
2222                 rc = false;
2223         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2224                 rc = false;
2225         } else if (lnet_peer_needs_push(lp)) {
2226                 rc = false;
2227         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2228                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2229                         rc = true;
2230                 else
2231                         rc = false;
2232         } else {
2233                 rc = false;
2234         }
2235
2236         return rc;
2237 }
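
/*
 * Equivalently (sketch), the cascade above reduces to a single expression:
 *
 *	uptodate = !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *				     LNET_PEER_FORCE_PING |
 *				     LNET_PEER_FORCE_PUSH |
 *				     LNET_PEER_REDISCOVER)) &&
 *		   !lnet_peer_needs_push(lp) &&
 *		   (lp->lp_state & LNET_PEER_DISCOVERED) &&
 *		   (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
 */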
2238
2239 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2240 void
2241 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2242 {
2243         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2244          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2245          * when adding to the list and queuing the peer to ensure that we do not
2246          * strand any messages on the lp_dc_pendq. This scheme ensures the
2247          * message will be resent even if the peer is already being discovered.
2248          * Therefore we needn't check the return value of
2249          * lnet_peer_queue_for_discovery(lp).
2250          */
2251         lnet_net_lock(LNET_LOCK_EX);
2252         spin_lock(&lp->lp_lock);
2253         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2254         spin_unlock(&lp->lp_lock);
2255         lnet_peer_queue_for_discovery(lp);
2256         lnet_net_unlock(LNET_LOCK_EX);
2257 }
2258
2259 /*
2260  * Queue a peer for the attention of the discovery thread.  Call with
2261  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2262  * -EALREADY if the peer was already queued.
2263  */
2264 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2265 {
2266         int rc;
2267
2268         spin_lock(&lp->lp_lock);
2269         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2270                 lp->lp_state |= LNET_PEER_DISCOVERING;
2271         spin_unlock(&lp->lp_lock);
2272         if (list_empty(&lp->lp_dc_list)) {
2273                 lnet_peer_addref_locked(lp);
2274                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2275                 wake_up(&the_lnet.ln_dc_waitq);
2276                 rc = 0;
2277         } else {
2278                 rc = -EALREADY;
2279         }
2280
2281         CDEBUG(D_NET, "Queue peer %s: %d\n",
2282                libcfs_nidstr(&lp->lp_primary_nid), rc);
2283
2284         return rc;
2285 }
2286
2287 /*
2288  * Discovery of a peer is complete. Wake all waiters on the peer.
2289  * Call with lnet_net_lock/EX held.
2290  */
2291 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2292 {
2293         struct lnet_msg *msg, *tmp;
2294         int rc = 0;
2295         LIST_HEAD(pending_msgs);
2296
2297         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2298                libcfs_nidstr(&lp->lp_primary_nid));
2299
2300         list_del_init(&lp->lp_dc_list);
2301         spin_lock(&lp->lp_lock);
2302         if (dc_error) {
2303                 lp->lp_dc_error = dc_error;
2304                 lp->lp_state &= ~LNET_PEER_DISCOVERING;
2305                 lp->lp_state |= LNET_PEER_REDISCOVER;
2306         }
2307         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2308         spin_unlock(&lp->lp_lock);
2309         wake_up(&lp->lp_dc_waitq);
2310
2311         if (lp->lp_rtr_refcount > 0)
2312                 lnet_router_discovery_complete(lp);
2313
2314         lnet_net_unlock(LNET_LOCK_EX);
2315
2316         /* iterate through all pending messages and send them again */
2317         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2318                 list_del_init(&msg->msg_list);
2319                 if (dc_error) {
2320                         lnet_finalize(msg, dc_error);
2321                         continue;
2322                 }
2323
2324                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2325                        lnet_msgtyp2str(msg->msg_type),
2326                        libcfs_idstr(&msg->msg_target));
2327                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2328                                &msg->msg_rtr_nid_param);
2329                 if (rc < 0) {
2330                         CNETERR("Error sending %s to %s: %d\n",
2331                                lnet_msgtyp2str(msg->msg_type),
2332                                libcfs_idstr(&msg->msg_target), rc);
2333                         lnet_finalize(msg, rc);
2334                 }
2335         }
2336         lnet_net_lock(LNET_LOCK_EX);
2337         lnet_peer_decref_locked(lp);
2338 }
2339
2340 /*
2341  * Handle inbound push.
2342  * Like any event handler, called with lnet_res_lock/CPT held.
2343  */
2344 void lnet_peer_push_event(struct lnet_event *ev)
2345 {
2346         struct lnet_ping_buffer *pbuf;
2347         struct lnet_peer *lp;
2348
2349         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2350
2351         /* lnet_find_peer() adds a refcount */
2352         lp = lnet_find_peer(&ev->source.nid);
2353         if (!lp) {
2354                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2355                        libcfs_nidstr(&ev->initiator.nid),
2356                        libcfs_nidstr(&ev->source.nid));
2357                 pbuf->pb_needs_post = true;
2358                 return;
2359         }
2360
2361         /* Ensure peer state remains consistent while we modify it. */
2362         spin_lock(&lp->lp_lock);
2363
2364         /*
2365          * If some kind of error happened the contents of the message
2366          * cannot be used. Clear the NIDS_UPTODATE and set the
2367          * FORCE_PING flag to trigger a ping.
2368          */
2369         if (ev->status) {
2370                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2371                 lp->lp_state |= LNET_PEER_FORCE_PING;
2372                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2373                        ev->status,
2374                        libcfs_nidstr(&lp->lp_primary_nid),
2375                        libcfs_nidstr(&ev->source.nid));
2376                 goto out;
2377         }
2378
2379         /*
2380          * A push with invalid or corrupted info. Clear the UPTODATE
2381          * flag to trigger a ping.
2382          */
2383         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2384                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2385                 lp->lp_state |= LNET_PEER_FORCE_PING;
2386                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2387                        libcfs_nidstr(&lp->lp_primary_nid));
2388                 goto out;
2389         }
2390
2391         /*
2392          * Make sure we'll allocate the correct size ping buffer when
2393          * pinging the peer.
2394          */
2395         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2396                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2397
2398         /*
2399          * A non-Multi-Rail peer is not supposed to be capable of
2400          * sending a push.
2401          */
2402         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2403                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2404                        libcfs_nidstr(&lp->lp_primary_nid));
2405                 goto out;
2406         }
2407
2408         /*
2409          * The peer may have discovery disabled at its end. Set
2410          * NO_DISCOVERY as appropriate.
2411          */
2412         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2413                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2414                        libcfs_nidstr(&lp->lp_primary_nid));
2415                 /*
2416                  * Mark the peer for deletion if we already know about it
2417                  * and it is going from discovery enabled to discovery disabled
2418                  */
2419                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2420                                       LNET_PEER_DISCOVERING)) &&
2421                      lp->lp_state & LNET_PEER_DISCOVERED) {
2422                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2423                                libcfs_nidstr(&lp->lp_primary_nid),
2424                                lp->lp_state);
2425                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2426                 }
2427                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2428         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2429                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2430                        libcfs_nidstr(&lp->lp_primary_nid));
2431                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2432         }
2433
2434         /*
2435          * Update the MULTI_RAIL flag based on the push. If the peer
2436          * was configured with DLC then the setting should match what
2437          * DLC put in.
2438          * NB: We verified above that the MR feature bit is set in pi_features
2439          */
2440         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2441                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2442                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2443         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2444                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2445                       libcfs_nidstr(&lp->lp_primary_nid));
2446         } else if (lnet_peer_discovery_disabled) {
2447                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2448                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2449         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2450                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2451                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2452         } else {
2453                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2454                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2455                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2456                 lnet_peer_clr_non_mr_pref_nids(lp);
2457         }
2458
2459         /*
2460          * Check for truncation of the Put message. Clear the
2461          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2462          * and tell discovery to allocate a bigger buffer.
2463          */
2464         if (ev->mlength < ev->rlength) {
2465                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2466                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2467                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2468                 lp->lp_state |= LNET_PEER_FORCE_PING;
2469                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2470                        libcfs_nidstr(&lp->lp_primary_nid),
2471                        pbuf->pb_info.pi_nnis);
2472                 goto out;
2473         }
2474
2475         /* always assume new data */
2476         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2477         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2478
2479         /*
2480          * If there is data present that hasn't been processed yet,
2481          * we'll replace it if the Put contained newer data and it
2482          * fits. We're racing with a Ping or earlier Push in this
2483          * case.
2484          */
2485         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2486                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2487                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2488                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2489                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2490                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2491                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2492                               libcfs_nidstr(&lp->lp_primary_nid),
2493                               LNET_PING_BUFFER_SEQNO(pbuf),
2494                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2495                 }
2496                 goto out;
2497         }
2498
2499         /*
2500          * Allocate a buffer to copy the data. On a failure we drop
2501          * the Push and set FORCE_PING to force the discovery
2502          * thread to fix the problem by pinging the peer.
2503          */
2504         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2505         if (!lp->lp_data) {
2506                 lp->lp_state |= LNET_PEER_FORCE_PING;
2507                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2508                        libcfs_nidstr(&lp->lp_primary_nid),
2509                        LNET_PING_BUFFER_SEQNO(pbuf));
2510                 goto out;
2511         }
2512
2513         /* Success */
2514         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2515                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2516         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2517         CDEBUG(D_NET, "Received Push %s %u\n",
2518                libcfs_nidstr(&lp->lp_primary_nid),
2519                LNET_PING_BUFFER_SEQNO(pbuf));
2520
2521 out:
2522         /* We've processed this buffer. It can be reposted */
2523         pbuf->pb_needs_post = true;
2524
2525         /*
2526          * Queue the peer for discovery if it is not up to date. If the peer
2527          * was already queued, move it back onto the request queue and wake
2528          * the discovery thread, because its status changed.
2529          */
2530         spin_unlock(&lp->lp_lock);
2531         lnet_net_lock(LNET_LOCK_EX);
2532         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2533                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2534                 wake_up(&the_lnet.ln_dc_waitq);
2535         }
2536         /* Drop refcount from lookup */
2537         lnet_peer_decref_locked(lp);
2538         lnet_net_unlock(LNET_LOCK_EX);
2539 }
2540
2541 /*
2542  * Clear the discovery error state, unless we're already discovering
2543  * this peer, in which case the error is current.
2544  */
2545 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2546 {
2547         spin_lock(&lp->lp_lock);
2548         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2549                 lp->lp_dc_error = 0;
2550         spin_unlock(&lp->lp_lock);
2551 }
2552
2553 /*
2554  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2555  * dropped/retaken within this function. An lnet_peer_ni is passed in
2556  * because discovery could tear down an lnet_peer.
2557  */
2558 int
2559 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2560 {
2561         DEFINE_WAIT(wait);
2562         struct lnet_peer *lp;
2563         int rc = 0;
2564         int count = 0;
2565
2566 again:
2567         lnet_net_unlock(cpt);
2568         lnet_net_lock(LNET_LOCK_EX);
2569         lp = lpni->lpni_peer_net->lpn_peer;
2570         lnet_peer_clear_discovery_error(lp);
2571
2572         /*
2573          * We're willing to be interrupted. The lpni can become a
2574          * zombie if we race with DLC, so we must check for that.
2575          */
2576         for (;;) {
2577                 /* Keep lp alive when the lnet_net_lock is unlocked */
2578                 lnet_peer_addref_locked(lp);
2579                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2580                 if (signal_pending(current))
2581                         break;
2582                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2583                         break;
2584                 /*
2585                  * Don't repeat discovery if discovery is disabled. This is
2586                  * done to ensure we can use discovery as a standard ping as
2587                  * well, for backwards compatibility with routers which do not
2588                  * have discovery or have discovery disabled.
2589                  */
2590                 if (lnet_is_discovery_disabled(lp) && count > 0)
2591                         break;
2592                 if (lp->lp_dc_error)
2593                         break;
2594                 if (lnet_peer_is_uptodate(lp))
2595                         break;
2596                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2597                         break;
2598                 lnet_peer_queue_for_discovery(lp);
2599                 count++;
2600                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2601
2602                 /*
2603                  * If caller requested a non-blocking operation then
2604                  * return immediately. Once discovery is complete any
2605                  * pending messages that were stopped due to discovery
2606                  * will be transmitted.
2607                  */
2608                 if (!block)
2609                         break;
2610
2611                 lnet_net_unlock(LNET_LOCK_EX);
2612                 schedule();
2613                 finish_wait(&lp->lp_dc_waitq, &wait);
2614                 lnet_net_lock(LNET_LOCK_EX);
2615                 lnet_peer_decref_locked(lp);
2616                 /* Peer may have changed */
2617                 lp = lpni->lpni_peer_net->lpn_peer;
2618         }
2619         finish_wait(&lp->lp_dc_waitq, &wait);
2620
2621         lnet_net_unlock(LNET_LOCK_EX);
2622         lnet_net_lock(cpt);
2623         lnet_peer_decref_locked(lp);
2624         /*
2625          * The peer may have changed, so re-check and rediscover if that turns
2626          * out to have been the case. The reference count on lp ensured that
2627          * even if it was unlinked from lpni the memory could not be recycled.
2628          * Thus the check below is sufficient to determine whether the peer
2629          * changed. If the peer changed, then lp must not be dereferenced.
2630          */
2631         if (lp != lpni->lpni_peer_net->lpn_peer)
2632                 goto again;
2633
2634         if (signal_pending(current))
2635                 rc = -EINTR;
2636         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2637                 rc = -ESHUTDOWN;
2638         else if (lp->lp_dc_error)
2639                 rc = lp->lp_dc_error;
2640         else if (!block)
2641                 CDEBUG(D_NET, "non-blocking discovery\n");
2642         else if (!lnet_peer_is_uptodate(lp) &&
2643                  !(lnet_is_discovery_disabled(lp) ||
2644                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2645                 goto again;
2646
2647         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2648                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2649                libcfs_nidstr(&lpni->lpni_nid), rc,
2650                (!block) ? "pending discovery" : "discovery complete");
2651
2652         return rc;
2653 }
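
/*
 * Blocking-caller sketch: LNetPrimaryNID() above calls this with
 * block == true (ln_api_mutex held on entry, as noted above). Because the
 * lpni may be re-parented while discovery runs, the caller drops its ref
 * and looks the lpni up again afterwards:
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	if (rc)
 *		goto out_decref;
 *	lnet_peer_ni_decref_locked(lpni);
 *	lpni = lnet_find_peer_ni_locked(nid);
 */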
2654
2655 /* Handle an incoming ack for a push. */
2656 static void
2657 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2658 {
2659         struct lnet_ping_buffer *pbuf;
2660
2661         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2662         spin_lock(&lp->lp_lock);
2663         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2664         lp->lp_push_error = ev->status;
2665         if (ev->status)
2666                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2667         else
2668                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2669         spin_unlock(&lp->lp_lock);
2670
2671         CDEBUG(D_NET, "peer %s ev->status %d\n",
2672                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2673 }
2674
2675 /* Handle a Reply message. This is the reply to a Ping message. */
2676 static void
2677 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2678 {
2679         struct lnet_ping_buffer *pbuf;
2680         int rc;
2681
2682         spin_lock(&lp->lp_lock);
2683
2684         lp->lp_disc_src_nid = ev->target.nid;
2685         lp->lp_disc_dst_nid = ev->source.nid;
2686
2687         /*
2688          * If some kind of error happened the contents of message
2689          * cannot be used. Set PING_FAILED to trigger a retry.
2690          */
2691         if (ev->status) {
2692                 lp->lp_state |= LNET_PEER_PING_FAILED;
2693                 lp->lp_ping_error = ev->status;
2694                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2695                        ev->status,
2696                        libcfs_nidstr(&lp->lp_primary_nid),
2697                        libcfs_nidstr(&ev->source.nid));
2698                 goto out;
2699         }
2700
2701         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2702         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2703                 lnet_swap_pinginfo(pbuf);
2704
2705         /*
2706          * A reply with invalid or corrupted info. Set PING_FAILED to
2707          * trigger a retry.
2708          */
2709         rc = lnet_ping_info_validate(&pbuf->pb_info);
2710         if (rc) {
2711                 lp->lp_state |= LNET_PEER_PING_FAILED;
2712                 lp->lp_ping_error = 0;
2713                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2714                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2715                 goto out;
2716         }
2717
2718         /*
2719          * The peer may have discovery disabled at its end. Set
2720          * NO_DISCOVERY as appropriate.
2721          */
2722         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2723             lnet_peer_discovery_disabled) {
2724                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2725                        libcfs_nidstr(&lp->lp_primary_nid));
2726
2727                 /* Detect whether this peer has toggled discovery from on to
2728                  * off and whether we can delete and re-create the peer. Peers
2729                  * that were manually configured cannot be deleted by discovery.
2730                  * We need to delete this peer and re-create it if the peer was
2731                  * not configured manually, is currently considered DD capable,
2732                  * and either:
2733                  * 1. We've already discovered the peer (the peer has toggled
2734                  *    the discovery feature from on to off), or
2735                  * 2. The peer is considered MR, but it was not user configured
2736                  *    (this was a "temporary" peer created via the kernel APIs
2737                  *     that we're discovering for the first time)
2738                  */
2739                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2740                                       LNET_PEER_NO_DISCOVERY)) &&
2741                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2742                                      LNET_PEER_MULTI_RAIL))) {
2743                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2744                                libcfs_nidstr(&lp->lp_primary_nid),
2745                                lp->lp_state);
2746                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2747                 }
2748                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2749         } else {
2750                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2751                        libcfs_nidstr(&lp->lp_primary_nid));
2752                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2753         }
2754
2755         /*
2756          * Update the MULTI_RAIL flag based on the reply. If the peer
2757          * was configured with DLC then the setting should match what
2758          * DLC put in.
2759          */
2760         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2761                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2762                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2763                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2764                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2765                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2766                               libcfs_nidstr(&lp->lp_primary_nid));
2767                 } else if (lnet_peer_discovery_disabled) {
2768                         CDEBUG(D_NET,
2769                                "peer %s(%p) not MR: DD disabled locally\n",
2770                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2771                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2772                         CDEBUG(D_NET,
2773                                "peer %s(%p) not MR: DD disabled remotely\n",
2774                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2775                 } else {
2776                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2777                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2778                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2779                         lnet_peer_clr_non_mr_pref_nids(lp);
2780                 }
2781         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2782                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2783                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2784                               libcfs_nidstr(&lp->lp_primary_nid));
2785                 } else {
2786                         CERROR("Multi-Rail state vanished from %s\n",
2787                                libcfs_nidstr(&lp->lp_primary_nid));
2788                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2789                 }
2790         }
2791
2792         /*
2793          * Make sure we'll allocate the correct size ping buffer when
2794          * pinging the peer.
2795          */
2796         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2797                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2798
2799         /*
2800          * Check for truncation of the Reply. Clear PING_SENT and set
2801          * PING_FAILED to trigger a retry.
2802          */
2803         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2804                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2805                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2806                 lp->lp_state |= LNET_PEER_PING_FAILED;
2807                 lp->lp_ping_error = 0;
2808                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2809                        libcfs_nidstr(&lp->lp_primary_nid),
2810                        pbuf->pb_info.pi_nnis);
2811                 goto out;
2812         }
2813
2814         /*
2815          * Check the sequence numbers in the reply. These are only
2816          * available if the reply came from a Multi-Rail peer.
2817          */
2818         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2819             pbuf->pb_info.pi_nnis > 1 &&
2820             lnet_nid_to_nid4(&lp->lp_primary_nid) ==
2821             pbuf->pb_info.pi_ni[1].ns_nid) {
2822                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2823                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2824                                 libcfs_nidstr(&lp->lp_primary_nid),
2825                                 LNET_PING_BUFFER_SEQNO(pbuf),
2826                                 lp->lp_peer_seqno);
2827
2828                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2829         }
2830
2831         /* We're happy with the state of the data in the buffer. */
2832         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2833                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2834                lp->lp_state);
2835         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2836                 lnet_ping_buffer_decref(lp->lp_data);
2837         else
2838                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2839         lnet_ping_buffer_addref(pbuf);
2840         lp->lp_data = pbuf;
2841 out:
2842         lp->lp_state &= ~LNET_PEER_PING_SENT;
2843         spin_unlock(&lp->lp_lock);
2844
2845         lnet_net_lock(LNET_LOCK_EX);
2846         /*
2847          * If this peer is a gateway, call the routing callback to
2848          * handle the ping reply
2849          */
2850         if (lp->lp_rtr_refcount > 0)
2851                 lnet_router_discovery_ping_reply(lp);
2852         lnet_net_unlock(LNET_LOCK_EX);
2853 }
2854
2855 /*
2856  * Send event handling. Only matters for error cases, where we clean
2857  * up state on the peer and peer_ni that would otherwise be updated in
2858  * the REPLY event handler for a successful Ping, and the ACK event
2859  * handler for a successful Push.
2860  */
2861 static int
2862 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2863 {
2864         int rc = 0;
2865
2866         if (!ev->status)
2867                 goto out;
2868
2869         spin_lock(&lp->lp_lock);
2870         if (ev->msg_type == LNET_MSG_GET) {
2871                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2872                 lp->lp_state |= LNET_PEER_PING_FAILED;
2873                 lp->lp_ping_error = ev->status;
2874         } else { /* ev->msg_type == LNET_MSG_PUT */
2875                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2876                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2877                 lp->lp_push_error = ev->status;
2878         }
2879         spin_unlock(&lp->lp_lock);
2880         rc = LNET_REDISCOVER_PEER;
2881 out:
2882         CDEBUG(D_NET, "%s Send to %s: %d\n",
2883                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2884                 libcfs_nidstr(&ev->target.nid), rc);
2885         return rc;
2886 }
2887
2888 /*
2889  * Unlink event handling. This event is only seen if a call to
2890  * LNetMDUnlink() caused the event to be unlinked. If this call was
2891  * made after the event was set up in LNetGet() or LNetPut() then we
2892  * assume the Ping or Push timed out.
2893  */
2894 static void
2895 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2896 {
2897         spin_lock(&lp->lp_lock);
2898         /* We've passed through LNetGet() */
2899         if (lp->lp_state & LNET_PEER_PING_SENT) {
2900                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2901                 lp->lp_state |= LNET_PEER_PING_FAILED;
2902                 lp->lp_ping_error = -ETIMEDOUT;
2903                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2904                         libcfs_nidstr(&lp->lp_primary_nid));
2905         }
2906         /* We've passed through LNetPut() */
2907         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2908                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2909                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2910                 lp->lp_push_error = -ETIMEDOUT;
2911                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2912                         libcfs_nidstr(&lp->lp_primary_nid));
2913         }
2914         spin_unlock(&lp->lp_lock);
2915 }
2916
2917 /*
2918  * Event handler for the discovery EQ.
2919  *
2920  * Called with lnet_res_lock(cpt) held. The cpt is the
2921  * lnet_cpt_of_cookie() of the md handle cookie.
2922  */
2923 static void lnet_discovery_event_handler(struct lnet_event *event)
2924 {
2925         struct lnet_peer *lp = event->md_user_ptr;
2926         struct lnet_ping_buffer *pbuf;
2927         int rc;
2928
2929         /* discovery needs to take another look */
2930         rc = LNET_REDISCOVER_PEER;
2931
2932         CDEBUG(D_NET, "Received event: %d\n", event->type);
2933
2934         switch (event->type) {
2935         case LNET_EVENT_ACK:
2936                 lnet_discovery_event_ack(lp, event);
2937                 break;
2938         case LNET_EVENT_REPLY:
2939                 lnet_discovery_event_reply(lp, event);
2940                 break;
2941         case LNET_EVENT_SEND:
2942                 /* Only send failure triggers a retry. */
2943                 rc = lnet_discovery_event_send(lp, event);
2944                 break;
2945         case LNET_EVENT_UNLINK:
2946                 /* LNetMDUnlink() was called */
2947                 lnet_discovery_event_unlink(lp, event);
2948                 break;
2949         default:
2950                 /* Invalid events. */
2951                 LBUG();
2952         }
2953         lnet_net_lock(LNET_LOCK_EX);
2954         if (event->unlinked) {
2955                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2956                 lnet_ping_buffer_decref(pbuf);
2957                 lnet_peer_decref_locked(lp);
2958         }
2959
2960         /* put peer back at end of request queue, if discovery not already
2961          * done */
2962         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2963             lnet_peer_queue_for_discovery(lp)) {
2964                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2965                 wake_up(&the_lnet.ln_dc_waitq);
2966         }
2967         lnet_net_unlock(LNET_LOCK_EX);
2968 }
2969
2970 /*
2971  * Build a peer from incoming data.
2972  *
2973  * The NIDs in the incoming data are supposed to be structured as follows:
2974  *  - loopback
2975  *  - primary NID
2976  *  - other NIDs in same net
2977  *  - NIDs in second net
2978  *  - NIDs in third net
2979  *  - ...
2980  * This is due to the way the list of NIDs in the data is created.
2981  *
2982  * Note that this function will mark the peer uptodate unless an
2983  * ENOMEM is encountered. All other errors are due to a conflict
2984  * between the DLC configuration and what discovery sees. We treat DLC
2985  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2986  * peer from becoming stuck in discovery.
2987  */
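     /*
      * Illustrative example (hypothetical NIDs): a multi-rail peer on
      * two networks might report
      *   pi_ni[0] = 0@lo              (loopback)
      *   pi_ni[1] = 192.168.1.1@tcp   (primary NID)
      *   pi_ni[2] = 192.168.1.2@tcp
      *   pi_ni[3] = 10.10.0.1@o2ib
      * which is why the merge logic below skips index 0 and treats
      * pi_ni[1] as the primary NID.
      */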
2988 static int lnet_peer_merge_data(struct lnet_peer *lp,
2989                                 struct lnet_ping_buffer *pbuf)
2990 {
2991         struct lnet_peer_net *lpn;
2992         struct lnet_peer_ni *lpni;
2993         lnet_nid_t *curnis = NULL;
2994         struct lnet_ni_status *addnis = NULL;
2995         lnet_nid_t *delnis = NULL;
2996         unsigned flags;
2997         int ncurnis;
2998         int naddnis;
2999         int ndelnis;
3000         int nnis = 0;
3001         int i;
3002         int j;
3003         int rc;
3004
3005         flags = LNET_PEER_DISCOVERED;
3006         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3007                 flags |= LNET_PEER_MULTI_RAIL;
3008
3009         /*
3010          * Cache the routing feature for the peer: whether it is enabled
3011          * or disabled as reported by the remote peer.
3012          */
3013         spin_lock(&lp->lp_lock);
3014         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3015                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3016         else
3017                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3018         spin_unlock(&lp->lp_lock);
3019
3020         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
3021         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3022         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3023         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3024         if (!curnis || !addnis || !delnis) {
3025                 rc = -ENOMEM;
3026                 goto out;
3027         }
3028         ncurnis = 0;
3029         naddnis = 0;
3030         ndelnis = 0;
3031
3032         /* Construct the list of NIDs present in peer. */
3033         lpni = NULL;
3034         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3035                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
3036
3037         /*
3038          * Check for NIDs in pbuf not present in curnis[].
3039          * The loop starts at 1 to skip the loopback NID.
3040          */
3041         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
3042                 for (j = 0; j < ncurnis; j++)
3043                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
3044                                 break;
3045                 if (j == ncurnis)
3046                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
3047         }
3048         /*
3049          * Check for NIDs in curnis[] not present in pbuf.
3050          * The nested loop starts at 1 to skip the loopback NID.
3051          *
3052          * But never add the loopback NID to delnis[]: if it is
3053          * present in curnis[] then this peer is for this node.
3054          */
3055         for (i = 0; i < ncurnis; i++) {
3056                 if (curnis[i] == LNET_NID_LO_0)
3057                         continue;
3058                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3059                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3060                                 /*
3061                                  * update the information we cache for the
3062                                  * peer with the latest information we
3063                                  * received
3064                                  */
3065                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
3066                                 if (lpni) {
3067                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3068                                         lnet_peer_ni_decref_locked(lpni);
3069                                 }
3070                                 break;
3071                         }
3072                 }
3073                 if (j == pbuf->pb_info.pi_nnis)
3074                         delnis[ndelnis++] = curnis[i];
3075         }
3076
3077         /*
3078          * If we get here and discovery is disabled then we don't want
3079          * to add or delete any NIs. We have already updated the ones we
3080          * have information on, so call it a day.
3081          */
3082         rc = 0;
3083         if (lnet_is_discovery_disabled(lp))
3084                 goto out;
3085
3086         for (i = 0; i < naddnis; i++) {
3087                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3088                 if (rc) {
3089                         CERROR("Error adding NID %s to peer %s: %d\n",
3090                                libcfs_nid2str(addnis[i].ns_nid),
3091                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3092                         if (rc == -ENOMEM)
3093                                 goto out;
3094                 }
3095                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3096                 if (lpni) {
3097                         lpni->lpni_ns_status = addnis[i].ns_status;
3098                         lnet_peer_ni_decref_locked(lpni);
3099                 }
3100         }
3101
3102         for (i = 0; i < ndelnis; i++) {
3103                 /*
3104                  * for routers it's okay to delete the primary_nid because
3105                  * the upper layers don't really rely on it. So if we're
3106                  * being told that the router changed its primary_nid
3107                  * then it's okay to delete it.
3108                  */
3109                 if (lp->lp_rtr_refcount > 0)
3110                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3111                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3112                 if (rc) {
3113                         CERROR("Error deleting NID %s from peer %s: %d\n",
3114                                libcfs_nid2str(delnis[i]),
3115                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3116                         if (rc == -ENOMEM)
3117                                 goto out;
3118                 }
3119         }
3120
3121         /* The peer net for the primary NID should be the first entry in the
3122          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3123          * be the first entry in its peer net's lpn_peer_nis list.
3124          */
3125         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3126         if (!lpni) {
3127                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3128                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3129                 goto out;
3130         }
3131
3132         lnet_peer_ni_decref_locked(lpni);
3133
3134         lpn = lpni->lpni_peer_net;
3135         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3136                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3137
3138         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3139                 list_move(&lpni->lpni_peer_nis,
3140                           &lpni->lpni_peer_net->lpn_peer_nis);
3141
3142         /*
3143          * Errors other than -ENOMEM are due to peers having been
3144          * configured with DLC. Ignore these because DLC overrides
3145          * Discovery.
3146          */
3147         rc = 0;
3148 out:
3149         CFS_FREE_PTR_ARRAY(curnis, nnis);
3150         CFS_FREE_PTR_ARRAY(addnis, nnis);
3151         CFS_FREE_PTR_ARRAY(delnis, nnis);
3152         lnet_ping_buffer_decref(pbuf);
3153         CDEBUG(D_NET, "peer %s (%p): %d\n",
3154                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3155
3156         if (rc) {
3157                 spin_lock(&lp->lp_lock);
3158                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3159                 lp->lp_state |= LNET_PEER_FORCE_PING;
3160                 spin_unlock(&lp->lp_lock);
3161         }
3162         return rc;
3163 }
3164
3165 /*
3166  * The data in pbuf says lp is its primary peer, but the data was
3167  * received by a different peer. Try to update lp with the data.
3168  */
3169 static int
3170 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3171 {
3172         struct lnet_handle_md mdh;
3173
3174         /* Queue lp for discovery, and force it on the request queue. */
3175         lnet_net_lock(LNET_LOCK_EX);
3176         if (lnet_peer_queue_for_discovery(lp))
3177                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3178         lnet_net_unlock(LNET_LOCK_EX);
3179
3180         LNetInvalidateMDHandle(&mdh);
3181
3182         /*
3183          * Decide whether we can move the peer to the DATA_PRESENT state.
3184          *
3185          * We replace stale data for a multi-rail peer, repair PING_FAILED
3186          * status, and preempt FORCE_PING.
3187          *
3188          * If after that we have DATA_PRESENT, we merge it into this peer.
3189          */
3190         spin_lock(&lp->lp_lock);
3191         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3192                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3193                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3194                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3195                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3196                         lnet_ping_buffer_decref(pbuf);
3197                         pbuf = lp->lp_data;
3198                         lp->lp_data = NULL;
3199                 }
3200         }
3201         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3202                 lnet_ping_buffer_decref(lp->lp_data);
3203                 lp->lp_data = NULL;
3204                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3205         }
3206         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3207                 mdh = lp->lp_ping_mdh;
3208                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3209                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3210                 lp->lp_ping_error = 0;
3211         }
3212         if (lp->lp_state & LNET_PEER_FORCE_PING)
3213                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3214         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3215         spin_unlock(&lp->lp_lock);
3216
3217         if (!LNetMDHandleIsInvalid(mdh))
3218                 LNetMDUnlink(mdh);
3219
3220         if (pbuf)
3221                 return lnet_peer_merge_data(lp, pbuf);
3222
3223         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3224         return 0;
3225 }
3226
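     /* Return true if @nid appears in the list of NIDs in @pinfo. */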
3227 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3228 {
3229         int i;
3230
3231         for (i = 0; i < pinfo->pi_nnis; i++) {
3232                 if (pinfo->pi_ni[i].ns_nid == nid)
3233                         return true;
3234         }
3235
3236         return false;
3237 }
3238
3239 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3240  * to the discovery queue a reference was taken that will prevent the peer from
3241  * actually being freed by this function. After this function exits the
3242  * discovery thread should call lnet_peer_discovery_complete() which will
3243  * drop that reference as well as wake any waiters that may also be holding a
3244  * ref on the peer
3245  */
3246 static int lnet_peer_deletion(struct lnet_peer *lp)
3247 __must_hold(&lp->lp_lock)
3248 {
3249         struct list_head rlist;
3250         struct lnet_route *route, *tmp;
3251         int sensitivity = lp->lp_health_sensitivity;
3252         int rc;
3253
3254         INIT_LIST_HEAD(&rlist);
3255
3256         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3257                           LNET_PEER_FORCE_PUSH);
3258         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3259                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3260
3261         /* no-op if lnet_peer_del() has already been called on this peer */
3262         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3263                 return 0;
3264
3265         spin_unlock(&lp->lp_lock);
3266
3267         mutex_lock(&the_lnet.ln_api_mutex);
3268         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3269             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3270                 mutex_unlock(&the_lnet.ln_api_mutex);
3271                 spin_lock(&lp->lp_lock);
3272                 return -ESHUTDOWN;
3273         }
3274
3275         lnet_net_lock(LNET_LOCK_EX);
3276         /* remove the peer from the discovery work
3277          * queue if it's there, in preparation for
3278          * deleting it.
3279          */
3280         if (!list_empty(&lp->lp_dc_list))
3281                 list_del_init(&lp->lp_dc_list);
3282         list_for_each_entry_safe(route, tmp,
3283                                  &lp->lp_routes,
3284                                  lr_gwlist)
3285                 lnet_move_route(route, NULL, &rlist);
3286         lnet_net_unlock(LNET_LOCK_EX);
3287
3288         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3289         rc = lnet_peer_del(lp);
3290         if (rc)
3291                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3292                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3293
3294         list_for_each_entry_safe(route, tmp,
3295                                  &rlist, lr_list) {
3296                 /* re-add these routes */
3297                 lnet_add_route(route->lr_net,
3298                                route->lr_hops,
3299                                &route->lr_nid,
3300                                route->lr_priority,
3301                                sensitivity);
3302                 LIBCFS_FREE(route, sizeof(*route));
3303         }
3304
3305         mutex_unlock(&the_lnet.ln_api_mutex);
3306
3307         spin_lock(&lp->lp_lock);
3308
3309         return 0;
3310 }
3311
3312 /*
3313  * Update a peer using the data received.
3314  */
3315 static int lnet_peer_data_present(struct lnet_peer *lp)
3316 __must_hold(&lp->lp_lock)
3317 {
3318         struct lnet_ping_buffer *pbuf;
3319         struct lnet_peer_ni *lpni;
3320         lnet_nid_t nid = LNET_NID_ANY;
3321         unsigned flags;
3322         int rc = 0;
3323
3324         pbuf = lp->lp_data;
3325         lp->lp_data = NULL;
3326         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3327         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3328         spin_unlock(&lp->lp_lock);
3329
3330         /*
3331          * Modifications of peer structures are done while holding the
3332          * ln_api_mutex. A global lock is required because we may be
3333          * modifying multiple peer structures, and a mutex greatly
3334          * simplifies memory management.
3335          *
3336          * The actual changes to the data structures must also protect
3337          * against concurrent lookups, for which the lnet_net_lock in
3338          * LNET_LOCK_EX mode is used.
3339          */
3340         mutex_lock(&the_lnet.ln_api_mutex);
3341         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3342                 rc = -ESHUTDOWN;
3343                 goto out;
3344         }
3345
3346         /*
3347          * If this peer is not on the peer list then it is being torn
3348          * down, and our reference count may be all that is keeping it
3349          * alive. Don't do any work on it.
3350          */
3351         if (list_empty(&lp->lp_peer_list))
3352                 goto out;
3353
3354         flags = LNET_PEER_DISCOVERED;
3355         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3356                 flags |= LNET_PEER_MULTI_RAIL;
3357
3358         /*
3359          * Check whether the primary NID in the message matches the
3360          * primary NID of the peer. If it does, update the peer, if
3361  * it does not, check whether there is already a peer with
3362          * that primary NID. If no such peer exists, try to update
3363          * the primary NID of the current peer (allowed if it was
3364          * created due to message traffic) and complete the update.
3365          * If the peer did exist, hand off the data to it.
3366          *
3367          * The peer for the loopback interface is a special case: this
3368          * is the peer for the local node, and we want to set its
3369          * primary NID to the correct value here. Moreover, this peer
3370          * can show up with only the loopback NID in the ping buffer.
3371          */
3372         if (pbuf->pb_info.pi_nnis <= 1) {
3373                 lnet_ping_buffer_decref(pbuf);
3374                 goto out;
3375         }
3376         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3377         if (nid_is_lo0(&lp->lp_primary_nid)) {
3378                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3379                 if (!rc)
3380                         rc = lnet_peer_merge_data(lp, pbuf);
3381         /*
3382          * If the primary NID of the peer is present in the ping info
3383          * returned from the peer, but it is not the primary NID we have
3384          * cached locally, and discovery is disabled, then we don't want
3385          * to update our local peer info by adding or removing NIDs; we
3386          * just want to update the status of the NIDs that we currently
3387          * have recorded in that peer.
3388          */
3389         } else if (lnet_nid_to_nid4(&lp->lp_primary_nid) == nid ||
3390                    (lnet_is_nid_in_ping_info(lnet_nid_to_nid4(&lp->lp_primary_nid),
3391                                              &pbuf->pb_info) &&
3392                     lnet_is_discovery_disabled(lp))) {
3393                 rc = lnet_peer_merge_data(lp, pbuf);
3394         } else {
3395                 lpni = lnet_find_peer_ni_locked(nid);
3396                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3397                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3398                         if (rc) {
3399                                 CERROR("Primary NID error %s versus %s: %d\n",
3400                                        libcfs_nidstr(&lp->lp_primary_nid),
3401                                        libcfs_nid2str(nid), rc);
3402                         } else {
3403                                 rc = lnet_peer_merge_data(lp, pbuf);
3404                         }
3405                         if (lpni)
3406                                 lnet_peer_ni_decref_locked(lpni);
3407                 } else {
3408                         struct lnet_peer *new_lp;
3409                         new_lp = lpni->lpni_peer_net->lpn_peer;
3410                         /*
3411                          * if lp has discovery/MR enabled that means new_lp
3412                          * should have discovery/MR enabled as well, since
3413                          * it's the same peer, which we're about to merge
3414                          */
3415                         spin_lock(&lp->lp_lock);
3416                         spin_lock(&new_lp->lp_lock);
3417                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3418                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3419                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3420                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3421                         /* If we're processing a ping reply then we may be
3422                          * about to send a push to the peer that we ping'd.
3423                          * Since the ping reply that we're processing was
3424                          * received by lp, we need to set the discovery source
3425                          * NID for new_lp to the NID stored in lp.
3426                          */
3427                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3428                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3429                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3430                         }
3431                         spin_unlock(&new_lp->lp_lock);
3432                         spin_unlock(&lp->lp_lock);
3433
3434                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3435                         lnet_consolidate_routes_locked(lp, new_lp);
3436                         lnet_peer_ni_decref_locked(lpni);
3437                 }
3438         }
3439 out:
3440         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3441                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3442                lp->lp_state);
3443         mutex_unlock(&the_lnet.ln_api_mutex);
3444
3445         spin_lock(&lp->lp_lock);
3446         /* Tell discovery to re-check the peer immediately. */
3447         if (!rc)
3448                 rc = LNET_REDISCOVER_PEER;
3449         return rc;
3450 }
3451
3452 /*
3453  * A ping failed. Clear the PING_FAILED state and set the
3454  * FORCE_PING state, to ensure a retry even if discovery is
3455  * disabled. This avoids being left with incorrect state.
3456  */
3457 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3458 __must_hold(&lp->lp_lock)
3459 {
3460         struct lnet_handle_md mdh;
3461         int rc;
3462
3463         mdh = lp->lp_ping_mdh;
3464         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3465         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3466         lp->lp_state |= LNET_PEER_FORCE_PING;
3467         rc = lp->lp_ping_error;
3468         lp->lp_ping_error = 0;
3469         spin_unlock(&lp->lp_lock);
3470
3471         if (!LNetMDHandleIsInvalid(mdh))
3472                 LNetMDUnlink(mdh);
3473
3474         CDEBUG(D_NET, "peer %s:%d\n",
3475                libcfs_nidstr(&lp->lp_primary_nid), rc);
3476
3477         spin_lock(&lp->lp_lock);
3478         return rc ? rc : LNET_REDISCOVER_PEER;
3479 }
3480
3481 /* Active side of ping. */
3482 static int lnet_peer_send_ping(struct lnet_peer *lp)
3483 __must_hold(&lp->lp_lock)
3484 {
3485         int nnis;
3486         int rc;
3487         int cpt;
3488
3489         lp->lp_state |= LNET_PEER_PING_SENT;
3490         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3491         spin_unlock(&lp->lp_lock);
3492
3493         cpt = lnet_net_lock_current();
3494         /* Refcount for MD. */
3495         lnet_peer_addref_locked(lp);
3496         lnet_net_unlock(cpt);
3497
3498         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3499
3500         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3501                             the_lnet.ln_dc_handler, false);
3502
3503         /*
3504          * if LNetMDBind in lnet_send_ping fails we need to decrement the
3505          * refcount on the peer, otherwise LNetMDUnlink will be called
3506          * which will eventually do that.
3507          */
3508         if (rc > 0) {
3509                 lnet_net_lock(cpt);
3510                 lnet_peer_decref_locked(lp);
3511                 lnet_net_unlock(cpt);
3512                 rc = -rc; /* change the rc to a negative value */
3513                 goto fail_error;
3514         } else if (rc < 0) {
3515                 goto fail_error;
3516         }
3517
3518         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3519
3520         spin_lock(&lp->lp_lock);
3521         return 0;
3522
3523 fail_error:
3524         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3525         /*
3526          * The errors that get us here are considered hard errors and
3527          * cause Discovery to terminate. So we clear PING_SENT, but do
3528          * not set either PING_FAILED or FORCE_PING. In fact we need
3529          * to clear PING_FAILED, because the unlink event handler will
3530          * have set it if we called LNetMDUnlink() above.
3531          */
3532         spin_lock(&lp->lp_lock);
3533         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3534         return rc;
3535 }
3536
3537 /*
3538  * This function exists because you cannot call LNetMDUnlink() from an
3539  * event handler.
3540  */
3541 static int lnet_peer_push_failed(struct lnet_peer *lp)
3542 __must_hold(&lp->lp_lock)
3543 {
3544         struct lnet_handle_md mdh;
3545         int rc;
3546
3547         mdh = lp->lp_push_mdh;
3548         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3549         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3550         rc = lp->lp_push_error;
3551         lp->lp_push_error = 0;
3552         spin_unlock(&lp->lp_lock);
3553
3554         if (!LNetMDHandleIsInvalid(mdh))
3555                 LNetMDUnlink(mdh);
3556
3557         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3558         spin_lock(&lp->lp_lock);
3559         return rc ? rc : LNET_REDISCOVER_PEER;
3560 }
3561
3562 /*
3563  * Mark the peer as discovered.
3564  */
3565 static int lnet_peer_discovered(struct lnet_peer *lp)
3566 __must_hold(&lp->lp_lock)
3567 {
3568         lp->lp_state |= LNET_PEER_DISCOVERED;
3569         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3570                           LNET_PEER_REDISCOVER);
3571
3572         lp->lp_dc_error = 0;
3573
3574         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3575
3576         return 0;
3577 }
3578
3579 /* Active side of push. */
3580 static int lnet_peer_send_push(struct lnet_peer *lp)
3581 __must_hold(&lp->lp_lock)
3582 {
3583         struct lnet_ping_buffer *pbuf;
3584         struct lnet_process_id id;
3585         struct lnet_md md;
3586         int cpt;
3587         int rc;
3588
3589         /* Don't push to a non-multi-rail peer. */
3590         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3591                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3592                 /* if peer's NIDs are uptodate then peer is discovered */
3593                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3594                         rc = lnet_peer_discovered(lp);
3595                         return rc;
3596                 }
3597
3598                 return 0;
3599         }
3600
3601         lp->lp_state |= LNET_PEER_PUSH_SENT;
3602         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3603         spin_unlock(&lp->lp_lock);
3604
3605         cpt = lnet_net_lock_current();
3606         pbuf = the_lnet.ln_ping_target;
3607         lnet_ping_buffer_addref(pbuf);
3608         lnet_net_unlock(cpt);
3609
3610         /* Push source MD */
3611         md.start     = &pbuf->pb_info;
3612         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3613         md.threshold = 2; /* Put/Ack */
3614         md.max_size  = 0;
3615         md.options   = LNET_MD_TRACK_RESPONSE;
3616         md.handler   = the_lnet.ln_dc_handler;
3617         md.user_ptr  = lp;
3618
3619         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3620         if (rc) {
3621                 lnet_ping_buffer_decref(pbuf);
3622                 CERROR("Can't bind push source MD: %d\n", rc);
3623                 goto fail_error;
3624         }
3625
3626         cpt = lnet_net_lock_current();
3627         /* Refcount for MD. */
3628         lnet_peer_addref_locked(lp);
3629         id.pid = LNET_PID_LUSTRE;
3630         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3631                 id.nid = lnet_nid_to_nid4(&lp->lp_disc_dst_nid);
3632         else
3633                 id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
3634         lnet_net_unlock(cpt);
3635
3636         rc = LNetPut(lnet_nid_to_nid4(&lp->lp_disc_src_nid), lp->lp_push_mdh,
3637                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3638                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3639
3640         /*
3641          * Reset the discovery NIDs. There is no need to restrict sending
3642          * from that source if we call lnet_push_update_to_peers(); they
3643          * will be set to specific NIDs if we initiate discovery from
3644          * scratch.
3645          */
3646         lp->lp_disc_src_nid = LNET_ANY_NID;
3647         lp->lp_disc_dst_nid = LNET_ANY_NID;
3648
3649         if (rc)
3650                 goto fail_unlink;
3651
3652         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3653
3654         spin_lock(&lp->lp_lock);
3655         return 0;
3656
3657 fail_unlink:
3658         LNetMDUnlink(lp->lp_push_mdh);
3659         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3660 fail_error:
3661         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3662                lp, rc);
3663         /*
3664          * The errors that get us here are considered hard errors and
3665          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3666          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3667          * because the unlink event handler will have set it if we
3668          * called LNetMDUnlink() above.
3669          */
3670         spin_lock(&lp->lp_lock);
3671         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3672         return rc;
3673 }
3674
3675 /*
3676  * Wait for work to be queued or some other change that must be
3677  * attended to. Returns non-zero if the discovery thread should shut
3678  * down.
3679  */
3680 static int lnet_peer_discovery_wait_for_work(void)
3681 {
3682         int cpt;
3683         int rc = 0;
3684
3685         DEFINE_WAIT(wait);
3686
3687         cpt = lnet_net_lock_current();
3688         for (;;) {
3689                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3690                                 TASK_INTERRUPTIBLE);
3691                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3692                         break;
3693                 if (lnet_push_target_resize_needed() ||
3694                     the_lnet.ln_push_target->pb_needs_post)
3695                         break;
3696                 if (!list_empty(&the_lnet.ln_dc_request))
3697                         break;
3698                 if (!list_empty(&the_lnet.ln_msg_resend))
3699                         break;
3700                 lnet_net_unlock(cpt);
3701
3702                 /*
3703                  * Wake up at most once per second to check for peers that
3704                  * have been stuck on the working queue for longer than
3705                  * the peer timeout.
3706                  */
3707                 schedule_timeout(cfs_time_seconds(1));
3708                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3709                 cpt = lnet_net_lock_current();
3710         }
3711         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3712
3713         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3714                 rc = -ESHUTDOWN;
3715
3716         lnet_net_unlock(cpt);
3717
3718         CDEBUG(D_NET, "woken: %d\n", rc);
3719
3720         return rc;
3721 }
3722
3723 /*
3724  * Messages that were pending on a destroyed peer will be put on a global
3725  * resend list. The message resend list will be checked by
3726  * the discovery thread when it wakes up, and will resend messages. These
3727  * messages can still be sendable in the case that the lpni which was the
3728  * initial cause of the message re-queue was transferred to another peer.
3729  *
3730  * It is possible that LNet could be shut down while we're iterating
3731  * through the list. lnet_shutdown_lndnets() will attempt to access the
3732  * resend list, but will have to wait until the spinlock is released, by
3733  * which time there shouldn't be any more messages on the resend list.
3734  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3735  * for the messages so they can be released. The other case is that
3736  * lnet_shutdown_lndnets() can finalize all the messages before this
3737  * function can visit the resend list, in which case this function will be
3738  * a no-op.
3739  */
3740 static void lnet_resend_msgs(void)
3741 {
3742         struct lnet_msg *msg, *tmp;
3743         LIST_HEAD(resend);
3744         int rc;
3745
3746         spin_lock(&the_lnet.ln_msg_resend_lock);
3747         list_splice(&the_lnet.ln_msg_resend, &resend);
3748         spin_unlock(&the_lnet.ln_msg_resend_lock);
3749
3750         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3751                 list_del_init(&msg->msg_list);
3752                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3753                                &msg->msg_rtr_nid_param);
3754                 if (rc < 0) {
3755                         CNETERR("Error sending %s to %s: %d\n",
3756                                lnet_msgtyp2str(msg->msg_type),
3757                                libcfs_idstr(&msg->msg_target), rc);
3758                         lnet_finalize(msg, rc);
3759                 }
3760         }
3761 }
3762
3763 /* The discovery thread. */
3764 static int lnet_peer_discovery(void *arg)
3765 {
3766         struct lnet_peer *lp;
3767         int rc;
3768
3769         wait_for_completion(&the_lnet.ln_started);
3770
3771         CDEBUG(D_NET, "started\n");
3772
3773         for (;;) {
3774                 if (lnet_peer_discovery_wait_for_work())
3775                         break;
3776
3777                 if (lnet_push_target_resize_needed())
3778                         lnet_push_target_resize();
3779                 else if (the_lnet.ln_push_target->pb_needs_post)
3780                         lnet_push_target_post(the_lnet.ln_push_target,
3781                                               &the_lnet.ln_push_target_md);
3782
3783                 lnet_resend_msgs();
3784
3785                 lnet_net_lock(LNET_LOCK_EX);
3786                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3787                         lnet_net_unlock(LNET_LOCK_EX);
3788                         break;
3789                 }
3790
3791                 /*
3792                  * Process all incoming discovery work requests.  When
3793                  * discovery must wait on a peer to change state, it
3794                  * is added to the tail of the ln_dc_working queue. A
3795                  * timestamp keeps track of when the peer was added,
3796                  * so we can time out discovery requests that take too
3797                  * long.
3798                  */
3799                 while (!list_empty(&the_lnet.ln_dc_request)) {
3800                         lp = list_first_entry(&the_lnet.ln_dc_request,
3801                                               struct lnet_peer, lp_dc_list);
3802                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3803                         /*
3804                          * set the time the peer was put on the dc_working
3805                          * queue. It shouldn't remain on the queue
3806                          * forever, in case the GET message (for ping)
3807                          * doesn't get a REPLY or the PUT message (for
3808                          * push) doesn't get an ACK.
3809                          */
3810                         lp->lp_last_queued = ktime_get_real_seconds();
3811                         lnet_net_unlock(LNET_LOCK_EX);
3812
3813                         if (lnet_push_target_resize_needed())
3814                                 lnet_push_target_resize();
3815                         else if (the_lnet.ln_push_target->pb_needs_post)
3816                                 lnet_push_target_post(the_lnet.ln_push_target,
3817                                                       &the_lnet.ln_push_target_md);
3818
3819                         /*
3820                          * Select an action depending on the state of
3821                          * the peer and whether discovery is disabled.
3822                          * The check whether discovery is disabled is
3823                          * done after the code that handles processing
3824                          * for arrived data, cleanup for failures, and
3825                          * forcing a Ping or Push.
3826                          */
3827                         spin_lock(&lp->lp_lock);
3828                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3829                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3830                                 lp->lp_state);
3831                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3832                                             LNET_PEER_MARK_DELETED))
3833                                 rc = lnet_peer_deletion(lp);
3834                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3835                                 rc = lnet_peer_data_present(lp);
3836                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3837                                 rc = lnet_peer_ping_failed(lp);
3838                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3839                                 rc = lnet_peer_push_failed(lp);
3840                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3841                                 rc = lnet_peer_send_ping(lp);
3842                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3843                                 rc = lnet_peer_send_push(lp);
3844                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3845                                 rc = lnet_peer_send_ping(lp);
3846                         else if (lnet_peer_needs_push(lp))
3847                                 rc = lnet_peer_send_push(lp);
3848                         else
3849                                 rc = lnet_peer_discovered(lp);
3850                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3851                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
3852                                 lp->lp_state, rc);
3853
3854                         if (rc == LNET_REDISCOVER_PEER) {
3855                                 spin_unlock(&lp->lp_lock);
3856                                 lnet_net_lock(LNET_LOCK_EX);
3857                                 list_move(&lp->lp_dc_list,
3858                                           &the_lnet.ln_dc_request);
3859                         } else if (rc ||
3860                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
3861                                 spin_unlock(&lp->lp_lock);
3862                                 lnet_net_lock(LNET_LOCK_EX);
3863                                 lnet_peer_discovery_complete(lp, rc);
3864                         } else {
3865                                 spin_unlock(&lp->lp_lock);
3866                                 lnet_net_lock(LNET_LOCK_EX);
3867                         }
3868
3869                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3870                                 break;
3871
3872                 }
3873
3874                 lnet_net_unlock(LNET_LOCK_EX);
3875         }
3876
3877         CDEBUG(D_NET, "stopping\n");
3878         /*
3879          * Clean up before telling lnet_peer_discovery_stop() that
3880          * we're done. Use wake_up() below to somewhat reduce the
3881          * size of the thundering herd if there are multiple threads
3882          * waiting on discovery of a single peer.
3883          */
3884
3885         /* Queue cleanup 1: stop all pending pings and pushes. */
3886         lnet_net_lock(LNET_LOCK_EX);
3887         while (!list_empty(&the_lnet.ln_dc_working)) {
3888                 lp = list_first_entry(&the_lnet.ln_dc_working,
3889                                       struct lnet_peer, lp_dc_list);
3890                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3891                 lnet_net_unlock(LNET_LOCK_EX);
3892                 lnet_peer_cancel_discovery(lp);
3893                 lnet_net_lock(LNET_LOCK_EX);
3894         }
3895         lnet_net_unlock(LNET_LOCK_EX);
3896
3897         /* Queue cleanup 2: wait for the expired queue to clear. */
3898         while (!list_empty(&the_lnet.ln_dc_expired))
3899                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3900
3901         /* Queue cleanup 3: clear the request queue. */
3902         lnet_net_lock(LNET_LOCK_EX);
3903         while (!list_empty(&the_lnet.ln_dc_request)) {
3904                 lp = list_first_entry(&the_lnet.ln_dc_request,
3905                                       struct lnet_peer, lp_dc_list);
3906                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
3907         }
3908         lnet_net_unlock(LNET_LOCK_EX);
3909
3910         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3911         the_lnet.ln_dc_handler = NULL;
3912
3913         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3914         wake_up(&the_lnet.ln_dc_waitq);
3915
3916         CDEBUG(D_NET, "stopped\n");
3917
3918         return 0;
3919 }
3920
3921 /* ln_api_mutex is held on entry. */
3922 int lnet_peer_discovery_start(void)
3923 {
3924         struct task_struct *task;
3925         int rc = 0;
3926
3927         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3928                 return -EALREADY;
3929
3930         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3931         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3932         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3933         if (IS_ERR(task)) {
3934                 rc = PTR_ERR(task);
3935                 CERROR("Can't start peer discovery thread: %d\n", rc);
3936
3937                 the_lnet.ln_dc_handler = NULL;
3938
3939                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3940         }
3941
3942         CDEBUG(D_NET, "discovery start: %d\n", rc);
3943
3944         return rc;
3945 }
3946
3947 /* ln_api_mutex is held on entry. */
3948 void lnet_peer_discovery_stop(void)
3949 {
3950         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3951                 return;
3952
3953         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3954         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3955
3956         /* In the LNetNIInit() path we may be stopping discovery before it
3957          * has entered its work loop
3958          */
3959         if (!completion_done(&the_lnet.ln_started))
3960                 complete(&the_lnet.ln_started);
3961         else
3962                 wake_up(&the_lnet.ln_dc_waitq);
3963
3964         mutex_unlock(&the_lnet.ln_api_mutex);
3965         wait_event(the_lnet.ln_dc_waitq,
3966                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3967         mutex_lock(&the_lnet.ln_api_mutex);
3968
3969         LASSERT(list_empty(&the_lnet.ln_dc_request));
3970         LASSERT(list_empty(&the_lnet.ln_dc_working));
3971         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3972
3973         CDEBUG(D_NET, "discovery stopped\n");
3974 }
3975
3976 /* Debugging */
3977
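     /* Dump aliveness and credit information for the peer NI matching @nid. */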
3978 void
3979 lnet_debug_peer(lnet_nid_t nid)
3980 {
3981         char                    *aliveness = "NA";
3982         struct lnet_peer_ni     *lp;
3983         int                     cpt;
3984
3985         cpt = lnet_cpt_of_nid(nid, NULL);
3986         lnet_net_lock(cpt);
3987
3988         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3989         if (IS_ERR(lp)) {
3990                 lnet_net_unlock(cpt);
3991                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3992                 return;
3993         }
3994
3995         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3996                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3997
3998         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3999                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4000                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4001                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4002                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4003
4004         lnet_peer_ni_decref_locked(lp);
4005
4006         lnet_net_unlock(cpt);
4007 }
4008
4009 /* Gathering information for userspace. */
4010
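     /*
      * Report aliveness and credit information for the peer_index-th NID-v4
      * peer NI found in the peer table selected by *cpt_iter. Returns -ENOENT
      * when there is no such entry.
      */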
4011 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4012                           char aliveness[LNET_MAX_STR_LEN],
4013                           __u32 *cpt_iter, __u32 *refcount,
4014                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4015                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4016                           __u32 *peer_tx_qnob)
4017 {
4018         struct lnet_peer_table          *peer_table;
4019         struct lnet_peer_ni             *lp;
4020         int                             j;
4021         int                             lncpt;
4022         bool                            found = false;
4023
4024         /* get the number of CPTs */
4025         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4026
4027         /* if the cpt number to be examined is >= the number of cpts in
4028          * the system then indicate that there are no more cpts to examine
4029          */
4030         if (*cpt_iter >= lncpt)
4031                 return -ENOENT;
4032
4033         /* get the current table */
4034         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4035         /* if the ptable is NULL then there are no more cpts to examine */
4036         if (peer_table == NULL)
4037                 return -ENOENT;
4038
4039         lnet_net_lock(*cpt_iter);
4040
4041         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4042                 struct list_head *peers = &peer_table->pt_hash[j];
4043
4044                 list_for_each_entry(lp, peers, lpni_hashlist) {
4045                         if (!nid_is_nid4(&lp->lpni_nid))
4046                                 continue;
4047                         if (peer_index-- > 0)
4048                                 continue;
4049
4050                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4051                         if (lnet_isrouter(lp) ||
4052                                 lnet_peer_aliveness_enabled(lp))
4053                                 snprintf(aliveness, LNET_MAX_STR_LEN,
4054                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
4055
4056                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4057                         *refcount = kref_read(&lp->lpni_kref);
4058                         *ni_peer_tx_credits =
4059                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4060                         *peer_tx_credits = lp->lpni_txcredits;
4061                         *peer_rtr_credits = lp->lpni_rtrcredits;
4062                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4063                         *peer_tx_qnob = lp->lpni_txqnob;
4064
4065                         found = true;
4066                 }
4067
4068         }
4069         lnet_net_unlock(*cpt_iter);
4070
4071         *cpt_iter = lncpt;
4072
4073         return found ? 0 : -ENOENT;
4074 }
4075
4076 /* ln_api_mutex is held, which keeps the peer list stable */
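     /* For each peer NI the bulk buffer receives, in order: the NID, credit
      * info, element stats, message stats and health stats.
      */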
4077 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4078 {
4079         struct lnet_ioctl_element_stats *lpni_stats;
4080         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4081         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4082         struct lnet_peer_ni_credit_info *lpni_info;
4083         struct lnet_peer_ni *lpni;
4084         struct lnet_peer *lp;
4085         lnet_nid_t nid;
4086         __u32 size;
4087         int rc;
4088
4089         lp = lnet_find_peer4(cfg->prcfg_prim_nid);
4090
4091         if (!lp) {
4092                 rc = -ENOENT;
4093                 goto out;
4094         }
4095
4096         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4097                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4098         size *= lp->lp_nnis;
4099         if (size > cfg->prcfg_size) {
4100                 cfg->prcfg_size = size;
4101                 rc = -E2BIG;
4102                 goto out_lp_decref;
4103         }
4104
4105         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4106         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4107         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4108         cfg->prcfg_count = lp->lp_nnis;
4109         cfg->prcfg_size = size;
4110         cfg->prcfg_state = lp->lp_state;
4111
4112         /* Allocate helper buffers. */
4113         rc = -ENOMEM;
4114         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4115         if (!lpni_info)
4116                 goto out_lp_decref;
4117         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4118         if (!lpni_stats)
4119                 goto out_free_info;
4120         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4121         if (!lpni_msg_stats)
4122                 goto out_free_stats;
4123         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4124         if (!lpni_hstats)
4125                 goto out_free_msg_stats;
4126
4127
4128         lpni = NULL;
4129         rc = -EFAULT;
4130         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4131                 if (!nid_is_nid4(&lpni->lpni_nid))
4132                         continue;
4133                 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4134                 if (copy_to_user(bulk, &nid, sizeof(nid)))
4135                         goto out_free_hstats;
4136                 bulk += sizeof(nid);
4137
4138                 memset(lpni_info, 0, sizeof(*lpni_info));
4139                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4140                 if (lnet_isrouter(lpni) ||
4141                         lnet_peer_aliveness_enabled(lpni))
4142                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4143                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4144
4145                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4146                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4147                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4148                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4149                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4150                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4151                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4152                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4153                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4154                         goto out_free_hstats;
4155                 bulk += sizeof(*lpni_info);
4156
4157                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4158                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4159                                                             LNET_STATS_TYPE_SEND);
4160                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4161                                                             LNET_STATS_TYPE_RECV);
4162                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4163                                                             LNET_STATS_TYPE_DROP);
4164                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4165                         goto out_free_hstats;
4166                 bulk += sizeof(*lpni_stats);
4167                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4168                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4169                         goto out_free_hstats;
4170                 bulk += sizeof(*lpni_msg_stats);
4171                 lpni_hstats->hlpni_network_timeout =
4172                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4173                 lpni_hstats->hlpni_remote_dropped =
4174                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4175                 lpni_hstats->hlpni_remote_timeout =
4176                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4177                 lpni_hstats->hlpni_remote_error =
4178                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4179                 lpni_hstats->hlpni_health_value =
4180                   atomic_read(&lpni->lpni_healthv);
4181                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4182                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4183                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4184                         goto out_free_hstats;
4185                 bulk += sizeof(*lpni_hstats);
4186         }
4187         rc = 0;
4188
4189 out_free_hstats:
4190         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4191 out_free_msg_stats:
4192         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4193 out_free_stats:
4194         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4195 out_free_info:
4196         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4197 out_lp_decref:
4198         lnet_peer_decref_locked(lp);
4199 out:
4200         return rc;
4201 }
4202
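     /* Queue @lpni on @recovery_queue if it is below maximum health, has been
      * alive at some point, has not aged past lnet_recovery_limit and is not
      * already queued. A reference is taken on the peer NI when it is added.
      */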
4203 /* must hold net_lock/0 */
4204 void
4205 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4206                                      struct list_head *recovery_queue,
4207                                      time64_t now)
4208 {
4209         /* the monitor thread could've shut down and cleaned up the queues */
4210         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4211                 return;
4212
4213         if (!list_empty(&lpni->lpni_recovery))
4214                 return;
4215
4216         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4217                 return;
4218
4219         if (!lpni->lpni_last_alive) {
4220                 CDEBUG(D_NET,
4221                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4222                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4223                        lpni->lpni_last_alive);
4224                 return;
4225         }
4226
4227         if (lnet_recovery_limit &&
4228             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4229                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4230                        libcfs_nidstr(&lpni->lpni_nid),
4231                        lpni->lpni_last_alive);
4232                 /* Reset the ping count so that if this peer NI is added back to
4233                  * the recovery queue we will send the first ping right away.
4234                  */
4235                 lpni->lpni_ping_count = 0;
4236                 return;
4237         }
4238
4239         /* This peer NI is going on the recovery queue, so take a ref on it */
4240         lnet_peer_ni_addref_locked(lpni);
4241
4242         lnet_peer_ni_set_next_ping(lpni, now);
4243
4244         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4245                libcfs_nidstr(&lpni->lpni_nid),
4246                lpni->lpni_ping_count,
4247                lpni->lpni_next_ping,
4248                lpni->lpni_last_alive,
4249                atomic_read(&lpni->lpni_healthv));
4250
4251         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4252 }
4253
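     /* Set the health value of the peer NI matching @nid, or of every peer NI
      * when @all is true, and queue any that need it for recovery.
      */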
4254 /* Call with the ln_api_mutex held */
4255 void
4256 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4257 {
4258         struct lnet_peer_table *ptable;
4259         struct lnet_peer *lp;
4260         struct lnet_peer_net *lpn;
4261         struct lnet_peer_ni *lpni;
4262         int lncpt;
4263         int cpt;
4264         time64_t now;
4265
4266         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4267                 return;
4268
4269         now = ktime_get_seconds();
4270
4271         if (!all) {
4272                 lnet_net_lock(LNET_LOCK_EX);
4273                 lpni = lnet_find_peer_ni_locked(nid);
4274                 if (!lpni) {
4275                         lnet_net_unlock(LNET_LOCK_EX);
4276                         return;
4277                 }
4278                 lnet_set_lpni_healthv_locked(lpni, value);
4279                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4280                                              &the_lnet.ln_mt_peerNIRecovq, now);
4281                 lnet_peer_ni_decref_locked(lpni);
4282                 lnet_net_unlock(LNET_LOCK_EX);
4283                 return;
4284         }
4285
4286         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4287
4288         /*
4289          * Walk all the peers and reset the health value for each one to the
4290          * specified value.
4291          */
4292         lnet_net_lock(LNET_LOCK_EX);
4293         for (cpt = 0; cpt < lncpt; cpt++) {
4294                 ptable = the_lnet.ln_peer_tables[cpt];
4295                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4296                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4297                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4298                                                     lpni_peer_nis) {
4299                                         lnet_set_lpni_healthv_locked(lpni,
4300                                                                      value);
4301                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4302                                              &the_lnet.ln_mt_peerNIRecovq, now);
4303                                 }
4304                         }
4305                 }
4306         }
4307         lnet_net_unlock(LNET_LOCK_EX);
4308 }
4309