/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/udsp.h>
#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

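/*
 * Remove a peer_ni from the list of peer_nis on networks that are not
 * configured locally, dropping the reference that list held on it.
 * A no-op if the peer_ni is not on that list.
 */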
static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

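/*
 * A new local network has been added. Adopt any peer_ni that was parked
 * on the remote list because this net did not exist yet: give it the
 * net's credit tunables and take it off the remote list.
 */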
void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}
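
/*
 * Illustrative note: a peer_ni lookup hashes the NID twice. First
 * lnet_nid_cpt_hash() selects the per-CPT table, then
 * lnet_nid2peerhash() selects the chain within that table's pt_hash.
 * See lnet_find_peer_ni_locked() below for the actual lookup path.
 */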

static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
        kref_init(&lpni->lpni_kref);
        lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lnet_nid4_to_nid(nid, &lpni->lpni_nid);
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NIDNET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                lnet_peer_ni_addref_locked(lpni);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));

        return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;
        lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;
        lp->lp_disc_src_nid = LNET_NID_ANY;
        lp->lp_disc_dst_nid = LNET_NID_ANY;
        if (lnet_peers_start_down())
                lp->lp_alive = false;
        else
                lp->lp_alive = true;

        /*
         * All peers created on a router should have health enabled,
         * if it's not already on.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for loopback peer. If you're creating a peer
         * for the loopback interface then that was initiated when we
         * attempted to send a message over the loopback. There is no need
         * to ever use a different interface when sending messages to
         * myself.
         */
        if (nid == LNET_NID_LO_0)
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * If there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected that lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on the list.
         * We cannot resend them now because we're holding the cpt lock;
         * releasing the lock could leave an inconsistent state.
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
                libcfs_nid2str(lp->lp_primary_nid),
                libcfs_nidstr(&lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Cannot delete it\n",
                       libcfs_nidstr(&lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* look up the peer table for this peer_ni's CPT */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni in the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

        spin_lock(&peer->lp_lock);
        peer->lp_state |= LNET_PEER_MARK_DELETED;
        spin_unlock(&peer->lp_lock);

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

/*
 * Discovering this peer is taking too long. Cancel any Ping or Push
 * that discovery is waiting on by unlinking the relevant MDs. The
 * lnet_discovery_event_handler() will proceed from here and complete
 * the cleanup.
 */
static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
{
        struct lnet_handle_md ping_mdh;
        struct lnet_handle_md push_mdh;

        LNetInvalidateMDHandle(&ping_mdh);
        LNetInvalidateMDHandle(&push_mdh);

        spin_lock(&lp->lp_lock);
        if (lp->lp_state & LNET_PEER_PING_SENT) {
                ping_mdh = lp->lp_ping_mdh;
                LNetInvalidateMDHandle(&lp->lp_ping_mdh);
        }
        if (lp->lp_state & LNET_PEER_PUSH_SENT) {
                push_mdh = lp->lp_push_mdh;
                LNetInvalidateMDHandle(&lp->lp_push_mdh);
        }
        spin_unlock(&lp->lp_lock);

        if (!LNetMDHandleIsInvalid(ping_mdh))
                LNetMDUnlink(ping_mdh);
        if (!LNetMDHandleIsInvalid(push_mdh))
                LNetMDUnlink(push_mdh);
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        lnet_peer_cancel_discovery(peer);
        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return 0;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = lp->lp_primary_nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /* If we're asked to lock down the primary NID we shouldn't be
         * deleting it
         */
        if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
            primary_nid == nid) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_find_peer_ni_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        lnet_peer_ni_decref_locked(lpni);
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lnet_nid_to_nid4(&lpni2->lpni_nid);
        }
        rc = lnet_peer_ni_del_locked(lpni, force);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

        return rc;
}

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (peer->lp_primary_nid !=
                            lnet_nid_to_nid4(&lpni->lpni_nid)) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        wait_var_event_warning(&ptable->pt_zombies,
                               ptable->pt_zombies == 0,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        lnet_nid_t              gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NET_ANY, gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return NULL;

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (nid_same(&lp->lpni_nid, nid)) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid4)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;
        struct lnet_nid nid;

        lnet_nid4_to_nid(nid4, &nid);

        cpt = lnet_nid_cpt_hash(nid4, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, &nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lnet_nid_to_nid4(&lpni->lpni_nid) == nid)
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}

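/*
 * Return the peer_net that follows prev_lpn_id in lp's list of nets,
 * wrapping around to the first net at the end of the list. A
 * prev_lpn_id of 0 returns the first net; an unknown prev_lpn_id
 * returns NULL.
 */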
struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list, loop back
                         * to the beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_entry(peer->lp_peer_nets.next,
                                         struct lnet_peer_net,
                                         lpn_peer_nets);
                }
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if we've reached the end of the peer_ni list and a
                 * peer net was specified, then there are no more
                 * peer_nis in that net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net ni list. move to the
                 * next net
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
                                 struct lnet_peer_net,
                                 lpn_peer_nets);
                /* get the ni on it */
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_entry(prev->lpni_peer_nis.next,
                          struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}
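
/*
 * Typical use: passing a NULL peer_net and a NULL initial prev walks
 * every peer_ni of the peer, as in lnet_peer_clr_non_mr_pref_nids():
 *
 *      struct lnet_peer_ni *lpni = NULL;
 *
 *      while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)))
 *              ... use lpni ...
 */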

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (i >= count)
                                goto done;
                        id.nid = lp->lp_primary_nid;
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}
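
/*
 * Note for callers: on -E2BIG nothing is copied, but *sizep is still
 * set to the required buffer size, so userspace can retry with a
 * buffer of at least that size.
 */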

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}

/* find the NID in the preferred gateways for the remote peer
 * return:
 *      false: the list is empty, or the NID is not on it
 *      true:  the NID is found in the list
 */
bool
lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
                             lnet_nid_t gw_nid)
{
        struct lnet_nid_list *ne;

        CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
               libcfs_nidstr(&lpni->lpni_nid),
               list_empty(&lpni->lpni_rtr_pref_nids));

        if (list_empty(&lpni->lpni_rtr_pref_nids))
                return false;

        /* iterate through all the preferred NIDs and see if any of them
         * matches the provided gw_nid
         */
        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
                       libcfs_nid2str(ne->nl_nid),
                       libcfs_nid2str(gw_nid));
                if (ne->nl_nid == gw_nid)
                        return true;
        }

        return false;
}

void
lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;
        int cpt = lpni->lpni_cpt;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(cpt);
        list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
        lnet_net_unlock(cpt);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

int
lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
                       lnet_nid_t gw_nid)
{
        int cpt = lpni->lpni_cpt;
        struct lnet_nid_list *ne = NULL;

        /* This function is called with the api_mutex held. While the
         * api_mutex is held the list cannot be modified, as it is only
         * modified as a result of applying a UDSP, and that happens
         * under the api_mutex lock.
         */
        __must_hold(&the_lnet.ln_api_mutex);

        list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
                if (ne->nl_nid == gw_nid)
                        return -EEXIST;
        }

        LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
        if (!ne)
                return -ENOMEM;

        ne->nl_nid = gw_nid;

        /* Lock the cpt to protect against addition and checks in the
         * selection algorithm
         */
        lnet_net_lock(cpt);
        list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
        lnet_net_unlock(cpt);

        return 0;
}

/*
 * Test whether an ni is a preferred ni for this peer_ni, e.g., whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_nid_list *ne;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return lpni->lpni_pref.nid == nid;
        list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                if (ne->nl_nid == nid)
                        return true;
        }
        return false;
}

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), libcfs_nid2str(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nidstr(&lpni->lpni_nid), rc);
        return rc;
}

void
lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
{
        lpni->lpni_sel_priority = priority;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

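/*
 * Add a preferred NID to a peer_ni. The preferred NIDs live in a union:
 * a single NID is stored inline in lpni_pref.nid, and only when a
 * second NID is added does the code below migrate the inline NID onto
 * the lpni_pref.nids list (the tmp_nid/ne2 dance).
 */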
int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne1 = NULL;
        struct lnet_nid_list *ne2 = NULL;
        lnet_nid_t tmp_nid = LNET_NID_ANY;
        int rc = 0;

        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0 &&
            !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -EPERM;
                goto out;
        }

        /* add the new preferred nid to the list of preferred nids */
        if (lpni->lpni_pref_nnids != 0) {
                size_t alloc_size = sizeof(*ne1);

                if (lpni->lpni_pref_nnids == 1) {
                        tmp_nid = lpni->lpni_pref.nid;
                        INIT_LIST_HEAD(&lpni->lpni_pref.nids);
                }

                list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
                        if (ne1->nl_nid == nid) {
                                rc = -EEXIST;
                                goto out;
                        }
                }

                LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
                                 alloc_size);
                if (!ne1) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* move the originally stored nid to the list */
                if (lpni->lpni_pref_nnids == 1) {
                        LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
                                lpni->lpni_cpt, alloc_size);
                        if (!ne2) {
                                rc = -ENOMEM;
                                goto out;
                        }
                        INIT_LIST_HEAD(&ne2->nl_list);
                        ne2->nl_nid = tmp_nid;
                }
                ne1->nl_nid = nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
        } else {
                if (ne2)
                        list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
                list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        struct lnet_nid_list *ne = NULL;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (lpni->lpni_pref.nid != nid) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
                        if (ne->nl_nid == nid)
                                goto remove_nid_entry;
                }
                rc = -ENOENT;
                ne = NULL;
                goto out;
        }

remove_nid_entry:
        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else {
                list_del_init(&ne->nl_list);
                if (lpni->lpni_pref_nnids == 2) {
                        struct lnet_nid_list *ne, *tmp;

                        list_for_each_entry_safe(ne, tmp,
                                                 &lpni->lpni_pref.nids,
                                                 nl_list) {
                                lpni->lpni_pref.nid = ne->nl_nid;
                                list_del_init(&ne->nl_list);
                                LIBCFS_FREE(ne, sizeof(*ne));
                        }
                }
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (ne)
                LIBCFS_FREE(ne, sizeof(*ne));
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

void
lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
{
        struct list_head zombies;
        struct lnet_nid_list *ne;
        struct lnet_nid_list *tmp;

        INIT_LIST_HEAD(&zombies);

        lnet_net_lock(LNET_LOCK_EX);
        if (lpni->lpni_pref_nnids == 1)
                lpni->lpni_pref.nid = LNET_NID_ANY;
        else if (lpni->lpni_pref_nnids > 1)
                list_splice_init(&lpni->lpni_pref.nids, &zombies);
        lpni->lpni_pref_nnids = 0;
        lnet_net_unlock(LNET_LOCK_EX);

        list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
                list_del_init(&ne->nl_list);
                LIBCFS_FREE(ne, sizeof(*ne));
        }
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }

        return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
__must_hold(&lp->lp_lock)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

int
LNetAddPeer(lnet_nid_t *nids, __u32 num_nids)
{
        lnet_nid_t pnid = 0;
        bool mr;
        int i, rc;

        if (!nids || num_nids < 1)
                return -EINVAL;

        rc = LNetNIInit(LNET_PID_ANY);
        if (rc < 0)
                return rc;

        mutex_lock(&the_lnet.ln_api_mutex);

        mr = lnet_peer_discovery_disabled == 0;

        rc = 0;
        for (i = 0; i < num_nids; i++) {
                if (nids[i] == LNET_NID_LO_0)
                        continue;

                if (!pnid) {
                        pnid = nids[i];
                        rc = lnet_add_peer_ni(pnid, LNET_NID_ANY, mr, true);
                } else if (lnet_peer_discovery_disabled) {
                        rc = lnet_add_peer_ni(nids[i], LNET_NID_ANY, mr, true);
                } else {
                        rc = lnet_add_peer_ni(pnid, nids[i], mr, true);
                }

                if (rc && rc != -EEXIST)
                        goto unlock;
        }

unlock:
        mutex_unlock(&the_lnet.ln_api_mutex);

        LNetNIFini();

        return rc == -EEXIST ? 0 : rc;
}
EXPORT_SYMBOL(LNetAddPeer);
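
/*
 * Usage sketch (illustrative, with hypothetical NIDs): all the NIDs of
 * one peer are passed in a single array, and the first non-loopback
 * entry becomes the primary NID:
 *
 *      lnet_nid_t nids[2] = { nid_a, nid_b };
 *      int rc = LNetAddPeer(nids, 2);
 */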

lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;
        int rc = 0;
        int cpt;

        if (nid == LNET_NID_LO_0)
                return LNET_NID_LO_0;

        cpt = lnet_net_lock_current();
        lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        /* If discovery is disabled locally then we needn't bother running
         * discovery here because discovery will not modify whatever
         * primary NID is currently set for this peer. If the specified peer is
         * down then this discovery can introduce long delays into the mount
         * process, so skip it if it isn't necessary.
         */
        if (!lnet_peer_discovery_disabled && !lnet_peer_is_uptodate(lp)) {
                spin_lock(&lp->lp_lock);
                /* force a full discovery cycle */
                lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH |
                                LNET_PEER_LOCK_PRIMARY;
                spin_unlock(&lp->lp_lock);

                /* start discovery in the background. Messages to that
                 * peer will not go through until the discovery is
                 * complete
                 */
                rc = lnet_discover_peer_locked(lpni, cpt, false);
                if (rc)
                        goto out_decref;
                /* The lpni (or lp) for this NID may have changed and our ref is
                 * the only thing keeping the old one around. Release the ref
                 * and lookup the lpni again
                 */
                lnet_peer_ni_decref_locked(lpni);
                lpni = lnet_find_peer_ni_locked(nid);
                if (!lpni) {
                        rc = -ENOENT;
                        goto out_unlock;
                }
                lp = lpni->lpni_peer_net->lpn_peer;
        }
        primary_nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
               libcfs_nid2str(primary_nid), rc);
        return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
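
/*
 * Usage sketch (illustrative): resolve the primary NID before keying
 * per-peer state, so every NID of a multi-rail peer maps to the same
 * entry:
 *
 *      lnet_nid_t key = LNetPrimaryNID(src_nid);
 *
 * Discovery may be kicked off in the background, so the answer can be
 * refined after discovery completes.
 */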

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;

        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (peer_net->lpn_net_id == net_id)
                        return peer_net;
        }
        return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * This function consumes the reference on lpni and always returns 0.
 * This is the last function called from functions that do return an
 * int, so returning 0 here allows the compiler to do a tail call.
 */
1475 static int
1476 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1477                          struct lnet_peer_net *lpn,
1478                          struct lnet_peer_ni *lpni,
1479                          unsigned flags)
1480 {
1481         struct lnet_peer_table *ptable;
1482         bool new_lpn = false;
1483         int rc;
1484
1485         /* Install the new peer_ni */
1486         lnet_net_lock(LNET_LOCK_EX);
1487         /* Add peer_ni to global peer table hash, if necessary. */
1488         if (list_empty(&lpni->lpni_hashlist)) {
1489                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1490
1491                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1492                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1493                 ptable->pt_version++;
1494                 lnet_peer_ni_addref_locked(lpni);
1495         }
1496
1497         /* Detach the peer_ni from an existing peer, if necessary. */
1498         if (lpni->lpni_peer_net) {
1499                 LASSERT(lpni->lpni_peer_net != lpn);
1500                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1501                 lnet_peer_detach_peer_ni_locked(lpni);
1502                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1503                 lpni->lpni_peer_net = NULL;
1504         }
1505
1506         /* Add peer_ni to peer_net */
1507         lpni->lpni_peer_net = lpn;
1508         if (lp->lp_primary_nid == lnet_nid_to_nid4(&lpni->lpni_nid))
1509                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1510         else
1511                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1512         lnet_update_peer_net_healthv(lpni);
1513         lnet_peer_net_addref_locked(lpn);
1514
1515         /* Add peer_net to peer */
1516         if (!lpn->lpn_peer) {
1517                 new_lpn = true;
1518                 lpn->lpn_peer = lp;
1519                 if (lp->lp_primary_nid == lnet_nid_to_nid4(&lpni->lpni_nid))
1520                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1521                 else
1522                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1523                 lnet_peer_addref_locked(lp);
1524         }
1525
1526         /* Add peer to global peer list, if necessary */
1527         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1528         if (list_empty(&lp->lp_peer_list)) {
1529                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1530                 ptable->pt_peers++;
1531         }
1532
1533
1534         /* Update peer state */
1535         spin_lock(&lp->lp_lock);
1536         if (flags & LNET_PEER_CONFIGURED) {
1537                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1538                         lp->lp_state |= LNET_PEER_CONFIGURED;
1539         }
1540         if (flags & LNET_PEER_MULTI_RAIL) {
1541                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1542                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1543                         lnet_peer_clr_non_mr_pref_nids(lp);
1544                 }
1545         }
1546         if (flags & LNET_PEER_LOCK_PRIMARY)
1547                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1548         spin_unlock(&lp->lp_lock);
1549
1550         lp->lp_nnis++;
1551
1552         /* apply UDSPs */
1553         if (new_lpn) {
1554                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1555                 if (rc)
1556                         CERROR("Failed to apply UDSPs on lpn %s\n",
1557                                libcfs_net2str(lpn->lpn_net_id));
1558         }
1559         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1560         if (rc)
1561                 CERROR("Failed to apply UDSPs on lpni %s\n",
1562                        libcfs_nidstr(&lpni->lpni_nid));
1563
1564         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1565                libcfs_nid2str(lp->lp_primary_nid),
1566                libcfs_nidstr(&lpni->lpni_nid), flags);
1567         lnet_peer_ni_decref_locked(lpni);
1568         lnet_net_unlock(LNET_LOCK_EX);
1569
1570         return 0;
1571 }
1572
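/*
 * Condensed sketch (illustrative, not part of the original code) of how
 * the callers below (lnet_peer_add, lnet_peer_add_nid,
 * lnet_peer_ni_traffic_add) use the function above. On success all
 * three references are consumed by the attach; on allocation failure
 * the caller frees whatever it allocated itself:
 *
 *	lp = lnet_peer_alloc(nid);
 *	lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
 *	lpni = lnet_peer_ni_alloc(nid);
 *	if (!lp || !lpn || !lpni)
 *		goto failed;	(free whatever was allocated)
 *	return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
 */
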
1573 /*
1574  * Create a new peer, with nid as its primary nid.
1575  *
1576  * Call with the lnet_api_mutex held.
1577  */
1578 static int
1579 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1580 {
1581         struct lnet_peer *lp;
1582         struct lnet_peer_net *lpn;
1583         struct lnet_peer_ni *lpni;
1584         int rc = 0;
1585
1586         LASSERT(nid != LNET_NID_ANY);
1587
1588         /*
1589          * No need for the lnet_net_lock here, because the
1590          * lnet_api_mutex is held.
1591          */
1592         lpni = lnet_find_peer_ni_locked(nid);
1593         if (lpni) {
1594                 /* A peer with this NID already exists. */
1595                 lp = lpni->lpni_peer_net->lpn_peer;
1596                 lnet_peer_ni_decref_locked(lpni);
1597                 /*
1598                  * This is an error if the peer was configured and the
1599                  * primary NID differs or an attempt is made to change
1600                  * the Multi-Rail flag. Otherwise the assumption is
1601                  * that an existing peer is being modified.
1602                  */
1603                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1604                         if (lp->lp_primary_nid != nid)
1605                                 rc = -EEXIST;
1606                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1607                                 rc = -EPERM;
1608                         goto out;
1609                 } else if (!(flags & LNET_PEER_CONFIGURED)) {
1610                         if (lp->lp_primary_nid == nid) {
1611                                 rc = -EEXIST;
1612                                 goto out;
1613                         }
1614                 }
1615                 /* Delete and recreate as a configured peer. */
1616                 lnet_peer_del(lp);
1617         }
1618
1619         /* Create peer, peer_net, and peer_ni. */
1620         rc = -ENOMEM;
1621         lp = lnet_peer_alloc(nid);
1622         if (!lp)
1623                 goto out;
1624         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1625         if (!lpn)
1626                 goto out_free_lp;
1627         lpni = lnet_peer_ni_alloc(nid);
1628         if (!lpni)
1629                 goto out_free_lpn;
1630
1631         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1632
1633 out_free_lpn:
1634         LIBCFS_FREE(lpn, sizeof(*lpn));
1635 out_free_lp:
1636         LIBCFS_FREE(lp, sizeof(*lp));
1637 out:
1638         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1639                libcfs_nid2str(nid), flags, rc);
1640         return rc;
1641 }
1642
1643 /*
1644  * Add a NID to a peer. Call with ln_api_mutex held.
1645  *
1646  * Error codes:
1647  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1648  *  -EEXIST:   The NID was configured by DLC for a different peer.
1649  *  -ENOMEM:   Out of memory.
1650  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1651  *             non-multi-rail peer.
1652  */
1653 static int
1654 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1655 {
1656         struct lnet_peer_net *lpn;
1657         struct lnet_peer_ni *lpni;
1658         int rc = 0;
1659
1660         LASSERT(lp);
1661         LASSERT(nid != LNET_NID_ANY);
1662
1663         /* A configured peer can only be updated through configuration. */
1664         if (!(flags & LNET_PEER_CONFIGURED)) {
1665                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1666                         rc = -EPERM;
1667                         goto out;
1668                 }
1669         }
1670
1671         /*
1672          * The MULTI_RAIL flag can be set but not cleared, because
1673          * that would leave the peer struct in an invalid state.
1674          */
1675         if (flags & LNET_PEER_MULTI_RAIL) {
1676                 spin_lock(&lp->lp_lock);
1677                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1678                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1679                         lnet_peer_clr_non_mr_pref_nids(lp);
1680                 }
1681                 spin_unlock(&lp->lp_lock);
1682         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1683                 rc = -EPERM;
1684                 goto out;
1685         }
1686
1687         lpni = lnet_find_peer_ni_locked(nid);
1688         if (lpni) {
1689                 /*
1690                  * A peer_ni already exists. This is only a problem if
1691                  * it is not connected to this peer and was configured
1692                  * by DLC.
1693                  */
1694                 if (lpni->lpni_peer_net->lpn_peer == lp)
1695                         goto out_free_lpni;
1696                 if (lnet_peer_ni_is_configured(lpni)) {
1697                         rc = -EEXIST;
1698                         goto out_free_lpni;
1699                 }
1700                 /* If this is the primary NID, destroy the peer. */
1701                 if (lnet_peer_ni_is_primary(lpni)) {
1702                         struct lnet_peer *lp2 =
1703                                 lpni->lpni_peer_net->lpn_peer;
1704                         int rtr_refcount = lp2->lp_rtr_refcount;
1705
1706                         /* If the new peer that this NID belongs to
1707                          * is a primary NID for another peer whose
1708                          * primary we're supposed to preserve, then we
1709                          * don't want to mess with it. But the
1710                          * configuration is wrong at this point, so we
1711                          * should flag both of these peers as being in
1712                          * a bad state.
1713                          */
1714                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY) {
1715                                 spin_lock(&lp->lp_lock);
1716                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1717                                 spin_unlock(&lp->lp_lock);
1718                                 spin_lock(&lp2->lp_lock);
1719                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1720                                 spin_unlock(&lp2->lp_lock);
1721                                 goto out_free_lpni;
1722                         }
1723                         /*
1724                          * If we're trying to delete a router, it means
1725                          * we're moving this peer NI to a new peer, so we
1726                          * must transfer the router properties to the new peer.
1727                          */
1728                         if (rtr_refcount > 0) {
1729                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1730                                 lnet_rtr_transfer_to_peer(lp2, lp);
1731                         }
1732                         lnet_peer_del(lp2);
1733                         lnet_peer_ni_decref_locked(lpni);
1734                         lpni = lnet_peer_ni_alloc(nid);
1735                         if (!lpni) {
1736                                 rc = -ENOMEM;
1737                                 goto out_free_lpni;
1738                         }
1739                 }
1740         } else {
1741                 lpni = lnet_peer_ni_alloc(nid);
1742                 if (!lpni) {
1743                         rc = -ENOMEM;
1744                         goto out_free_lpni;
1745                 }
1746         }
1747
1748         /*
1749          * Get the peer_net. Check that we're not adding a second
1750          * peer_ni on a peer_net of a non-multi-rail peer.
1751          */
1752         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1753         if (!lpn) {
1754                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1755                 if (!lpn) {
1756                         rc = -ENOMEM;
1757                         goto out_free_lpni;
1758                 }
1759         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1760                 rc = -ENOTUNIQ;
1761                 goto out_free_lpni;
1762         }
1763
1764         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1765
1766 out_free_lpni:
1767         lnet_peer_ni_decref_locked(lpni);
1768 out:
1769         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1770                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1771                flags, rc);
1772         return rc;
1773 }
1774
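/*
 * For illustration only (a sketch, not from the original source), a
 * caller holding ln_api_mutex could map the error codes documented
 * above to log messages:
 *
 *	rc = lnet_peer_add_nid(lp, nid, flags);
 *	if (rc == -ENOTUNIQ)
 *		CERROR("%s: second NID on one net of a non-MR peer\n",
 *		       libcfs_nid2str(nid));
 *	else if (rc == -EEXIST)
 *		CERROR("%s: NID already configured by DLC elsewhere\n",
 *		       libcfs_nid2str(nid));
 */
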
1775 /*
1776  * Update the primary NID of a peer, if possible.
1777  *
1778  * Call with the lnet_api_mutex held.
1779  */
1780 static int
1781 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1782 {
1783         lnet_nid_t old = lp->lp_primary_nid;
1784         int rc = 0;
1785
1786         if (lp->lp_primary_nid == nid)
1787                 goto out;
1788
1789         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1790                 lp->lp_primary_nid = nid;
1791
1792         rc = lnet_peer_add_nid(lp, nid, flags);
1793         if (rc) {
1794                 lp->lp_primary_nid = old;
1795                 goto out;
1796         }
1797 out:
1798         /* If this is a configured peer, or the primary NID for this peer
1799          * has been locked, then we don't want to flag this scenario as
1800          * a failure.
1801          */
1802         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1803             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1804                 return 0;
1805
1806         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1807                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1808
1809         return rc;
1810 }
1811
1812 /*
1813  * lpni creation initiated due to traffic either sending or receiving.
1814  */
1815 static int
1816 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1817 {
1818         struct lnet_peer *lp;
1819         struct lnet_peer_net *lpn;
1820         struct lnet_peer_ni *lpni;
1821         unsigned flags = 0;
1822         int rc = 0;
1823
1824         if (nid == LNET_NID_ANY) {
1825                 rc = -EINVAL;
1826                 goto out;
1827         }
1828
1829         /* lnet_net_lock is not needed here because the ln_api_mutex is held */
1830         lpni = lnet_find_peer_ni_locked(nid);
1831         if (lpni) {
1832                 /*
1833                  * We must have raced with another thread. Since we
1834                  * know next to nothing about a peer_ni created by
1835                  * traffic, we just assume everything is ok and
1836                  * return.
1837                  */
1838                 lnet_peer_ni_decref_locked(lpni);
1839                 goto out;
1840         }
1841
1842         /* Create peer, peer_net, and peer_ni. */
1843         rc = -ENOMEM;
1844         lp = lnet_peer_alloc(nid);
1845         if (!lp)
1846                 goto out;
1847         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1848         if (!lpn)
1849                 goto out_free_lp;
1850         lpni = lnet_peer_ni_alloc(nid);
1851         if (!lpni)
1852                 goto out_free_lpn;
1853         if (pref != LNET_NID_ANY)
1854                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1855
1856         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1857
1858 out_free_lpn:
1859         LIBCFS_FREE(lpn, sizeof(*lpn));
1860 out_free_lp:
1861         LIBCFS_FREE(lp, sizeof(*lp));
1862 out:
1863         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1864         return rc;
1865 }
1866
1867 /*
1868  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1869  *
1870  * This API handles the following combinations:
1871  *  - Create a peer with its primary NI if only the prim_nid is
1872  *    provided. The peer being created may be non-MR.
1873  *  - Add a NID to a peer identified by the prim_nid. The peer
1874  *    identified by the prim_nid must already exist.
1875  *
1876  * The caller must hold ln_api_mutex. This prevents the peer from
1877  * being created/modified/deleted by a different thread.
1878  */
1879 int
1880 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr, bool temp)
1881 {
1882         struct lnet_peer *lp = NULL;
1883         struct lnet_peer_ni *lpni;
1884         unsigned int flags = 0;
1885
1886         /* The prim_nid must always be specified */
1887         if (prim_nid == LNET_NID_ANY)
1888                 return -EINVAL;
1889
1890         if (!temp)
1891                 flags = LNET_PEER_CONFIGURED;
1892
1893         if (mr)
1894                 flags |= LNET_PEER_MULTI_RAIL;
1895
1896         /*
1897          * If nid isn't specified, we must create a new peer with
1898          * prim_nid as its primary nid.
1899          */
1900         if (nid == LNET_NID_ANY)
1901                 return lnet_peer_add(prim_nid, flags);
1902
1903         /* Look up the prim_nid, which must exist. */
1904         lpni = lnet_find_peer_ni_locked(prim_nid);
1905         if (!lpni)
1906                 return -ENOENT;
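        /* The ln_api_mutex held by the caller prevents the peer from
         * being deleted, so lpni remains valid after the decref below. */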
1907         lnet_peer_ni_decref_locked(lpni);
1908         lp = lpni->lpni_peer_net->lpn_peer;
1909
1910         /* Peer must have been configured. */
1911         if (!temp && !(lp->lp_state & LNET_PEER_CONFIGURED)) {
1912                 CDEBUG(D_NET, "peer %s was not configured\n",
1913                        libcfs_nid2str(prim_nid));
1914                 return -ENOENT;
1915         }
1916
1917         /* Primary NID must match */
1918         if (lp->lp_primary_nid != prim_nid) {
1919                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1920                        libcfs_nid2str(prim_nid),
1921                        libcfs_nid2str(lp->lp_primary_nid));
1922                 return -ENODEV;
1923         }
1924
1925         /* Multi-Rail flag must match. */
1926         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1927                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1928                        libcfs_nid2str(prim_nid));
1929                 return -EPERM;
1930         }
1931
1932         return lnet_peer_add_nid(lp, nid, flags);
1933 }
1934
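/*
 * Usage sketch (illustrative only; the NIDs are hypothetical):
 * configure a Multi-Rail peer with primary NID 10.0.0.1@tcp, then add
 * a second NID to it, with ln_api_mutex held as required:
 *
 *	lnet_nid_t prim = libcfs_str2nid("10.0.0.1@tcp");
 *	int rc;
 *
 *	mutex_lock(&the_lnet.ln_api_mutex);
 *	rc = lnet_add_peer_ni(prim, LNET_NID_ANY, true, false);
 *	if (!rc)
 *		rc = lnet_add_peer_ni(prim,
 *				      libcfs_str2nid("10.0.0.2@tcp"),
 *				      true, false);
 *	mutex_unlock(&the_lnet.ln_api_mutex);
 */
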
1935 /*
1936  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1937  *
1938  * This API handles the following combinations:
1939  *  - Delete a NI from a peer if both prim_nid and nid are provided.
1940  *  - Delete a peer if only prim_nid is provided.
1941  *  - Delete a peer if the nid provided is its primary NID.
1942  *
1943  * The caller must hold ln_api_mutex. This prevents the peer from
1944  * being modified/deleted by a different thread.
1945  */
1946 int
1947 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1948 {
1949         struct lnet_peer *lp;
1950         struct lnet_peer_ni *lpni;
1951         unsigned flags;
1952
1953         if (prim_nid == LNET_NID_ANY)
1954                 return -EINVAL;
1955
1956         lpni = lnet_find_peer_ni_locked(prim_nid);
1957         if (!lpni)
1958                 return -ENOENT;
1959         lnet_peer_ni_decref_locked(lpni);
1960         lp = lpni->lpni_peer_net->lpn_peer;
1961
1962         if (prim_nid != lp->lp_primary_nid) {
1963                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1964                        libcfs_nid2str(prim_nid),
1965                        libcfs_nid2str(lp->lp_primary_nid));
1966                 return -ENODEV;
1967         }
1968
1969         lnet_net_lock(LNET_LOCK_EX);
1970         if (lp->lp_rtr_refcount > 0) {
1971                 lnet_net_unlock(LNET_LOCK_EX);
1972                 CERROR("%s is a router and cannot be deleted\n",
1973                        libcfs_nid2str(prim_nid));
1974                 return -EBUSY;
1975         }
1976         lnet_net_unlock(LNET_LOCK_EX);
1977
1978         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1979                 return lnet_peer_del(lp);
1980
1981         flags = LNET_PEER_CONFIGURED;
1982         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1983                 flags |= LNET_PEER_MULTI_RAIL;
1984
1985         return lnet_peer_del_nid(lp, nid, flags);
1986 }
1987
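/*
 * Usage sketch (illustrative, reusing the hypothetical NIDs from the
 * add example above), again with ln_api_mutex held: a specific
 * non-primary nid deletes just that peer NI, while LNET_NID_ANY
 * deletes the whole peer:
 *
 *	rc = lnet_del_peer_ni(prim, libcfs_str2nid("10.0.0.2@tcp"));
 *	rc = lnet_del_peer_ni(prim, LNET_NID_ANY);
 */
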
1988 void
1989 lnet_destroy_peer_ni_locked(struct kref *ref)
1990 {
1991         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
1992                                                  lpni_kref);
1993         struct lnet_peer_table *ptable;
1994         struct lnet_peer_net *lpn;
1995
1996         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
1997
1998         LASSERT(kref_read(&lpni->lpni_kref) == 0);
1999         LASSERT(list_empty(&lpni->lpni_txq));
2000         LASSERT(lpni->lpni_txqnob == 0);
2001         LASSERT(list_empty(&lpni->lpni_peer_nis));
2002         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2003
2004         lpn = lpni->lpni_peer_net;
2005         lpni->lpni_peer_net = NULL;
2006         lpni->lpni_net = NULL;
2007
2008         if (!list_empty(&lpni->lpni_hashlist)) {
2009                 /* remove the peer ni from the zombie list */
2010                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2011                 spin_lock(&ptable->pt_zombie_lock);
2012                 list_del_init(&lpni->lpni_hashlist);
2013                 ptable->pt_zombies--;
2014                 spin_unlock(&ptable->pt_zombie_lock);
2015         }
2016
2017         if (lpni->lpni_pref_nnids > 1) {
2018                 struct lnet_nid_list *ne, *tmp;
2019
2020                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2021                                          nl_list) {
2022                         list_del_init(&ne->nl_list);
2023                         LIBCFS_FREE(ne, sizeof(*ne));
2024                 }
2025         }
2026         LIBCFS_FREE(lpni, sizeof(*lpni));
2027
2028         if (lpn)
2029                 lnet_peer_net_decref_locked(lpn);
2030 }
2031
2032 struct lnet_peer_ni *
2033 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
2034 {
2035         struct lnet_peer_ni *lpni = NULL;
2036         int rc;
2037
2038         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2039                 return ERR_PTR(-ESHUTDOWN);
2040
2041         /*
2042          * find if a peer_ni already exists.
2043          * If so then just return that.
2044          */
2045         lpni = lnet_find_peer_ni_locked(nid);
2046         if (lpni)
2047                 return lpni;
2048
2049         lnet_net_unlock(cpt);
2050
2051         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
2052         if (rc) {
2053                 lpni = ERR_PTR(rc);
2054                 goto out_net_relock;
2055         }
2056
2057         lpni = lnet_find_peer_ni_locked(nid);
2058         LASSERT(lpni);
2059
2060 out_net_relock:
2061         lnet_net_lock(cpt);
2062
2063         return lpni;
2064 }
2065
2066 /*
2067  * Get a peer_ni for the given nid, create it if necessary. Takes a
2068  * hold on the peer_ni.
2069  */
2070 struct lnet_peer_ni *
2071 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
2072 {
2073         struct lnet_peer_ni *lpni = NULL;
2074         int rc;
2075
2076         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2077                 return ERR_PTR(-ESHUTDOWN);
2078
2079         /*
2080          * find if a peer_ni already exists.
2081          * If so then just return that.
2082          */
2083         lpni = lnet_find_peer_ni_locked(nid);
2084         if (lpni)
2085                 return lpni;
2086
2087         /*
2088          * Slow path:
2089          * use the lnet_api_mutex to serialize the creation of the peer_ni
2090          * and the creation/deletion of the local ni/net. When a local ni is
2091          * created, if there exists a set of peer_nis on that network,
2092          * they need to be traversed and updated. When a local NI is
2093          * deleted, which could result in a network being deleted, then
2094          * all peer nis on that network need to be removed as well.
2095          *
2096          * Creation through traffic should also be serialized with
2097          * creation through DLC.
2098          */
2099         lnet_net_unlock(cpt);
2100         mutex_lock(&the_lnet.ln_api_mutex);
2101         /*
2102          * Shutdown is only set under the ln_api_mutex, so a single
2103          * check here is sufficient.
2104          */
2105         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2106                 lpni = ERR_PTR(-ESHUTDOWN);
2107                 goto out_mutex_unlock;
2108         }
2109
2110         rc = lnet_peer_ni_traffic_add(nid, pref);
2111         if (rc) {
2112                 lpni = ERR_PTR(rc);
2113                 goto out_mutex_unlock;
2114         }
2115
2116         lpni = lnet_find_peer_ni_locked(nid);
2117         LASSERT(lpni);
2118
2119 out_mutex_unlock:
2120         mutex_unlock(&the_lnet.ln_api_mutex);
2121         lnet_net_lock(cpt);
2122
2123         /* Lock has been dropped, check again for shutdown. */
2124         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2125                 if (!IS_ERR(lpni))
2126                         lnet_peer_ni_decref_locked(lpni);
2127                 lpni = ERR_PTR(-ESHUTDOWN);
2128         }
2129
2130         return lpni;
2131 }
2132
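/*
 * Typical usage sketch (illustrative): callers already hold
 * lnet_net_lock(cpt) and drop the hold when they are done with the
 * peer_ni:
 *
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);
 *	(... use lpni ...)
 *	lnet_peer_ni_decref_locked(lpni);
 */
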
2133 bool
2134 lnet_peer_gw_discovery(struct lnet_peer *lp)
2135 {
2136         bool rc = false;
2137
2138         spin_lock(&lp->lp_lock);
2139         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2140                 rc = true;
2141         spin_unlock(&lp->lp_lock);
2142
2143         return rc;
2144 }
2145
2146 bool
2147 lnet_peer_is_uptodate(struct lnet_peer *lp)
2148 {
2149         bool rc;
2150
2151         spin_lock(&lp->lp_lock);
2152         rc = lnet_peer_is_uptodate_locked(lp);
2153         spin_unlock(&lp->lp_lock);
2154         return rc;
2155 }
2156
2157 /*
2158  * Is a peer uptodate from the point of view of discovery?
2159  *
2160  * If it is currently being processed, obviously not.
2161  * A forced Ping or Push is also handled by the discovery thread.
2162  *
2163  * Otherwise look at whether the peer needs rediscovering.
2164  */
2165 bool
2166 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2167 __must_hold(&lp->lp_lock)
2168 {
2169         bool rc;
2170
2171         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2172                             LNET_PEER_FORCE_PING |
2173                             LNET_PEER_FORCE_PUSH)) {
2174                 rc = false;
2175         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2176                 rc = false;
2177         } else if (lnet_peer_needs_push(lp)) {
2178                 rc = false;
2179         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2180                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2181                         rc = true;
2182                 else
2183                         rc = false;
2184         } else {
2185                 rc = false;
2186         }
2187
2188         return rc;
2189 }
2190
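/*
 * Sketch of the intended use (illustrative): check staleness and queue
 * the peer for the discovery thread under lnet_net_lock/EX, as the
 * push event handler below does:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	if (!lnet_peer_is_uptodate(lp))
 *		lnet_peer_queue_for_discovery(lp);
 *	lnet_net_unlock(LNET_LOCK_EX);
 */
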
2191 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2192 void
2193 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2194 {
2195         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2196          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2197          * when adding to the list and queuing the peer to ensure that we do not
2198          * strand any messages on the lp_dc_pendq. This scheme ensures the
2199          * message will be resent even if the peer is already being discovered.
2200          * Therefore we needn't check the return value of
2201          * lnet_peer_queue_for_discovery(lp).
2202          */
2203         lnet_net_lock(LNET_LOCK_EX);
2204         spin_lock(&lp->lp_lock);
2205         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2206         spin_unlock(&lp->lp_lock);
2207         lnet_peer_queue_for_discovery(lp);
2208         lnet_net_unlock(LNET_LOCK_EX);
2209 }
2210
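/*
 * The matching consumer side (see lnet_peer_discovery_complete()
 * below) splices the queue under the same two locks, so no message
 * added here can be stranded; a sketch:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	spin_lock(&lp->lp_lock);
 *	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
 *	spin_unlock(&lp->lp_lock);
 *	lnet_net_unlock(LNET_LOCK_EX);
 */
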
2211 /*
2212  * Queue a peer for the attention of the discovery thread.  Call with
2213  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2214  * -EALREADY if the peer was already queued.
2215  */
2216 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2217 {
2218         int rc;
2219
2220         spin_lock(&lp->lp_lock);
2221         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2222                 lp->lp_state |= LNET_PEER_DISCOVERING;
2223         spin_unlock(&lp->lp_lock);
2224         if (list_empty(&lp->lp_dc_list)) {
2225                 lnet_peer_addref_locked(lp);
2226                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2227                 wake_up(&the_lnet.ln_dc_waitq);
2228                 rc = 0;
2229         } else {
2230                 rc = -EALREADY;
2231         }
2232
2233         CDEBUG(D_NET, "Queue peer %s: %d\n",
2234                libcfs_nid2str(lp->lp_primary_nid), rc);
2235
2236         return rc;
2237 }
2238
2239 /*
2240  * Discovery of a peer is complete. Wake all waiters on the peer.
2241  * Call with lnet_net_lock/EX held.
2242  */
2243 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
2244 {
2245         struct lnet_msg *msg, *tmp;
2246         int rc = 0;
2247         LIST_HEAD(pending_msgs);
2248
2249         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2250                libcfs_nid2str(lp->lp_primary_nid));
2251
2252         list_del_init(&lp->lp_dc_list);
2253         spin_lock(&lp->lp_lock);
2254         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2255         spin_unlock(&lp->lp_lock);
2256         wake_up(&lp->lp_dc_waitq);
2257
2258         if (lp->lp_rtr_refcount > 0)
2259                 lnet_router_discovery_complete(lp);
2260
2261         lnet_net_unlock(LNET_LOCK_EX);
2262
2263         /* iterate through all pending messages and send them again */
2264         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2265                 list_del_init(&msg->msg_list);
2266                 if (lp->lp_dc_error) {
2267                         lnet_finalize(msg, lp->lp_dc_error);
2268                         continue;
2269                 }
2270
2271                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2272                        lnet_msgtyp2str(msg->msg_type),
2273                        libcfs_id2str(msg->msg_target));
2274                 rc = lnet_send(msg->msg_src_nid_param, msg,
2275                                msg->msg_rtr_nid_param);
2276                 if (rc < 0) {
2277                         CNETERR("Error sending %s to %s: %d\n",
2278                                lnet_msgtyp2str(msg->msg_type),
2279                                libcfs_id2str(msg->msg_target), rc);
2280                         lnet_finalize(msg, rc);
2281                 }
2282         }
2283         lnet_net_lock(LNET_LOCK_EX);
2284         lnet_peer_decref_locked(lp);
2285 }
2286
2287 /*
2288  * Handle inbound push.
2289  * Like any event handler, called with lnet_res_lock/CPT held.
2290  */
2291 void lnet_peer_push_event(struct lnet_event *ev)
2292 {
2293         struct lnet_ping_buffer *pbuf;
2294         struct lnet_peer *lp;
2295
2296         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2297
2298         /* lnet_find_peer() adds a refcount */
2299         lp = lnet_find_peer(ev->source.nid);
2300         if (!lp) {
2301                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2302                        libcfs_nid2str(ev->initiator.nid),
2303                        libcfs_nid2str(ev->source.nid));
2304                 pbuf->pb_needs_post = true;
2305                 return;
2306         }
2307
2308         /* Ensure peer state remains consistent while we modify it. */
2309         spin_lock(&lp->lp_lock);
2310
2311         /*
2312          * If some kind of error happened, the contents of the message
2313          * cannot be used. Clear the NIDS_UPTODATE and set the
2314          * FORCE_PING flag to trigger a ping.
2315          */
2316         if (ev->status) {
2317                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2318                 lp->lp_state |= LNET_PEER_FORCE_PING;
2319                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2320                        ev->status,
2321                        libcfs_nid2str(lp->lp_primary_nid),
2322                        libcfs_nid2str(ev->source.nid));
2323                 goto out;
2324         }
2325
2326         /*
2327          * A push with invalid or corrupted info. Clear the UPTODATE
2328          * flag to trigger a ping.
2329          */
2330         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2331                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2332                 lp->lp_state |= LNET_PEER_FORCE_PING;
2333                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2334                        libcfs_nid2str(lp->lp_primary_nid));
2335                 goto out;
2336         }
2337
2338         /*
2339          * Make sure we'll allocate the correct size ping buffer when
2340          * pinging the peer.
2341          */
2342         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2343                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2344
2345         /*
2346          * A non-Multi-Rail peer is not supposed to be capable of
2347          * sending a push.
2348          */
2349         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2350                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2351                        libcfs_nid2str(lp->lp_primary_nid));
2352                 goto out;
2353         }
2354
2355         /*
2356          * The peer may have discovery disabled at its end. Set
2357          * NO_DISCOVERY as appropriate.
2358          */
2359         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2360                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2361                        libcfs_nid2str(lp->lp_primary_nid));
2362                 /*
2363                  * Mark the peer for deletion if we already know about it
2364                  * and it's going from discovery enabled to discovery disabled.
2365                  */
2366                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2367                                       LNET_PEER_DISCOVERING)) &&
2368                      lp->lp_state & LNET_PEER_DISCOVERED) {
2369                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2370                                libcfs_nid2str(lp->lp_primary_nid),
2371                                lp->lp_state);
2372                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2373                 }
2374                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2375         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2376                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2377                        libcfs_nid2str(lp->lp_primary_nid));
2378                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2379         }
2380
2381         /*
2382          * Update the MULTI_RAIL flag based on the push. If the peer
2383          * was configured with DLC then the setting should match what
2384          * DLC put in.
2385          * NB: We verified above that the MR feature bit is set in pi_features
2386          */
2387         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2388                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2389                        libcfs_nid2str(lp->lp_primary_nid), lp);
2390         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2391                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2392                       libcfs_nid2str(lp->lp_primary_nid));
2393         } else if (lnet_peer_discovery_disabled) {
2394                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2395                        libcfs_nid2str(lp->lp_primary_nid), lp);
2396         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2397                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2398                        libcfs_nid2str(lp->lp_primary_nid), lp);
2399         } else {
2400                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2401                        libcfs_nid2str(lp->lp_primary_nid), lp);
2402                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2403                 lnet_peer_clr_non_mr_pref_nids(lp);
2404         }
2405
2406         /*
2407          * Check for truncation of the Put message. Clear the
2408          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2409          * and tell discovery to allocate a bigger buffer.
2410          */
2411         if (ev->mlength < ev->rlength) {
2412                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2413                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2414                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2415                 lp->lp_state |= LNET_PEER_FORCE_PING;
2416                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2417                        libcfs_nid2str(lp->lp_primary_nid),
2418                        pbuf->pb_info.pi_nnis);
2419                 goto out;
2420         }
2421
2422         /* always assume new data */
2423         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2424         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2425
2426         /*
2427          * If there is data present that hasn't been processed yet,
2428          * we'll replace it if the Put contained newer data and it
2429          * fits. We're racing with a Ping or earlier Push in this
2430          * case.
2431          */
2432         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2433                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2434                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2435                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2436                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2437                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2438                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2439                               libcfs_nid2str(lp->lp_primary_nid),
2440                               LNET_PING_BUFFER_SEQNO(pbuf),
2441                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2442                 }
2443                 goto out;
2444         }
2445
2446         /*
2447          * Allocate a buffer to copy the data. On a failure we drop
2448          * the Push and set FORCE_PING to force the discovery
2449          * thread to fix the problem by pinging the peer.
2450          */
2451         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2452         if (!lp->lp_data) {
2453                 lp->lp_state |= LNET_PEER_FORCE_PING;
2454                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2455                        libcfs_nid2str(lp->lp_primary_nid),
2456                        LNET_PING_BUFFER_SEQNO(pbuf));
2457                 goto out;
2458         }
2459
2460         /* Success */
2461         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2462                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2463         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2464         CDEBUG(D_NET, "Received Push %s %u\n",
2465                libcfs_nid2str(lp->lp_primary_nid),
2466                LNET_PING_BUFFER_SEQNO(pbuf));
2467
2468 out:
2469         /* We've processed this buffer. It can be reposted */
2470         pbuf->pb_needs_post = true;
2471
2472         /*
2473          * Queue the peer for discovery if it is not up to date. If the
2474          * peer was already queued, move it to the head of the request
2475          * queue and wake the discovery thread, since its status changed.
2476          */
2477         spin_unlock(&lp->lp_lock);
2478         lnet_net_lock(LNET_LOCK_EX);
2479         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2480                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2481                 wake_up(&the_lnet.ln_dc_waitq);
2482         }
2483         /* Drop refcount from lookup */
2484         lnet_peer_decref_locked(lp);
2485         lnet_net_unlock(LNET_LOCK_EX);
2486 }
2487
2488 /*
2489  * Clear the discovery error state, unless we're already discovering
2490  * this peer, in which case the error is current.
2491  */
2492 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2493 {
2494         spin_lock(&lp->lp_lock);
2495         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2496                 lp->lp_dc_error = 0;
2497         spin_unlock(&lp->lp_lock);
2498 }
2499
2500 /*
2501  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2502  * dropped/retaken within this function. An lnet_peer_ni is passed in
2503  * because discovery could tear down an lnet_peer.
2504  */
2505 int
2506 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2507 {
2508         DEFINE_WAIT(wait);
2509         struct lnet_peer *lp;
2510         int rc = 0;
2511         int count = 0;
2512
2513 again:
2514         lnet_net_unlock(cpt);
2515         lnet_net_lock(LNET_LOCK_EX);
2516         lp = lpni->lpni_peer_net->lpn_peer;
2517         lnet_peer_clear_discovery_error(lp);
2518
2519         /*
2520          * We're willing to be interrupted. The lpni can become a
2521          * zombie if we race with DLC, so we must check for that.
2522          */
2523         for (;;) {
2524                 /* Keep lp alive when the lnet_net_lock is unlocked */
2525                 lnet_peer_addref_locked(lp);
2526                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2527                 if (signal_pending(current))
2528                         break;
2529                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2530                         break;
2531                 /*
2532                  * Don't repeat discovery if discovery is disabled. This is
2533                  * done to ensure we can use discovery as a standard ping as
2534                  * well, for backwards compatibility with routers that do
2535                  * not support discovery or have it disabled.
2536                  */
2537                 if (lnet_is_discovery_disabled(lp) && count > 0)
2538                         break;
2539                 if (lp->lp_dc_error)
2540                         break;
2541                 if (lnet_peer_is_uptodate(lp))
2542                         break;
2543                 lnet_peer_queue_for_discovery(lp);
2544                 count++;
2545                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2546
2547                 /*
2548                  * If caller requested a non-blocking operation then
2549                  * return immediately. Once discovery is complete any
2550                  * pending messages that were stopped due to discovery
2551                  * will be transmitted.
2552                  */
2553                 if (!block)
2554                         break;
2555
2556                 lnet_net_unlock(LNET_LOCK_EX);
2557                 schedule();
2558                 finish_wait(&lp->lp_dc_waitq, &wait);
2559                 lnet_net_lock(LNET_LOCK_EX);
2560                 lnet_peer_decref_locked(lp);
2561                 /* Peer may have changed */
2562                 lp = lpni->lpni_peer_net->lpn_peer;
2563         }
2564         finish_wait(&lp->lp_dc_waitq, &wait);
2565
2566         lnet_net_unlock(LNET_LOCK_EX);
2567         lnet_net_lock(cpt);
2568         lnet_peer_decref_locked(lp);
2569         /*
2570          * The peer may have changed, so re-check and rediscover if that turns
2571          * out to have been the case. The reference count on lp ensured that
2572          * even if it was unlinked from lpni the memory could not be recycled.
2573          * Thus the check below is sufficient to determine whether the peer
2574          * changed. If the peer changed, then lp must not be dereferenced.
2575          */
2576         if (lp != lpni->lpni_peer_net->lpn_peer)
2577                 goto again;
2578
2579         if (signal_pending(current))
2580                 rc = -EINTR;
2581         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2582                 rc = -ESHUTDOWN;
2583         else if (lp->lp_dc_error)
2584                 rc = lp->lp_dc_error;
2585         else if (!block)
2586                 CDEBUG(D_NET, "non-blocking discovery\n");
2587         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2588                 goto again;
2589
2590         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2591                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2592                libcfs_nidstr(&lpni->lpni_nid), rc,
2593                (!block) ? "pending discovery" : "discovery complete");
2594
2595         return rc;
2596 }
2597
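/*
 * Usage sketch (illustrative): with ln_api_mutex and lnet_net_lock(cpt)
 * held as described above, look up the peer NI and block until
 * discovery completes:
 *
 *	lpni = lnet_find_peer_ni_locked(nid);
 *	if (lpni) {
 *		rc = lnet_discover_peer_locked(lpni, cpt, true);
 *		lnet_peer_ni_decref_locked(lpni);
 *	}
 */
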
2598 /* Handle an incoming ack for a push. */
2599 static void
2600 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2601 {
2602         struct lnet_ping_buffer *pbuf;
2603
2604         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2605         spin_lock(&lp->lp_lock);
2606         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2607         lp->lp_push_error = ev->status;
2608         if (ev->status)
2609                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2610         else
2611                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2612         spin_unlock(&lp->lp_lock);
2613
2614         CDEBUG(D_NET, "peer %s ev->status %d\n",
2615                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2616 }
2617
2618 /* Handle a Reply message. This is the reply to a Ping message. */
2619 static void
2620 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2621 {
2622         struct lnet_ping_buffer *pbuf;
2623         int rc;
2624
2625         spin_lock(&lp->lp_lock);
2626
2627         lp->lp_disc_src_nid = ev->target.nid;
2628         lp->lp_disc_dst_nid = ev->source.nid;
2629
2630         /*
2631          * If some kind of error happened, the contents of the message
2632          * cannot be used. Set PING_FAILED to trigger a retry.
2633          */
2634         if (ev->status) {
2635                 lp->lp_state |= LNET_PEER_PING_FAILED;
2636                 lp->lp_ping_error = ev->status;
2637                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2638                        ev->status,
2639                        libcfs_nid2str(lp->lp_primary_nid),
2640                        libcfs_nid2str(ev->source.nid));
2641                 goto out;
2642         }
2643
2644         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2645         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2646                 lnet_swap_pinginfo(pbuf);
2647
2648         /*
2649          * A reply with invalid or corrupted info. Set PING_FAILED to
2650          * trigger a retry.
2651          */
2652         rc = lnet_ping_info_validate(&pbuf->pb_info);
2653         if (rc) {
2654                 lp->lp_state |= LNET_PEER_PING_FAILED;
2655                 lp->lp_ping_error = 0;
2656                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2657                        libcfs_nid2str(lp->lp_primary_nid), rc);
2658                 goto out;
2659         }
2660
2661         /*
2662          * The peer may have discovery disabled at its end. Set
2663          * NO_DISCOVERY as appropriate.
2664          */
2665         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) ||
2666             lnet_peer_discovery_disabled) {
2667                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2668                        libcfs_nid2str(lp->lp_primary_nid));
2669
2670                 /* Detect whether this peer has toggled discovery from on to
2671                  * off and whether we can delete and re-create the peer. Peers
2672                  * that were manually configured cannot be deleted by discovery.
2673                  * We need to delete this peer and re-create it if the peer was
2674                  * not configured manually, is currently considered DD capable,
2675                  * and either:
2676                  * 1. We've already discovered the peer (the peer has toggled
2677                  *    the discovery feature from on to off), or
2678                  * 2. The peer is considered MR, but it was not user configured
2679                  *    (this was a "temporary" peer created via the kernel APIs
2680                  *     that we're discovering for the first time)
2681                  */
2682                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2683                                       LNET_PEER_NO_DISCOVERY)) &&
2684                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2685                                      LNET_PEER_MULTI_RAIL))) {
2686                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2687                                libcfs_nid2str(lp->lp_primary_nid),
2688                                lp->lp_state);
2689                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2690                 }
2691                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2692         } else {
2693                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2694                        libcfs_nid2str(lp->lp_primary_nid));
2695                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2696         }
2697
2698         /*
2699          * Update the MULTI_RAIL flag based on the reply. If the peer
2700          * was configured with DLC then the setting should match what
2701          * DLC put in.
2702          */
2703         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2704                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2705                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2706                                libcfs_nid2str(lp->lp_primary_nid), lp);
2707                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2708                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2709                               libcfs_nid2str(lp->lp_primary_nid));
2710                 } else if (lnet_peer_discovery_disabled) {
2711                         CDEBUG(D_NET,
2712                                "peer %s(%p) not MR: DD disabled locally\n",
2713                                libcfs_nid2str(lp->lp_primary_nid), lp);
2714                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2715                         CDEBUG(D_NET,
2716                                "peer %s(%p) not MR: DD disabled remotely\n",
2717                                libcfs_nid2str(lp->lp_primary_nid), lp);
2718                 } else {
2719                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2720                                libcfs_nid2str(lp->lp_primary_nid), lp);
2721                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2722                         lnet_peer_clr_non_mr_pref_nids(lp);
2723                 }
2724         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2725                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2726                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2727                               libcfs_nid2str(lp->lp_primary_nid));
2728                 } else {
2729                         CERROR("Multi-Rail state vanished from %s\n",
2730                                libcfs_nid2str(lp->lp_primary_nid));
2731                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2732                 }
2733         }
2734
2735         /*
2736          * Make sure we'll allocate the correct size ping buffer when
2737          * pinging the peer.
2738          */
2739         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2740                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2741
2742         /*
2743          * Check for truncation of the Reply. Clear PING_SENT and set
2744          * PING_FAILED to trigger a retry.
2745          */
2746         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2747                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2748                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2749                 lp->lp_state |= LNET_PEER_PING_FAILED;
2750                 lp->lp_ping_error = 0;
2751                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2752                        libcfs_nid2str(lp->lp_primary_nid),
2753                        pbuf->pb_info.pi_nnis);
2754                 goto out;
2755         }
2756
2757         /*
2758          * Check the sequence numbers in the reply. These are only
2759          * available if the reply came from a Multi-Rail peer.
2760          */
2761         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2762             pbuf->pb_info.pi_nnis > 1 &&
2763             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2764                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2765                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2766                                 libcfs_nid2str(lp->lp_primary_nid),
2767                                 LNET_PING_BUFFER_SEQNO(pbuf),
2768                                 lp->lp_peer_seqno);
2769
2770                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2771         }
2772
2773         /* We're happy with the state of the data in the buffer. */
2774         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2775                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2776         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2777                 lnet_ping_buffer_decref(lp->lp_data);
2778         else
2779                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2780         lnet_ping_buffer_addref(pbuf);
2781         lp->lp_data = pbuf;
2782 out:
2783         lp->lp_state &= ~LNET_PEER_PING_SENT;
2784         spin_unlock(&lp->lp_lock);
2785
2786         lnet_net_lock(LNET_LOCK_EX);
2787         /*
2788          * If this peer is a gateway, call the routing callback to
2789          * handle the ping reply
2790          */
2791         if (lp->lp_rtr_refcount > 0)
2792                 lnet_router_discovery_ping_reply(lp);
2793         lnet_net_unlock(LNET_LOCK_EX);
2794 }
2795
2796 /*
2797  * Send event handling. Only matters for error cases, where we clean
2798  * up state on the peer and peer_ni that would otherwise be updated in
2799  * the REPLY event handler for a successful Ping, and the ACK event
2800  * handler for a successful Push.
2801  */
2802 static int
2803 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2804 {
2805         int rc = 0;
2806
2807         if (!ev->status)
2808                 goto out;
2809
2810         spin_lock(&lp->lp_lock);
2811         if (ev->msg_type == LNET_MSG_GET) {
2812                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2813                 lp->lp_state |= LNET_PEER_PING_FAILED;
2814                 lp->lp_ping_error = ev->status;
2815         } else { /* ev->msg_type == LNET_MSG_PUT */
2816                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2817                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2818                 lp->lp_push_error = ev->status;
2819         }
2820         spin_unlock(&lp->lp_lock);
2821         rc = LNET_REDISCOVER_PEER;
2822 out:
2823         CDEBUG(D_NET, "%s Send to %s: %d\n",
2824                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2825                 libcfs_nid2str(ev->target.nid), rc);
2826         return rc;
2827 }
2828
2829 /*
2830  * Unlink event handling. This event is only seen if a call to
2831  * LNetMDUnlink() caused the event to be unlinked. If this call was
2832  * made after the event was set up in LNetGet() or LNetPut() then we
2833  * assume the Ping or Push timed out.
2834  */
2835 static void
2836 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2837 {
2838         spin_lock(&lp->lp_lock);
2839         /* We've passed through LNetGet() */
2840         if (lp->lp_state & LNET_PEER_PING_SENT) {
2841                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2842                 lp->lp_state |= LNET_PEER_PING_FAILED;
2843                 lp->lp_ping_error = -ETIMEDOUT;
2844                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2845                         libcfs_nid2str(lp->lp_primary_nid));
2846         }
2847         /* We've passed through LNetPut() */
2848         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2849                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2850                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2851                 lp->lp_push_error = -ETIMEDOUT;
2852                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2853                         libcfs_nid2str(lp->lp_primary_nid));
2854         }
2855         spin_unlock(&lp->lp_lock);
2856 }
2857
2858 /*
2859  * Event handler for the discovery EQ.
2860  *
2861  * Called with lnet_res_lock(cpt) held. The cpt is the
2862  * lnet_cpt_of_cookie() of the md handle cookie.
2863  */
2864 static void lnet_discovery_event_handler(struct lnet_event *event)
2865 {
2866         struct lnet_peer *lp = event->md_user_ptr;
2867         struct lnet_ping_buffer *pbuf;
2868         int rc;
2869
2870         /* discovery needs to take another look */
2871         rc = LNET_REDISCOVER_PEER;
2872
2873         CDEBUG(D_NET, "Received event: %d\n", event->type);
2874
2875         switch (event->type) {
2876         case LNET_EVENT_ACK:
2877                 lnet_discovery_event_ack(lp, event);
2878                 break;
2879         case LNET_EVENT_REPLY:
2880                 lnet_discovery_event_reply(lp, event);
2881                 break;
2882         case LNET_EVENT_SEND:
2883                 /* Only send failure triggers a retry. */
2884                 rc = lnet_discovery_event_send(lp, event);
2885                 break;
2886         case LNET_EVENT_UNLINK:
2887                 /* LNetMDUnlink() was called */
2888                 lnet_discovery_event_unlink(lp, event);
2889                 break;
2890         default:
2891                 /* Invalid events. */
2892                 LBUG();
2893         }
2894         lnet_net_lock(LNET_LOCK_EX);
2895         if (event->unlinked) {
2896                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
2897                 lnet_ping_buffer_decref(pbuf);
2898                 lnet_peer_decref_locked(lp);
2899         }
2900
2901         /* Put the peer back at the end of the request queue, if
2902          * discovery is not already done. */
2903         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2904             lnet_peer_queue_for_discovery(lp)) {
2905                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2906                 wake_up(&the_lnet.ln_dc_waitq);
2907         }
2908         lnet_net_unlock(LNET_LOCK_EX);
2909 }
2910
2911 /*
2912  * Build a peer from incoming data.
2913  *
2914  * The NIDs in the incoming data are supposed to be structured as follows:
2915  *  - loopback
2916  *  - primary NID
2917  *  - other NIDs in same net
2918  *  - NIDs in second net
2919  *  - NIDs in third net
2920  *  - ...
2921  * This is due to the way the list of NIDs in the data is created.
2922  *
2923  * Note that this function will mark the peer uptodate unless an
2924  * ENOMEM is encountered. All other errors are due to a conflict
2925  * between the DLC configuration and what discovery sees. We treat DLC
2926  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2927  * peer from becoming stuck in discovery.
2928  */
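/*
 * For example (hypothetical NIDs), the ping data for a Multi-Rail peer
 * on two networks might be laid out as:
 *
 *	pi_ni[0]: 0@lo              (loopback)
 *	pi_ni[1]: 10.0.0.1@tcp      (primary NID)
 *	pi_ni[2]: 10.0.0.2@tcp      (other NID on the same net)
 *	pi_ni[3]: 192.168.0.1@o2ib  (NID on the second net)
 */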
2929 static int lnet_peer_merge_data(struct lnet_peer *lp,
2930                                 struct lnet_ping_buffer *pbuf)
2931 {
2932         struct lnet_peer_net *lpn;
2933         struct lnet_peer_ni *lpni;
2934         lnet_nid_t *curnis = NULL;
2935         struct lnet_ni_status *addnis = NULL;
2936         lnet_nid_t *delnis = NULL;
2937         unsigned flags;
2938         int ncurnis;
2939         int naddnis;
2940         int ndelnis;
2941         int nnis = 0;
2942         int i;
2943         int j;
2944         int rc;
2945
2946         flags = LNET_PEER_DISCOVERED;
2947         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2948                 flags |= LNET_PEER_MULTI_RAIL;
2949
2950         /*
2951          * Cache the routing feature for the peer; whether it is enabled
2952          * or disabled as reported by the remote peer.
2953          */
2954         spin_lock(&lp->lp_lock);
2955         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2956                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2957         else
2958                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2959         spin_unlock(&lp->lp_lock);
2960
2961         nnis = max_t(int, lp->lp_nnis, pbuf->pb_info.pi_nnis);
2962         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
2963         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
2964         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
2965         if (!curnis || !addnis || !delnis) {
2966                 rc = -ENOMEM;
2967                 goto out;
2968         }
2969         ncurnis = 0;
2970         naddnis = 0;
2971         ndelnis = 0;
2972
2973         /* Construct the list of NIDs present in peer. */
2974         lpni = NULL;
2975         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2976                 curnis[ncurnis++] = lnet_nid_to_nid4(&lpni->lpni_nid);
2977
2978         /*
2979          * Check for NIDs in pbuf not present in curnis[].
2980          * The loop starts at 1 to skip the loopback NID.
2981          */
2982         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2983                 for (j = 0; j < ncurnis; j++)
2984                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2985                                 break;
2986                 if (j == ncurnis)
2987                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2988         }
2989         /*
2990          * Check for NIDs in curnis[] not present in pbuf.
2991          * The nested loop starts at 1 to skip the loopback NID.
2992          *
2993          * But never add the loopback NID to delnis[]: if it is
2994          * present in curnis[] then this peer is for this node.
2995          */
2996         for (i = 0; i < ncurnis; i++) {
2997                 if (curnis[i] == LNET_NID_LO_0)
2998                         continue;
2999                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
3000                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
3001                                 /*
3002                                  * update the information we cache for the
3003                                  * peer with the latest information we
3004                                  * received
3005                                  */
3006                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
3007                                 if (lpni) {
3008                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
3009                                         lnet_peer_ni_decref_locked(lpni);
3010                                 }
3011                                 break;
3012                         }
3013                 }
3014                 if (j == pbuf->pb_info.pi_nnis)
3015                         delnis[ndelnis++] = curnis[i];
3016         }
3017
3018         /*
3019          * If we get here and discovery is disabled then we don't want
3020          * to add or delete any NIs. We just update the ones we have some
3021          * information on and call it a day.
3022          */
3023         rc = 0;
3024         if (lnet_is_discovery_disabled(lp))
3025                 goto out;
3026
3027         for (i = 0; i < naddnis; i++) {
3028                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
3029                 if (rc) {
3030                         CERROR("Error adding NID %s to peer %s: %d\n",
3031                                libcfs_nid2str(addnis[i].ns_nid),
3032                                libcfs_nid2str(lp->lp_primary_nid), rc);
3033                         if (rc == -ENOMEM)
3034                                 goto out;
3035                 }
3036                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
3037                 if (lpni) {
3038                         lpni->lpni_ns_status = addnis[i].ns_status;
3039                         lnet_peer_ni_decref_locked(lpni);
3040                 }
3041         }
3042
3043         for (i = 0; i < ndelnis; i++) {
3044                 /*
3045                  * for routers it's okay to delete the primary_nid because
3046                  * the upper layers don't really rely on it. So if we're
3047                  * being told that the router changed its primary_nid
3048                  * then it's okay to delete it.
3049                  */
3050                 if (lp->lp_rtr_refcount > 0)
3051                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3052                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
3053                 if (rc) {
3054                         CERROR("Error deleting NID %s from peer %s: %d\n",
3055                                libcfs_nid2str(delnis[i]),
3056                                libcfs_nid2str(lp->lp_primary_nid), rc);
3057                         if (rc == -ENOMEM)
3058                                 goto out;
3059                 }
3060         }
3061
3062         /* The peer net for the primary NID should be the first entry in the
3063          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3064          * be the first entry in its peer net's lpn_peer_nis list.
3065          */
3066         lpni = lnet_find_peer_ni_locked(pbuf->pb_info.pi_ni[1].ns_nid);
3067         if (!lpni) {
3068                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3069                        libcfs_nid2str(pbuf->pb_info.pi_ni[1].ns_nid));
3070                 goto out;
3071         }
3072
3073         lnet_peer_ni_decref_locked(lpni);
3074
3075         lpn = lpni->lpni_peer_net;
3076         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3077                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3078
3079         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3080                 list_move(&lpni->lpni_peer_nis,
3081                           &lpni->lpni_peer_net->lpn_peer_nis);
3082
3083         /*
3084          * Errors other than -ENOMEM are due to peers having been
3085          * configured with DLC. Ignore these because DLC overrides
3086          * Discovery.
3087          */
3088         rc = 0;
3089 out:
3090         CFS_FREE_PTR_ARRAY(curnis, nnis);
3091         CFS_FREE_PTR_ARRAY(addnis, nnis);
3092         CFS_FREE_PTR_ARRAY(delnis, nnis);
3093         lnet_ping_buffer_decref(pbuf);
3094         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3095
3096         if (rc) {
3097                 spin_lock(&lp->lp_lock);
3098                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3099                 lp->lp_state |= LNET_PEER_FORCE_PING;
3100                 spin_unlock(&lp->lp_lock);
3101         }
3102         return rc;
3103 }
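
/*
 * The addnis[]/delnis[] computation above is a plain set difference between
 * the NIDs currently attached to the peer and the NIDs reported in the ping
 * buffer, with the loopback NID (slot 0 of the buffer) excluded in both
 * directions. A minimal standalone sketch of that computation; the sample_*
 * names are hypothetical and plain 64-bit values stand in for lnet_nid_t:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <stdint.h>
#include <stddef.h>

#define SAMPLE_NID_LO	((uint64_t)0)	/* stands in for LNET_NID_LO_0 */

static void sample_nid_diff(const uint64_t *cur, size_t ncur,
			    const uint64_t *ping, size_t nping,
			    uint64_t *add, size_t *nadd,
			    uint64_t *del, size_t *ndel)
{
	size_t i, j;

	*nadd = 0;
	*ndel = 0;

	/* NIDs in the ping data but not on the peer: candidates to add.
	 * Slot 0 of the ping data is the loopback NID and is skipped. */
	for (i = 1; i < nping; i++) {
		for (j = 0; j < ncur; j++)
			if (ping[i] == cur[j])
				break;
		if (j == ncur)
			add[(*nadd)++] = ping[i];
	}

	/* NIDs on the peer but absent from the ping data: candidates to
	 * delete. The loopback NID itself is never deleted. */
	for (i = 0; i < ncur; i++) {
		if (cur[i] == SAMPLE_NID_LO)
			continue;
		for (j = 1; j < nping; j++)
			if (cur[i] == ping[j])
				break;
		if (j == nping)
			del[(*ndel)++] = cur[i];
	}
}
#endif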
3104
3105 /*
3106  * The data in pbuf says lp is its primary peer, but the data was
3107  * received by a different peer. Try to update lp with the data.
3108  */
3109 static int
3110 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3111 {
3112         struct lnet_handle_md mdh;
3113
3114         /* Queue lp for discovery, and force it on the request queue. */
3115         lnet_net_lock(LNET_LOCK_EX);
3116         if (lnet_peer_queue_for_discovery(lp))
3117                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3118         lnet_net_unlock(LNET_LOCK_EX);
3119
3120         LNetInvalidateMDHandle(&mdh);
3121
3122         /*
3123          * Decide whether we can move the peer to the DATA_PRESENT state.
3124          *
3125          * We replace stale data for a multi-rail peer, repair PING_FAILED
3126          * status, and preempt FORCE_PING.
3127          *
3128          * If after that we have DATA_PRESENT, we merge it into this peer.
3129          */
3130         spin_lock(&lp->lp_lock);
3131         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3132                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3133                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3134                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3135                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3136                         lnet_ping_buffer_decref(pbuf);
3137                         pbuf = lp->lp_data;
3138                         lp->lp_data = NULL;
3139                 }
3140         }
3141         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3142                 lnet_ping_buffer_decref(lp->lp_data);
3143                 lp->lp_data = NULL;
3144                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3145         }
3146         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3147                 mdh = lp->lp_ping_mdh;
3148                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3149                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3150                 lp->lp_ping_error = 0;
3151         }
3152         if (lp->lp_state & LNET_PEER_FORCE_PING)
3153                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3154         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3155         spin_unlock(&lp->lp_lock);
3156
3157         if (!LNetMDHandleIsInvalid(mdh))
3158                 LNetMDUnlink(mdh);
3159
3160         if (pbuf)
3161                 return lnet_peer_merge_data(lp, pbuf);
3162
3163         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3164         return 0;
3165 }
3166
3167 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
3168 {
3169         int i;
3170
3171         for (i = 0; i < pinfo->pi_nnis; i++) {
3172                 if (pinfo->pi_ni[i].ns_nid == nid)
3173                         return true;
3174         }
3175
3176         return false;
3177 }
3178
3179 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3180  * to the discovery queue a reference was taken that will prevent the peer from
3181  * actually being freed by this function. After this function exits, the
3182  * discovery thread should call lnet_peer_discovery_complete() which will
3183  * drop that reference as well as wake any waiters that may also be holding a
3184  * ref on the peer.
3185  */
3186 static int lnet_peer_deletion(struct lnet_peer *lp)
3187 __must_hold(&lp->lp_lock)
3188 {
3189         struct list_head rlist;
3190         struct lnet_route *route, *tmp;
3191         int sensitivity = lp->lp_health_sensitivity;
3192
3193         INIT_LIST_HEAD(&rlist);
3194
3195         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3196                           LNET_PEER_FORCE_PUSH);
3197         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3198                libcfs_nid2str(lp->lp_primary_nid), lp, lp->lp_state);
3199
3200         /* no-op if lnet_peer_del() has already been called on this peer */
3201         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3202                 return 0;
3203
3204         if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
3205                 return -ESHUTDOWN;
3206
3207         spin_unlock(&lp->lp_lock);
3208
3209         mutex_lock(&the_lnet.ln_api_mutex);
3210
3211         lnet_net_lock(LNET_LOCK_EX);
3212         /* remove the peer from the discovery work
3213          * queue if it's on there in preparation
3214          * for deleting it.
3215          */
3216         if (!list_empty(&lp->lp_dc_list))
3217                 list_del_init(&lp->lp_dc_list);
3218         list_for_each_entry_safe(route, tmp,
3219                                  &lp->lp_routes,
3220                                  lr_gwlist)
3221                 lnet_move_route(route, NULL, &rlist);
3222         lnet_net_unlock(LNET_LOCK_EX);
3223
3224         /* lnet_peer_del() deletes all the peer NIs owned by this peer */
3225         lnet_peer_del(lp);
3226
3227         list_for_each_entry_safe(route, tmp,
3228                                  &rlist, lr_list) {
3229                 /* re-add these routes */
3230                 lnet_add_route(route->lr_net,
3231                                route->lr_hops,
3232                                route->lr_nid,
3233                                route->lr_priority,
3234                                sensitivity);
3235                 LIBCFS_FREE(route, sizeof(*route));
3236         }
3237
3238         mutex_unlock(&the_lnet.ln_api_mutex);
3239
3240         spin_lock(&lp->lp_lock);
3241
3242         return 0;
3243 }
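
/*
 * lnet_peer_deletion() is entered and exited with lp_lock held, but the
 * deletion itself needs ln_api_mutex and the exclusive net lock, both of
 * which may sleep. Hence the shape above: drop the spinlock, do the heavy
 * work under the sleeping locks, and re-acquire the spinlock before
 * returning so the caller's locking assumptions still hold. A compressed
 * sketch of that shape with hypothetical demo_* names:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/spinlock.h>
#include <linux/mutex.h>

static DEFINE_SPINLOCK(demo_obj_lock);	/* stands in for lp->lp_lock */
static DEFINE_MUTEX(demo_api_mutex);	/* stands in for the_lnet.ln_api_mutex */

/* Called and returns with demo_obj_lock held. */
static int demo_delete_locked(void)
__must_hold(&demo_obj_lock)
{
	/* cannot sleep while holding a spinlock, so drop it first */
	spin_unlock(&demo_obj_lock);

	mutex_lock(&demo_api_mutex);
	/* ... unlink the object from global lists, rescue dependent state ... */
	mutex_unlock(&demo_api_mutex);

	/* restore the caller's locking state before returning */
	spin_lock(&demo_obj_lock);
	return 0;
}
#endif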
3244
3245 /*
3246  * Update a peer using the data received.
3247  */
3248 static int lnet_peer_data_present(struct lnet_peer *lp)
3249 __must_hold(&lp->lp_lock)
3250 {
3251         struct lnet_ping_buffer *pbuf;
3252         struct lnet_peer_ni *lpni;
3253         lnet_nid_t nid = LNET_NID_ANY;
3254         unsigned flags;
3255         int rc = 0;
3256
3257         pbuf = lp->lp_data;
3258         lp->lp_data = NULL;
3259         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3260         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3261         spin_unlock(&lp->lp_lock);
3262
3263         /*
3264          * Modifications of peer structures are done while holding the
3265          * ln_api_mutex. A global lock is required because we may be
3266          * modifying multiple peer structures, and a mutex greatly
3267          * simplifies memory management.
3268          *
3269          * The actual changes to the data structures must also protect
3270          * against concurrent lookups, for which the lnet_net_lock in
3271          * LNET_LOCK_EX mode is used.
3272          */
3273         mutex_lock(&the_lnet.ln_api_mutex);
3274         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3275                 rc = -ESHUTDOWN;
3276                 goto out;
3277         }
3278
3279         /*
3280          * If this peer is not on the peer list then it is being torn
3281          * down, and our reference count may be all that is keeping it
3282          * alive. Don't do any work on it.
3283          */
3284         if (list_empty(&lp->lp_peer_list))
3285                 goto out;
3286
3287         flags = LNET_PEER_DISCOVERED;
3288         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3289                 flags |= LNET_PEER_MULTI_RAIL;
3290
3291         /*
3292          * Check whether the primary NID in the message matches the
3293          * primary NID of the peer. If it does, update the peer; if
3294          * it does not, check whether there is already a peer with
3295          * that primary NID. If no such peer exists, try to update
3296          * the primary NID of the current peer (allowed if it was
3297          * created due to message traffic) and complete the update.
3298          * If the peer did exist, hand off the data to it.
3299          *
3300          * The peer for the loopback interface is a special case: this
3301          * is the peer for the local node, and we want to set its
3302          * primary NID to the correct value here. Moreover, this peer
3303          * can show up with only the loopback NID in the ping buffer.
3304          */
3305         if (pbuf->pb_info.pi_nnis <= 1)
3306                 goto out;
3307         nid = pbuf->pb_info.pi_ni[1].ns_nid;
3308         if (lp->lp_primary_nid == LNET_NID_LO_0) {
3309                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
3310                 if (!rc)
3311                         rc = lnet_peer_merge_data(lp, pbuf);
3312         /*
3313          * If the primary NID we have cached for the peer is present in the
3314          * ping info, but the ping reports a different primary NID, and
3315          * discovery is disabled, then we don't want to update our local
3316          * peer info by adding or removing NIDs; we just want to update
3317          * the status of the NIDs that we currently have recorded in
3318          * that peer.
3319          */
3320         } else if (lp->lp_primary_nid == nid ||
3321                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
3322                     lnet_is_discovery_disabled(lp))) {
3323                 rc = lnet_peer_merge_data(lp, pbuf);
3324         } else {
3325                 lpni = lnet_find_peer_ni_locked(nid);
3326                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3327                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
3328                         if (rc) {
3329                                 CERROR("Primary NID error %s versus %s: %d\n",
3330                                        libcfs_nid2str(lp->lp_primary_nid),
3331                                        libcfs_nid2str(nid), rc);
3332                         } else {
3333                                 rc = lnet_peer_merge_data(lp, pbuf);
3334                         }
3335                         if (lpni)
3336                                 lnet_peer_ni_decref_locked(lpni);
3337                 } else {
3338                         struct lnet_peer *new_lp;
3339                         new_lp = lpni->lpni_peer_net->lpn_peer;
3340                         /*
3341                          * if lp has discovery/MR enabled that means new_lp
3342                          * should have discovery/MR enabled as well, since
3343                          * it's the same peer, which we're about to merge
3344                          */
3345                         spin_lock(&lp->lp_lock);
3346                         spin_lock(&new_lp->lp_lock);
3347                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3348                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3349                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3350                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3351                         /* If we're processing a ping reply then we may be
3352                          * about to send a push to the peer that we ping'd.
3353                          * Since the ping reply that we're processing was
3354                          * received by lp, we need to set the discovery source
3355                          * NID for new_lp to the NID stored in lp.
3356                          */
3357                         if (lp->lp_disc_src_nid != LNET_NID_ANY) {
3358                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3359                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3360                         }
3361                         spin_unlock(&new_lp->lp_lock);
3362                         spin_unlock(&lp->lp_lock);
3363
3364                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3365                         lnet_consolidate_routes_locked(lp, new_lp);
3366                         lnet_peer_ni_decref_locked(lpni);
3367                 }
3368         }
3369 out:
3370         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
3371                lp->lp_state);
3372         mutex_unlock(&the_lnet.ln_api_mutex);
3373
3374         spin_lock(&lp->lp_lock);
3375         /* Tell discovery to re-check the peer immediately. */
3376         if (!rc)
3377                 rc = LNET_REDISCOVER_PEER;
3378         return rc;
3379 }
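
/*
 * The if/else chain above reduces to a four-way decision keyed on the
 * primary NID carried in slot 1 of the ping buffer. A condensed sketch of
 * that decision; the sample_* names and boolean parameters are hypothetical
 * stand-ins for the real lookups performed above:
 */
#if 0	/* illustrative sketch only, not compiled */
enum sample_dc_action {
	SAMPLE_DC_IGNORE,		/* loopback-only buffer: nothing to do */
	SAMPLE_DC_MERGE,		/* merge pbuf into this peer */
	SAMPLE_DC_SET_PRIMARY_MERGE,	/* update the primary NID, then merge */
	SAMPLE_DC_HAND_OFF,		/* hand the data to the peer owning the NID */
};

static enum sample_dc_action
sample_classify(int buffer_nnis, int primary_is_loopback, int primary_matches,
		int primary_in_buffer, int discovery_disabled,
		int nid_owned_by_other_peer)
{
	if (buffer_nnis <= 1)
		return SAMPLE_DC_IGNORE;
	if (primary_is_loopback)
		return SAMPLE_DC_SET_PRIMARY_MERGE;
	if (primary_matches || (primary_in_buffer && discovery_disabled))
		return SAMPLE_DC_MERGE;
	if (nid_owned_by_other_peer)
		return SAMPLE_DC_HAND_OFF;
	return SAMPLE_DC_SET_PRIMARY_MERGE;
}
#endif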
3380
3381 /*
3382  * A ping failed. Clear the PING_FAILED state and set the
3383  * FORCE_PING state, to ensure a retry even if discovery is
3384  * disabled. This avoids being left with incorrect state.
3385  */
3386 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3387 __must_hold(&lp->lp_lock)
3388 {
3389         struct lnet_handle_md mdh;
3390         int rc;
3391
3392         mdh = lp->lp_ping_mdh;
3393         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3394         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3395         lp->lp_state |= LNET_PEER_FORCE_PING;
3396         rc = lp->lp_ping_error;
3397         lp->lp_ping_error = 0;
3398         spin_unlock(&lp->lp_lock);
3399
3400         if (!LNetMDHandleIsInvalid(mdh))
3401                 LNetMDUnlink(mdh);
3402
3403         CDEBUG(D_NET, "peer %s:%d\n",
3404                libcfs_nid2str(lp->lp_primary_nid), rc);
3405
3406         spin_lock(&lp->lp_lock);
3407         return rc ? rc : LNET_REDISCOVER_PEER;
3408 }
3409
3410 /* Active side of ping. */
3411 static int lnet_peer_send_ping(struct lnet_peer *lp)
3412 __must_hold(&lp->lp_lock)
3413 {
3414         int nnis;
3415         int rc;
3416         int cpt;
3417
3418         lp->lp_state |= LNET_PEER_PING_SENT;
3419         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3420         spin_unlock(&lp->lp_lock);
3421
3422         cpt = lnet_net_lock_current();
3423         /* Refcount for MD. */
3424         lnet_peer_addref_locked(lp);
3425         lnet_net_unlock(cpt);
3426
3427         nnis = max(lp->lp_data_nnis, LNET_INTERFACES_MIN);
3428
3429         rc = lnet_send_ping(lp->lp_primary_nid, &lp->lp_ping_mdh, nnis, lp,
3430                             the_lnet.ln_dc_handler, false);
3431
3432         /*
3433          * if LNetMDBind in lnet_send_ping fails we need to decrement the
3434          * refcount on the peer ourselves; otherwise LNetMDUnlink will be
3435          * called, which will eventually do that.
3436          */
3437         if (rc > 0) {
3438                 lnet_net_lock(cpt);
3439                 lnet_peer_decref_locked(lp);
3440                 lnet_net_unlock(cpt);
3441                 rc = -rc; /* change the rc to a negative value */
3442                 goto fail_error;
3443         } else if (rc < 0) {
3444                 goto fail_error;
3445         }
3446
3447         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3448
3449         spin_lock(&lp->lp_lock);
3450         return 0;
3451
3452 fail_error:
3453         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3454         /*
3455          * The errors that get us here are considered hard errors and
3456          * cause Discovery to terminate. So we clear PING_SENT, but do
3457          * not set either PING_FAILED or FORCE_PING. In fact we need
3458          * to clear PING_FAILED, because the unlink event handler will
3459          * have set it if we called LNetMDUnlink() above.
3460          */
3461         spin_lock(&lp->lp_lock);
3462         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3463         return rc;
3464 }
3465
3466 /*
3467  * This function exists because you cannot call LNetMDUnlink() from an
3468  * event handler.
3469  */
3470 static int lnet_peer_push_failed(struct lnet_peer *lp)
3471 __must_hold(&lp->lp_lock)
3472 {
3473         struct lnet_handle_md mdh;
3474         int rc;
3475
3476         mdh = lp->lp_push_mdh;
3477         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3478         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3479         rc = lp->lp_push_error;
3480         lp->lp_push_error = 0;
3481         spin_unlock(&lp->lp_lock);
3482
3483         if (!LNetMDHandleIsInvalid(mdh))
3484                 LNetMDUnlink(mdh);
3485
3486         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3487         spin_lock(&lp->lp_lock);
3488         return rc ? rc : LNET_REDISCOVER_PEER;
3489 }
3490
3491 /*
3492  * Mark the peer as discovered.
3493  */
3494 static int lnet_peer_discovered(struct lnet_peer *lp)
3495 __must_hold(&lp->lp_lock)
3496 {
3497         lp->lp_state |= LNET_PEER_DISCOVERED;
3498         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3499                           LNET_PEER_REDISCOVER);
3500
3501         lp->lp_dc_error = 0;
3502
3503         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3504
3505         return 0;
3506 }
3507
3508 /* Active side of push. */
3509 static int lnet_peer_send_push(struct lnet_peer *lp)
3510 __must_hold(&lp->lp_lock)
3511 {
3512         struct lnet_ping_buffer *pbuf;
3513         struct lnet_process_id id;
3514         struct lnet_md md;
3515         int cpt;
3516         int rc;
3517
3518         /* Don't push to a non-multi-rail peer. */
3519         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3520                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3521                 /* if peer's NIDs are uptodate then peer is discovered */
3522                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3523                         rc = lnet_peer_discovered(lp);
3524                         return rc;
3525                 }
3526
3527                 return 0;
3528         }
3529
3530         lp->lp_state |= LNET_PEER_PUSH_SENT;
3531         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3532         spin_unlock(&lp->lp_lock);
3533
3534         cpt = lnet_net_lock_current();
3535         pbuf = the_lnet.ln_ping_target;
3536         lnet_ping_buffer_addref(pbuf);
3537         lnet_net_unlock(cpt);
3538
3539         /* Push source MD */
3540         md.start     = &pbuf->pb_info;
3541         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3542         md.threshold = 2; /* Put/Ack */
3543         md.max_size  = 0;
3544         md.options   = LNET_MD_TRACK_RESPONSE;
3545         md.handler   = the_lnet.ln_dc_handler;
3546         md.user_ptr  = lp;
3547
3548         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3549         if (rc) {
3550                 lnet_ping_buffer_decref(pbuf);
3551                 CERROR("Can't bind push source MD: %d\n", rc);
3552                 goto fail_error;
3553         }
3554
3555         cpt = lnet_net_lock_current();
3556         /* Refcount for MD. */
3557         lnet_peer_addref_locked(lp);
3558         id.pid = LNET_PID_LUSTRE;
3559         if (lp->lp_disc_dst_nid != LNET_NID_ANY)
3560                 id.nid = lp->lp_disc_dst_nid;
3561         else
3562                 id.nid = lp->lp_primary_nid;
3563         lnet_net_unlock(cpt);
3564
3565         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
3566                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3567                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3568
3569         /*
3570          * Reset the discovery NIDs. There is no need to restrict sending
3571          * from that source if we call lnet_push_update_to_peers(). It'll
3572          * get set to a specific NID if we initiate discovery from
3573          * scratch.
3574          */
3575         lp->lp_disc_src_nid = LNET_NID_ANY;
3576         lp->lp_disc_dst_nid = LNET_NID_ANY;
3577
3578         if (rc)
3579                 goto fail_unlink;
3580
3581         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3582
3583         spin_lock(&lp->lp_lock);
3584         return 0;
3585
3586 fail_unlink:
3587         LNetMDUnlink(lp->lp_push_mdh);
3588         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3589 fail_error:
3590         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3591         /*
3592          * The errors that get us here are considered hard errors and
3593          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3594          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3595          * because the unlink event handler will have set it if we
3596          * called LNetMDUnlink() above.
3597          */
3598         spin_lock(&lp->lp_lock);
3599         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3600         return rc;
3601 }
3602
3603 /*
3604  * An unrecoverable error was encountered during discovery.
3605  * Set error status in peer and abort discovery.
3606  */
3607 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3608 {
3609         CDEBUG(D_NET, "Discovery error %s: %d\n",
3610                libcfs_nid2str(lp->lp_primary_nid), error);
3611
3612         spin_lock(&lp->lp_lock);
3613         lp->lp_dc_error = error;
3614         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3615         lp->lp_state |= LNET_PEER_REDISCOVER;
3616         spin_unlock(&lp->lp_lock);
3617 }
3618
3619 /*
3620  * Wait for work to be queued or some other change that must be
3621  * attended to. Returns non-zero if the discovery thread should shut
3622  * down.
3623  */
3624 static int lnet_peer_discovery_wait_for_work(void)
3625 {
3626         int cpt;
3627         int rc = 0;
3628
3629         DEFINE_WAIT(wait);
3630
3631         cpt = lnet_net_lock_current();
3632         for (;;) {
3633                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3634                                 TASK_INTERRUPTIBLE);
3635                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3636                         break;
3637                 if (lnet_push_target_resize_needed() ||
3638                     the_lnet.ln_push_target->pb_needs_post)
3639                         break;
3640                 if (!list_empty(&the_lnet.ln_dc_request))
3641                         break;
3642                 if (!list_empty(&the_lnet.ln_msg_resend))
3643                         break;
3644                 lnet_net_unlock(cpt);
3645
3646                 /*
3647                  * wake up at most every second to check whether there are peers
3648                  * that have been stuck on the working queue for longer than
3649                  * the peer timeout.
3650                  */
3651                 schedule_timeout(cfs_time_seconds(1));
3652                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3653                 cpt = lnet_net_lock_current();
3654         }
3655         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3656
3657         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3658                 rc = -ESHUTDOWN;
3659
3660         lnet_net_unlock(cpt);
3661
3662         CDEBUG(D_NET, "woken: %d\n", rc);
3663
3664         return rc;
3665 }
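
/*
 * The wait loop above deliberately uses a bounded schedule_timeout() rather
 * than an open-ended schedule(): even with no wakeup event, the thread gets
 * control back roughly once a second so peers stuck on the working queue can
 * be timed out. A stripped-down sketch of the idiom with hypothetical demo_*
 * names:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static bool demo_work_ready;

static void demo_wait_for_work(void)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&demo_waitq, &wait, TASK_INTERRUPTIBLE);
		if (demo_work_ready)
			break;
		/* bounded sleep: regain control at least once a second */
		schedule_timeout(HZ);
		finish_wait(&demo_waitq, &wait);
	}
	finish_wait(&demo_waitq, &wait);
}
#endif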
3666
3667 /*
3668  * Messages that were pending on a destroyed peer are put on a global
3669  * resend list. The resend list is checked by the discovery thread when
3670  * it wakes up, and any messages on it are resent. These messages can
3671  * still be sendable if the lpni that was the initial cause of the
3672  * re-queue was transferred to another peer.
3673  *
3674  * It is possible that LNet could be shut down while we're iterating
3675  * through the list. lnet_shutdown_lndnets() will attempt to access the
3676  * resend list, but will have to wait until the spinlock is released, by
3677  * which time there shouldn't be any more messages on the resend list.
3678  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3679  * for the messages so they can be released. The other case is that
3680  * lnet_shutdown_lndnets() can finalize all the messages before this
3681  * function can visit the resend list, in which case this function will be
3682  * a no-op.
3683  */
3684 static void lnet_resend_msgs(void)
3685 {
3686         struct lnet_msg *msg, *tmp;
3687         LIST_HEAD(resend);
3688         int rc;
3689
3690         spin_lock(&the_lnet.ln_msg_resend_lock);
3691         list_splice(&the_lnet.ln_msg_resend, &resend);
3692         spin_unlock(&the_lnet.ln_msg_resend_lock);
3693
3694         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3695                 list_del_init(&msg->msg_list);
3696                 rc = lnet_send(msg->msg_src_nid_param, msg,
3697                                msg->msg_rtr_nid_param);
3698                 if (rc < 0) {
3699                         CNETERR("Error sending %s to %s: %d\n",
3700                                lnet_msgtyp2str(msg->msg_type),
3701                                libcfs_id2str(msg->msg_target), rc);
3702                         lnet_finalize(msg, rc);
3703                 }
3704         }
3705 }
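
/*
 * lnet_resend_msgs() follows the usual drain idiom: splice the shared list
 * onto a private head while holding the spinlock, then do the per-message
 * work (which may block or take other locks) without it. A generic sketch
 * of that idiom with hypothetical demo_* names:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_item {
	struct list_head di_link;
};

static LIST_HEAD(demo_pending);		/* shared; protected by demo_lock */
static DEFINE_SPINLOCK(demo_lock);

static void demo_drain_pending(void (*process)(struct demo_item *))
{
	struct demo_item *item, *tmp;
	LIST_HEAD(batch);

	/* grab the whole backlog in one shot under the lock */
	spin_lock(&demo_lock);
	list_splice_init(&demo_pending, &batch);
	spin_unlock(&demo_lock);

	/* the potentially blocking work happens outside the lock */
	list_for_each_entry_safe(item, tmp, &batch, di_link) {
		list_del_init(&item->di_link);
		process(item);
	}
}
#endif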
3706
3707 /* The discovery thread. */
3708 static int lnet_peer_discovery(void *arg)
3709 {
3710         struct lnet_peer *lp;
3711         int rc;
3712
3713         wait_for_completion(&the_lnet.ln_started);
3714
3715         CDEBUG(D_NET, "started\n");
3716
3717         for (;;) {
3718                 if (lnet_peer_discovery_wait_for_work())
3719                         break;
3720
3721                 if (lnet_push_target_resize_needed())
3722                         lnet_push_target_resize();
3723                 else if (the_lnet.ln_push_target->pb_needs_post)
3724                         lnet_push_target_post(the_lnet.ln_push_target,
3725                                               &the_lnet.ln_push_target_md);
3726
3727                 lnet_resend_msgs();
3728
3729                 lnet_net_lock(LNET_LOCK_EX);
3730                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3731                         lnet_net_unlock(LNET_LOCK_EX);
3732                         break;
3733                 }
3734
3735                 /*
3736                  * Process all incoming discovery work requests.  When
3737                  * discovery must wait on a peer to change state, it
3738                  * is added to the tail of the ln_dc_working queue. A
3739                  * timestamp keeps track of when the peer was added,
3740                  * so we can time out discovery requests that take too
3741                  * long.
3742                  */
3743                 while (!list_empty(&the_lnet.ln_dc_request)) {
3744                         lp = list_first_entry(&the_lnet.ln_dc_request,
3745                                               struct lnet_peer, lp_dc_list);
3746                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3747                         /*
3748                          * set the time the peer was put on the dc_working
3749                          * queue. It shouldn't remain on the queue
3750                          * forever, in case the GET message (for ping)
3751                          * doesn't get a REPLY or the PUT message (for
3752                          * push) doesn't get an ACK.
3753                          */
3754                         lp->lp_last_queued = ktime_get_real_seconds();
3755                         lnet_net_unlock(LNET_LOCK_EX);
3756
3757                         if (lnet_push_target_resize_needed())
3758                                 lnet_push_target_resize();
3759                         else if (the_lnet.ln_push_target->pb_needs_post)
3760                                 lnet_push_target_post(the_lnet.ln_push_target,
3761                                                       &the_lnet.ln_push_target_md);
3762
3763                         /*
3764                          * Select an action depending on the state of
3765                          * the peer and whether discovery is disabled.
3766                          * The check whether discovery is disabled is
3767                          * done after the code that handles processing
3768                          * for arrived data, cleanup for failures, and
3769                          * forcing a Ping or Push.
3770                          */
3771                         spin_lock(&lp->lp_lock);
3772                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3773                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3774                                 lp->lp_state);
3775                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
3776                                             LNET_PEER_MARK_DELETED))
3777                                 rc = lnet_peer_deletion(lp);
3778                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3779                                 rc = lnet_peer_data_present(lp);
3780                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3781                                 rc = lnet_peer_ping_failed(lp);
3782                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3783                                 rc = lnet_peer_push_failed(lp);
3784                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3785                                 rc = lnet_peer_send_ping(lp);
3786                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3787                                 rc = lnet_peer_send_push(lp);
3788                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3789                                 rc = lnet_peer_send_ping(lp);
3790                         else if (lnet_peer_needs_push(lp))
3791                                 rc = lnet_peer_send_push(lp);
3792                         else
3793                                 rc = lnet_peer_discovered(lp);
3794                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3795                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3796                                 lp->lp_state, rc);
3797                         spin_unlock(&lp->lp_lock);
3798
3799                         lnet_net_lock(LNET_LOCK_EX);
3800                         if (rc == LNET_REDISCOVER_PEER) {
3801                                 list_move(&lp->lp_dc_list,
3802                                           &the_lnet.ln_dc_request);
3803                         } else if (rc) {
3804                                 lnet_peer_discovery_error(lp, rc);
3805                         }
3806                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3807                                 lnet_peer_discovery_complete(lp);
3808                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3809                                 break;
3810
3811                 }
3812
3813                 lnet_net_unlock(LNET_LOCK_EX);
3814         }
3815
3816         CDEBUG(D_NET, "stopping\n");
3817         /*
3818          * Clean up before telling lnet_peer_discovery_stop() that
3819          * we're done. Use wake_up() below to somewhat reduce the
3820          * size of the thundering herd if there are multiple threads
3821          * waiting on discovery of a single peer.
3822          */
3823
3824         /* Queue cleanup 1: stop all pending pings and pushes. */
3825         lnet_net_lock(LNET_LOCK_EX);
3826         while (!list_empty(&the_lnet.ln_dc_working)) {
3827                 lp = list_first_entry(&the_lnet.ln_dc_working,
3828                                       struct lnet_peer, lp_dc_list);
3829                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3830                 lnet_net_unlock(LNET_LOCK_EX);
3831                 lnet_peer_cancel_discovery(lp);
3832                 lnet_net_lock(LNET_LOCK_EX);
3833         }
3834         lnet_net_unlock(LNET_LOCK_EX);
3835
3836         /* Queue cleanup 2: wait for the expired queue to clear. */
3837         while (!list_empty(&the_lnet.ln_dc_expired))
3838                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3839
3840         /* Queue cleanup 3: clear the request queue. */
3841         lnet_net_lock(LNET_LOCK_EX);
3842         while (!list_empty(&the_lnet.ln_dc_request)) {
3843                 lp = list_first_entry(&the_lnet.ln_dc_request,
3844                                       struct lnet_peer, lp_dc_list);
3845                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3846                 lnet_peer_discovery_complete(lp);
3847         }
3848         lnet_net_unlock(LNET_LOCK_EX);
3849
3850         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
3851         the_lnet.ln_dc_handler = NULL;
3852
3853         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3854         wake_up(&the_lnet.ln_dc_waitq);
3855
3856         CDEBUG(D_NET, "stopped\n");
3857
3858         return 0;
3859 }
3860
3861 /* ln_api_mutex is held on entry. */
3862 int lnet_peer_discovery_start(void)
3863 {
3864         struct task_struct *task;
3865         int rc = 0;
3866
3867         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3868                 return -EALREADY;
3869
3870         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
3871         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3872         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3873         if (IS_ERR(task)) {
3874                 rc = PTR_ERR(task);
3875                 CERROR("Can't start peer discovery thread: %d\n", rc);
3876
3877                 the_lnet.ln_dc_handler = NULL;
3878
3879                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3880         }
3881
3882         CDEBUG(D_NET, "discovery start: %d\n", rc);
3883
3884         return rc;
3885 }
3886
3887 /* ln_api_mutex is held on entry. */
3888 void lnet_peer_discovery_stop(void)
3889 {
3890         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3891                 return;
3892
3893         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3894         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3895
3896         /* In the LNetNIInit() path we may be stopping discovery before it
3897          * entered its work loop
3898          */
3899         if (!completion_done(&the_lnet.ln_started))
3900                 complete(&the_lnet.ln_started);
3901         else
3902                 wake_up(&the_lnet.ln_dc_waitq);
3903
3904         wait_event(the_lnet.ln_dc_waitq,
3905                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3906
3907         LASSERT(list_empty(&the_lnet.ln_dc_request));
3908         LASSERT(list_empty(&the_lnet.ln_dc_working));
3909         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3910
3911         CDEBUG(D_NET, "discovery stopped\n");
3912 }
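
/*
 * Start and stop above form a small lifecycle handshake: start flips the
 * state to RUNNING and spawns the kthread; stop flips the state to STOPPING,
 * wakes the thread (or completes ln_started if it never entered its loop),
 * and then sleeps on the same waitqueue until the thread reports SHUTDOWN.
 * A compressed sketch of that handshake with hypothetical demo_* names:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/err.h>

enum { DEMO_SHUTDOWN, DEMO_RUNNING, DEMO_STOPPING };

static int demo_state = DEMO_SHUTDOWN;
static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);

static int demo_thread(void *arg)
{
	/* ... process work until told to stop ... */
	wait_event(demo_waitq, demo_state == DEMO_STOPPING);
	/* ... drain queues, release resources ... */
	demo_state = DEMO_SHUTDOWN;
	wake_up(&demo_waitq);			/* lets demo_stop() return */
	return 0;
}

static int demo_start(void)
{
	struct task_struct *task;

	demo_state = DEMO_RUNNING;
	task = kthread_run(demo_thread, NULL, "demo_thread");
	if (IS_ERR(task)) {
		demo_state = DEMO_SHUTDOWN;
		return PTR_ERR(task);
	}
	return 0;
}

static void demo_stop(void)
{
	if (demo_state == DEMO_SHUTDOWN)
		return;
	demo_state = DEMO_STOPPING;
	wake_up(&demo_waitq);			/* kick the thread */
	wait_event(demo_waitq, demo_state == DEMO_SHUTDOWN);
}
#endif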
3913
3914 /* Debugging */
3915
3916 void
3917 lnet_debug_peer(lnet_nid_t nid)
3918 {
3919         char                    *aliveness = "NA";
3920         struct lnet_peer_ni     *lp;
3921         int                     cpt;
3922
3923         cpt = lnet_cpt_of_nid(nid, NULL);
3924         lnet_net_lock(cpt);
3925
3926         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3927         if (IS_ERR(lp)) {
3928                 lnet_net_unlock(cpt);
3929                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3930                 return;
3931         }
3932
3933         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3934                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3935
3936         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3937                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
3938                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3939                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3940                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3941
3942         lnet_peer_ni_decref_locked(lp);
3943
3944         lnet_net_unlock(cpt);
3945 }
3946
3947 /* Gathering information for userspace. */
3948
3949 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3950                           char aliveness[LNET_MAX_STR_LEN],
3951                           __u32 *cpt_iter, __u32 *refcount,
3952                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3953                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3954                           __u32 *peer_tx_qnob)
3955 {
3956         struct lnet_peer_table          *peer_table;
3957         struct lnet_peer_ni             *lp;
3958         int                             j;
3959         int                             lncpt;
3960         bool                            found = false;
3961
3962         /* get the number of CPTs */
3963         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3964
3965         /* if the cpt number to be examined is >= the number of cpts in
3966          * the system then indicate that there are no more cpts to examine
3967          */
3968         if (*cpt_iter >= lncpt)
3969                 return -ENOENT;
3970
3971         /* get the current table */
3972         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3973         /* if the ptable is NULL then there are no more cpts to examine */
3974         if (peer_table == NULL)
3975                 return -ENOENT;
3976
3977         lnet_net_lock(*cpt_iter);
3978
3979         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3980                 struct list_head *peers = &peer_table->pt_hash[j];
3981
3982                 list_for_each_entry(lp, peers, lpni_hashlist) {
3983                         if (!nid_is_nid4(&lp->lpni_nid))
3984                                 continue;
3985                         if (peer_index-- > 0)
3986                                 continue;
3987
3988                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3989                         if (lnet_isrouter(lp) ||
3990                                 lnet_peer_aliveness_enabled(lp))
3991                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3992                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3993
3994                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
3995                         *refcount = kref_read(&lp->lpni_kref);
3996                         *ni_peer_tx_credits =
3997                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3998                         *peer_tx_credits = lp->lpni_txcredits;
3999                         *peer_rtr_credits = lp->lpni_rtrcredits;
4000                         *peer_min_rtr_credits = lp->lpni_mintxcredits;
4001                         *peer_tx_qnob = lp->lpni_txqnob;
4002
4003                         found = true;
4004                 }
4005
4006         }
4007         lnet_net_unlock(*cpt_iter);
4008
4009         *cpt_iter = lncpt;
4010
4011         return found ? 0 : -ENOENT;
4012 }
4013
4014 /* ln_api_mutex is held, which keeps the peer list stable */
4015 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4016 {
4017         struct lnet_ioctl_element_stats *lpni_stats;
4018         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4019         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4020         struct lnet_peer_ni_credit_info *lpni_info;
4021         struct lnet_peer_ni *lpni;
4022         struct lnet_peer *lp;
4023         lnet_nid_t nid;
4024         __u32 size;
4025         int rc;
4026
4027         lp = lnet_find_peer(cfg->prcfg_prim_nid);
4028
4029         if (!lp) {
4030                 rc = -ENOENT;
4031                 goto out;
4032         }
4033
4034         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4035                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4036         size *= lp->lp_nnis;
4037         if (size > cfg->prcfg_size) {
4038                 cfg->prcfg_size = size;
4039                 rc = -E2BIG;
4040                 goto out_lp_decref;
4041         }
4042
4043         cfg->prcfg_prim_nid = lp->lp_primary_nid;
4044         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4045         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
4046         cfg->prcfg_count = lp->lp_nnis;
4047         cfg->prcfg_size = size;
4048         cfg->prcfg_state = lp->lp_state;
4049
4050         /* Allocate helper buffers. */
4051         rc = -ENOMEM;
4052         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4053         if (!lpni_info)
4054                 goto out_lp_decref;
4055         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4056         if (!lpni_stats)
4057                 goto out_free_info;
4058         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4059         if (!lpni_msg_stats)
4060                 goto out_free_stats;
4061         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4062         if (!lpni_hstats)
4063                 goto out_free_msg_stats;
4064
4065
4066         lpni = NULL;
4067         rc = -EFAULT;
4068         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4069                 if (!nid_is_nid4(&lpni->lpni_nid))
4070                         continue;
4071                 nid = lnet_nid_to_nid4(&lpni->lpni_nid);
4072                 if (copy_to_user(bulk, &nid, sizeof(nid)))
4073                         goto out_free_hstats;
4074                 bulk += sizeof(nid);
4075
4076                 memset(lpni_info, 0, sizeof(*lpni_info));
4077                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4078                 if (lnet_isrouter(lpni) ||
4079                         lnet_peer_aliveness_enabled(lpni))
4080                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
4081                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4082
4083                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4084                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4085                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4086                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4087                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4088                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4089                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4090                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4091                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4092                         goto out_free_hstats;
4093                 bulk += sizeof(*lpni_info);
4094
4095                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4096                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4097                                                             LNET_STATS_TYPE_SEND);
4098                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4099                                                             LNET_STATS_TYPE_RECV);
4100                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4101                                                             LNET_STATS_TYPE_DROP);
4102                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4103                         goto out_free_hstats;
4104                 bulk += sizeof(*lpni_stats);
4105                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4106                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4107                         goto out_free_hstats;
4108                 bulk += sizeof(*lpni_msg_stats);
4109                 lpni_hstats->hlpni_network_timeout =
4110                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4111                 lpni_hstats->hlpni_remote_dropped =
4112                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4113                 lpni_hstats->hlpni_remote_timeout =
4114                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4115                 lpni_hstats->hlpni_remote_error =
4116                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4117                 lpni_hstats->hlpni_health_value =
4118                   atomic_read(&lpni->lpni_healthv);
4119                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4120                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4121                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4122                         goto out_free_hstats;
4123                 bulk += sizeof(*lpni_hstats);
4124         }
4125         rc = 0;
4126
4127 out_free_hstats:
4128         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4129 out_free_msg_stats:
4130         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4131 out_free_stats:
4132         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4133 out_free_info:
4134         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4135 out_lp_decref:
4136         lnet_peer_decref_locked(lp);
4137 out:
4138         return rc;
4139 }
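
/*
 * The prcfg_size handling above is the common two-pass ioctl convention:
 * compute the space the reply needs; if the caller's buffer is too small,
 * report the required size back through the config struct and fail with
 * -E2BIG so userspace can retry with a larger buffer. A minimal sketch of
 * that convention with hypothetical demo_* names:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <errno.h>
#include <stdint.h>

struct demo_cfg {
	uint32_t dc_size;	/* in: caller's buffer size; out: required size */
	uint32_t dc_count;	/* out: number of records returned */
};

static int demo_get_records(struct demo_cfg *cfg, uint32_t nrecords,
			    uint32_t record_size)
{
	uint32_t need = nrecords * record_size;

	if (need > cfg->dc_size) {
		cfg->dc_size = need;	/* tell the caller how much to allocate */
		return -E2BIG;		/* caller retries with a bigger buffer */
	}

	cfg->dc_size = need;
	cfg->dc_count = nrecords;
	/* ... copy nrecords records into the caller's buffer ... */
	return 0;
}
#endif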
4140
4141 /* must hold net_lock/0 */
4142 void
4143 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4144                                      struct list_head *recovery_queue,
4145                                      time64_t now)
4146 {
4147         /* the monitor thread could've shut down and cleaned up the queues */
4148         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4149                 return;
4150
4151         if (!list_empty(&lpni->lpni_recovery))
4152                 return;
4153
4154         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4155                 return;
4156
4157         if (!lpni->lpni_last_alive) {
4158                 CDEBUG(D_NET,
4159                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4160                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4161                        lpni->lpni_last_alive);
4162                 return;
4163         }
4164
4165         if (lnet_recovery_limit &&
4166             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4167                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4168                        libcfs_nidstr(&lpni->lpni_nid),
4169                        lpni->lpni_last_alive);
4170                 /* Reset the ping count so that if this peer NI is added back to
4171                  * the recovery queue we will send the first ping right away.
4172                  */
4173                 lpni->lpni_ping_count = 0;
4174                 return;
4175         }
4176
4177         /* This peer NI is going on the recovery queue, so take a ref on it */
4178         lnet_peer_ni_addref_locked(lpni);
4179
4180         lnet_peer_ni_set_next_ping(lpni, now);
4181
4182         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4183                libcfs_nidstr(&lpni->lpni_nid),
4184                lpni->lpni_ping_count,
4185                lpni->lpni_next_ping,
4186                lpni->lpni_last_alive,
4187                atomic_read(&lpni->lpni_healthv));
4188
4189         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4190 }
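
/*
 * The early returns above amount to a small eligibility predicate: peer NIs
 * that are already queued, fully healthy, never seen alive, or aged past the
 * recovery limit stay off the recovery queue. A boolean sketch of that
 * predicate; the demo_* parameters are hypothetical stand-ins for the fields
 * consulted above (limit == 0 means no aging limit, mirroring
 * lnet_recovery_limit):
 */
#if 0	/* illustrative sketch only, not compiled */
#include <stdbool.h>
#include <stdint.h>

static bool demo_needs_recovery(bool already_queued, int health,
				int max_health, int64_t last_alive,
				int64_t now, int64_t limit)
{
	if (already_queued)
		return false;		/* at most one queue entry per peer NI */
	if (health == max_health)
		return false;		/* nothing to recover */
	if (!last_alive)
		return false;		/* never seen alive: not eligible */
	if (limit && now > last_alive + limit)
		return false;		/* aged out of recovery */
	return true;
}
#endif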
4191
4192 /* Call with the ln_api_mutex held */
4193 void
4194 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
4195 {
4196         struct lnet_peer_table *ptable;
4197         struct lnet_peer *lp;
4198         struct lnet_peer_net *lpn;
4199         struct lnet_peer_ni *lpni;
4200         int lncpt;
4201         int cpt;
4202         time64_t now;
4203
4204         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4205                 return;
4206
4207         now = ktime_get_seconds();
4208
4209         if (!all) {
4210                 lnet_net_lock(LNET_LOCK_EX);
4211                 lpni = lnet_find_peer_ni_locked(nid);
4212                 if (!lpni) {
4213                         lnet_net_unlock(LNET_LOCK_EX);
4214                         return;
4215                 }
4216                 lnet_set_lpni_healthv_locked(lpni, value);
4217                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4218                                              &the_lnet.ln_mt_peerNIRecovq, now);
4219                 lnet_peer_ni_decref_locked(lpni);
4220                 lnet_net_unlock(LNET_LOCK_EX);
4221                 return;
4222         }
4223
4224         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4225
4226         /*
4227          * Walk all the peers and reset the health value for each one to the
4228          * specified value.
4229          */
4230         lnet_net_lock(LNET_LOCK_EX);
4231         for (cpt = 0; cpt < lncpt; cpt++) {
4232                 ptable = the_lnet.ln_peer_tables[cpt];
4233                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4234                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4235                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4236                                                     lpni_peer_nis) {
4237                                         lnet_set_lpni_healthv_locked(lpni,
4238                                                                      value);
4239                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4240                                              &the_lnet.ln_mt_peerNIRecovq, now);
4241                                 }
4242                         }
4243                 }
4244         }
4245         lnet_net_unlock(LNET_LOCK_EX);
4246 }
4247