/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/peer.c
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <linux/uaccess.h>

#include <lnet/lib-lnet.h>
#include <uapi/linux/lnet/lnet-dlc.h>

/* Value indicating that recovery needs to re-check a peer immediately. */
#define LNET_REDISCOVER_PEER    (1)

static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);

static void
lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
{
        if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
                list_del_init(&lpni->lpni_on_remote_peer_ni_list);
                lnet_peer_ni_decref_locked(lpni);
        }
}

void
lnet_peer_net_added(struct lnet_net *net)
{
        struct lnet_peer_ni *lpni, *tmp;

        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list) {

                if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
                        lpni->lpni_net = net;

                        spin_lock(&lpni->lpni_lock);
                        lpni->lpni_txcredits =
                                lpni->lpni_net->net_tunables.lct_peer_tx_credits;
                        lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                        lpni->lpni_rtrcredits =
                                lnet_peer_buffer_credits(lpni->lpni_net);
                        lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
                        spin_unlock(&lpni->lpni_lock);

                        lnet_peer_remove_from_remote_list(lpni);
                }
        }
}

static void
lnet_peer_tables_destroy(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        if (!the_lnet.ln_peer_tables)
                return;

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                hash = ptable->pt_hash;
                if (!hash) /* not initialized */
                        break;

                LASSERT(list_empty(&ptable->pt_zombie_list));

                ptable->pt_hash = NULL;
                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        LASSERT(list_empty(&hash[j]));

                LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
        }

        cfs_percpt_free(the_lnet.ln_peer_tables);
        the_lnet.ln_peer_tables = NULL;
}

int
lnet_peer_tables_create(void)
{
        struct lnet_peer_table  *ptable;
        struct list_head        *hash;
        int                     i;
        int                     j;

        the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
                                                   sizeof(*ptable));
        if (the_lnet.ln_peer_tables == NULL) {
                CERROR("Failed to allocate cpu-partition peer tables\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
                                 LNET_PEER_HASH_SIZE * sizeof(*hash));
                if (hash == NULL) {
                        CERROR("Failed to create peer hash table\n");
                        lnet_peer_tables_destroy();
                        return -ENOMEM;
                }

                spin_lock_init(&ptable->pt_zombie_lock);
                INIT_LIST_HEAD(&ptable->pt_zombie_list);

                INIT_LIST_HEAD(&ptable->pt_peer_list);

                for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
                        INIT_LIST_HEAD(&hash[j]);
                ptable->pt_hash = hash; /* sign of initialization */
        }

        return 0;
}

static struct lnet_peer_ni *
lnet_peer_ni_alloc(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_net *net;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
        if (!lpni)
                return NULL;

        INIT_LIST_HEAD(&lpni->lpni_txq);
        INIT_LIST_HEAD(&lpni->lpni_hashlist);
        INIT_LIST_HEAD(&lpni->lpni_peer_nis);
        INIT_LIST_HEAD(&lpni->lpni_recovery);
        INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
        LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);

        spin_lock_init(&lpni->lpni_lock);

        if (lnet_peers_start_down())
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
        else
                lpni->lpni_ns_status = LNET_NI_STATUS_UP;
        lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
        lpni->lpni_nid = nid;
        lpni->lpni_cpt = cpt;
        atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);

        net = lnet_get_net_locked(LNET_NIDNET(nid));
        lpni->lpni_net = net;
        if (net) {
                lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
                lpni->lpni_mintxcredits = lpni->lpni_txcredits;
                lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
                lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
        } else {
                /*
                 * This peer_ni is not on a local network, so we
                 * cannot add the credits here. In case the net is
                 * added later, add the peer_ni to the remote peer ni
                 * list so it can be easily found and revisited.
                 */
                /* FIXME: per-net implementation instead? */
                atomic_inc(&lpni->lpni_refcount);
                list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
                              &the_lnet.ln_remote_peer_ni_list);
        }

        CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));

        return lpni;
}

static struct lnet_peer_net *
lnet_peer_net_alloc(__u32 net_id)
{
        struct lnet_peer_net *lpn;

        LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
        if (!lpn)
                return NULL;

        INIT_LIST_HEAD(&lpn->lpn_peer_nets);
        INIT_LIST_HEAD(&lpn->lpn_peer_nis);
        lpn->lpn_net_id = net_id;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        return lpn;
}

void
lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
{
        struct lnet_peer *lp;

        CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));

        LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
        LASSERT(list_empty(&lpn->lpn_peer_nis));
        LASSERT(list_empty(&lpn->lpn_peer_nets));
        lp = lpn->lpn_peer;
        lpn->lpn_peer = NULL;
        LIBCFS_FREE(lpn, sizeof(*lpn));

        lnet_peer_decref_locked(lp);
}

static struct lnet_peer *
lnet_peer_alloc(lnet_nid_t nid)
{
        struct lnet_peer *lp;

        LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
        if (!lp)
                return NULL;

        INIT_LIST_HEAD(&lp->lp_rtrq);
        INIT_LIST_HEAD(&lp->lp_routes);
        INIT_LIST_HEAD(&lp->lp_peer_list);
        INIT_LIST_HEAD(&lp->lp_peer_nets);
        INIT_LIST_HEAD(&lp->lp_dc_list);
        INIT_LIST_HEAD(&lp->lp_dc_pendq);
        INIT_LIST_HEAD(&lp->lp_rtr_list);
        init_waitqueue_head(&lp->lp_dc_waitq);
        spin_lock_init(&lp->lp_lock);
        lp->lp_primary_nid = nid;

        /*
         * all peers created on a router should have health on
         * if it's not already on.
         */
        if (the_lnet.ln_routing && !lnet_health_sensitivity)
                lp->lp_health_sensitivity = 1;

        /*
         * Turn off discovery for loopback peer. If you're creating a peer
         * for the loopback interface then that was initiated when we
         * attempted to send a message over the loopback. There is no need
         * to ever use a different interface when sending messages to
         * myself.
         */
        if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
                lp->lp_state = LNET_PEER_NO_DISCOVERY;
        lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        return lp;
}

void
lnet_destroy_peer_locked(struct lnet_peer *lp)
{
        CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));

        LASSERT(atomic_read(&lp->lp_refcount) == 0);
        LASSERT(lp->lp_rtr_refcount == 0);
        LASSERT(list_empty(&lp->lp_peer_nets));
        LASSERT(list_empty(&lp->lp_peer_list));
        LASSERT(list_empty(&lp->lp_dc_list));

        if (lp->lp_data)
                lnet_ping_buffer_decref(lp->lp_data);

        /*
         * if there are messages still on the pending queue, then make
         * sure to queue them on the ln_msg_resend list so they can be
         * resent at a later point if the discovery thread is still
         * running.
         * If the discovery thread has stopped, then the wakeup will be a
         * no-op, and it is expected the lnet_shutdown_lndnets() will
         * eventually be called, which will traverse this list and
         * finalize the messages on the list.
         * We can not resend them now because we're holding the cpt lock.
         * Releasing the lock can cause an inconsistent state
         */
        spin_lock(&the_lnet.ln_msg_resend_lock);
        spin_lock(&lp->lp_lock);
        list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
        spin_unlock(&lp->lp_lock);
        spin_unlock(&the_lnet.ln_msg_resend_lock);
        wake_up(&the_lnet.ln_dc_waitq);

        LIBCFS_FREE(lp, sizeof(*lp));
}

/*
 * Detach a peer_ni from its peer_net. If this was the last peer_ni on
 * that peer_net, detach the peer_net from the peer.
 *
 * Call with lnet_net_lock/EX held
 */
static void
lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer_net *lpn;
        struct lnet_peer *lp;

        /*
         * Belts and suspenders: gracefully handle teardown of a
         * partially connected peer_ni.
         */
        lpn = lpni->lpni_peer_net;

        list_del_init(&lpni->lpni_peer_nis);
        /*
         * If there are no lpni's left, we detach lpn from
         * lp_peer_nets, so it cannot be found anymore.
         */
        if (list_empty(&lpn->lpn_peer_nis))
                list_del_init(&lpn->lpn_peer_nets);

        /* Update peer NID count. */
        lp = lpn->lpn_peer;
        lp->lp_nnis--;

        /*
         * If there are no more peer nets, make the peer unfindable
         * via the peer_tables.
         *
         * Otherwise, if the peer is DISCOVERED, tell discovery to
         * take another look at it. This is a no-op if discovery for
         * this peer did the detaching.
         */
        if (list_empty(&lp->lp_peer_nets)) {
                list_del_init(&lp->lp_peer_list);
                ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
                ptable->pt_peers--;
        } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
                /* Discovery isn't running, nothing to do here. */
        } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
                lnet_peer_queue_for_discovery(lp);
                wake_up(&the_lnet.ln_dc_waitq);
        }
        CDEBUG(D_NET, "peer %s NID %s\n",
               libcfs_nid2str(lp->lp_primary_nid),
               libcfs_nid2str(lpni->lpni_nid));
}

/* called with lnet_net_lock LNET_LOCK_EX held */
static int
lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
{
        struct lnet_peer_table *ptable = NULL;

        /* don't remove a peer_ni if it's also a gateway */
        if (lnet_isrouter(lpni) && !force) {
                CERROR("Peer NI %s is a gateway. Can not delete it\n",
                       libcfs_nid2str(lpni->lpni_nid));
                return -EBUSY;
        }

        lnet_peer_remove_from_remote_list(lpni);

        /* remove peer ni from the hash list. */
        list_del_init(&lpni->lpni_hashlist);

        /*
         * indicate the peer is being deleted so the monitor thread can
         * remove it from the recovery queue.
         */
        spin_lock(&lpni->lpni_lock);
        lpni->lpni_state |= LNET_PEER_NI_DELETING;
        spin_unlock(&lpni->lpni_lock);

        /* decrement the ref count on the peer table */
        ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
        LASSERT(ptable->pt_number > 0);
        ptable->pt_number--;

        /*
         * The peer_ni can no longer be found with a lookup. But there
         * can be current users, so keep track of it on the zombie
         * list until the reference count has gone to zero.
         *
         * The last reference may be lost in a place where the
         * lnet_net_lock locks only a single cpt, and that cpt may not
         * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
         * has its own lock.
         */
        spin_lock(&ptable->pt_zombie_lock);
        list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
        ptable->pt_zombies++;
        spin_unlock(&ptable->pt_zombie_lock);

        /* no need to keep this peer_ni on the hierarchy anymore */
        lnet_peer_detach_peer_ni_locked(lpni);

        /* remove hashlist reference on peer_ni */
        lnet_peer_ni_decref_locked(lpni);

        return 0;
}

void lnet_peer_uninit(void)
{
        struct lnet_peer_ni *lpni, *tmp;

        lnet_net_lock(LNET_LOCK_EX);

        /* remove all peer_nis from the remote peer and the hash list */
        list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
                                 lpni_on_remote_peer_ni_list)
                lnet_peer_ni_del_locked(lpni, false);

        lnet_peer_tables_destroy();

        lnet_net_unlock(LNET_LOCK_EX);
}

static int
lnet_peer_del_locked(struct lnet_peer *peer)
{
        struct lnet_peer_ni *lpni = NULL, *lpni2;
        int rc = 0, rc2 = 0;

        CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));

        lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
        while (lpni != NULL) {
                lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
                rc = lnet_peer_ni_del_locked(lpni, false);
                if (rc != 0)
                        rc2 = rc;
                lpni = lpni2;
        }

        return rc2;
}

static int
lnet_peer_del(struct lnet_peer *peer)
{
        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_del_locked(peer);
        lnet_net_unlock(LNET_LOCK_EX);

        return 0;
}

/*
 * Delete a NID from a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:  Non-DLC deletion from DLC-configured peer.
 *  -ENOENT: No lnet_peer_ni corresponding to the nid.
 *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
 *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
 */
static int
lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = lp->lp_primary_nid;
        int rc = 0;
        bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;

        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }
        lpni = lnet_find_peer_ni_locked(nid);
        if (!lpni) {
                rc = -ENOENT;
                goto out;
        }
        lnet_peer_ni_decref_locked(lpni);
        if (lp != lpni->lpni_peer_net->lpn_peer) {
                rc = -ECHILD;
                goto out;
        }

        /*
         * This function only allows deletion of the primary NID if it
         * is the only NID.
         */
        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && !force) {
                rc = -EBUSY;
                goto out;
        }

        lnet_net_lock(LNET_LOCK_EX);

        if (nid == lp->lp_primary_nid && lp->lp_nnis != 1 && force) {
                struct lnet_peer_ni *lpni2;
                /* assign the next peer_ni to be the primary */
                lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
                LASSERT(lpni2);
                lp->lp_primary_nid = lpni2->lpni_nid;
        }
        rc = lnet_peer_ni_del_locked(lpni, force);

        lnet_net_unlock(LNET_LOCK_EX);

out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);

        return rc;
}
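
/*
 * Illustrative sketch, not part of the original file: how a caller that
 * already holds ln_api_mutex might invoke lnet_peer_del_nid() and map
 * the error codes documented above. The helper name is hypothetical.
 */
#if 0
static int example_del_nid(struct lnet_peer *lp, lnet_nid_t nid)
{
        int rc;

        /* LNET_PEER_CONFIGURED marks this as a DLC-driven request, so
         * it is allowed to modify a DLC-configured peer. */
        rc = lnet_peer_del_nid(lp, nid, LNET_PEER_CONFIGURED);
        if (rc == -EBUSY)
                CDEBUG(D_NET, "%s is primary; delete other NIDs first\n",
                       libcfs_nid2str(nid));
        return rc;
}
#endif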

static void
lnet_peer_table_cleanup_locked(struct lnet_net *net,
                               struct lnet_peer_table *ptable)
{
        int                      i;
        struct lnet_peer_ni     *next;
        struct lnet_peer_ni     *lpni;
        struct lnet_peer        *peer;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != NULL && net != lpni->lpni_net)
                                continue;

                        peer = lpni->lpni_peer_net->lpn_peer;
                        if (peer->lp_primary_nid != lpni->lpni_nid) {
                                lnet_peer_ni_del_locked(lpni, false);
                                continue;
                        }
                        /*
                         * Removing the primary NID implies removing
                         * the entire peer. Advance next beyond any
                         * peer_ni that belongs to the same peer.
                         */
                        list_for_each_entry_from(next, &ptable->pt_hash[i],
                                                 lpni_hashlist) {
                                if (next->lpni_peer_net->lpn_peer != peer)
                                        break;
                        }
                        lnet_peer_del_locked(peer);
                }
        }
}

static void
lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
{
        int     i = 3;

        spin_lock(&ptable->pt_zombie_lock);
        while (ptable->pt_zombies) {
                spin_unlock(&ptable->pt_zombie_lock);

                /* warn with decreasing frequency as the wait drags on */
                if (is_power_of_2(i)) {
                        CDEBUG(D_WARNING,
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
                }
                i++;
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1) >> 1);
                spin_lock(&ptable->pt_zombie_lock);
        }
        spin_unlock(&ptable->pt_zombie_lock);
}

static void
lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
                                struct lnet_peer_table *ptable)
{
        struct lnet_peer_ni     *lp;
        struct lnet_peer_ni     *tmp;
        lnet_nid_t              gw_nid;
        int                     i;

        for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
                list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
                                         lpni_hashlist) {
                        if (net != lp->lpni_net)
                                continue;

                        if (!lnet_isrouter(lp))
                                continue;

                        gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;

                        lnet_net_unlock(LNET_LOCK_EX);
                        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), gw_nid);
                        lnet_net_lock(LNET_LOCK_EX);
                }
        }
}

void
lnet_peer_tables_cleanup(struct lnet_net *net)
{
        int i;
        struct lnet_peer_table *ptable;

        LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
        /* If just deleting the peers for a NI, get rid of any routes these
         * peers are gateways for. */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_del_rtrs_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        /* Start the cleanup process */
        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
                lnet_net_lock(LNET_LOCK_EX);
                lnet_peer_table_cleanup_locked(net, ptable);
                lnet_net_unlock(LNET_LOCK_EX);
        }

        cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
                lnet_peer_ni_finalize_wait(ptable);
}

static struct lnet_peer_ni *
lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
{
        struct list_head        *peers;
        struct lnet_peer_ni     *lp;

        LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

        peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
        list_for_each_entry(lp, peers, lpni_hashlist) {
                if (lp->lpni_nid == nid) {
                        lnet_peer_ni_addref_locked(lp);
                        return lp;
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_find_peer_ni_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_table *ptable;
        int cpt;

        cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);

        ptable = the_lnet.ln_peer_tables[cpt];
        lpni = lnet_get_peer_ni_locked(ptable, nid);

        return lpni;
}

struct lnet_peer_ni *
lnet_peer_get_ni_locked(struct lnet_peer *lp, lnet_nid_t nid)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;

        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn)
                return NULL;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lpni->lpni_nid == nid)
                        return lpni;
        }

        return NULL;
}

struct lnet_peer *
lnet_find_peer(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer *lp = NULL;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_addref_locked(lp);
                lnet_peer_ni_decref_locked(lpni);
        }
        lnet_net_unlock(cpt);

        return lp;
}
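
/*
 * Illustrative sketch, not part of the original file: lnet_find_peer()
 * returns its peer with a reference held, so the caller must drop that
 * reference when done; lnet_peer_decref_locked() is called with the
 * net lock held. The helper name is hypothetical.
 */
#if 0
static void example_find_peer(lnet_nid_t nid)
{
        struct lnet_peer *lp = lnet_find_peer(nid);

        if (!lp)
                return;

        CDEBUG(D_NET, "peer %s found\n",
               libcfs_nid2str(lp->lp_primary_nid));

        lnet_net_lock(LNET_LOCK_EX);
        lnet_peer_decref_locked(lp);
        lnet_net_unlock(LNET_LOCK_EX);
}
#endif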

struct lnet_peer_net *
lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
{
        struct lnet_peer_net *net;

        if (!prev_lpn_id) {
                /* no net id provided; return the first net */
                net = list_first_entry_or_null(&lp->lp_peer_nets,
                                               struct lnet_peer_net,
                                               lpn_peer_nets);

                return net;
        }

        /* find the net after the one provided */
        list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
                if (net->lpn_net_id == prev_lpn_id) {
                        /*
                         * if we reached the end of the list loop to the
                         * beginning.
                         */
                        if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
                                return list_first_entry_or_null(&lp->lp_peer_nets,
                                                                struct lnet_peer_net,
                                                                lpn_peer_nets);
                        else
                                return list_next_entry(net, lpn_peer_nets);
                }
        }

        return NULL;
}

struct lnet_peer_ni *
lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
                             struct lnet_peer_net *peer_net,
                             struct lnet_peer_ni *prev)
{
        struct lnet_peer_ni *lpni;
        struct lnet_peer_net *net = peer_net;

        if (!prev) {
                if (!net) {
                        if (list_empty(&peer->lp_peer_nets))
                                return NULL;

                        net = list_entry(peer->lp_peer_nets.next,
                                         struct lnet_peer_net,
                                         lpn_peer_nets);
                }
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
                /*
                 * if you reached the end of the peer ni list and the peer
                 * net is specified then there are no more peer nis in that
                 * net.
                 */
                if (net)
                        return NULL;

                /*
                 * we reached the end of this net ni list. move to the
                 * next net
                 */
                if (prev->lpni_peer_net->lpn_peer_nets.next ==
                    &peer->lp_peer_nets)
                        /* no more nets and no more NIs. */
                        return NULL;

                /* get the next net */
                net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
                                 struct lnet_peer_net,
                                 lpn_peer_nets);
                /* get the ni on it */
                lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
                                  lpni_peer_nis);

                return lpni;
        }

        /* there are more nis left */
        lpni = list_entry(prev->lpni_peer_nis.next,
                          struct lnet_peer_ni, lpni_peer_nis);

        return lpni;
}
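
/*
 * Illustrative sketch, not part of the original file: the canonical
 * iteration pattern for this helper, matching its use elsewhere in
 * this file (e.g. lnet_peer_del_locked()). A NULL peer_net walks all
 * nets; a NULL prev starts the walk. Caller holds lnet_net_lock.
 */
#if 0
static void example_walk_peer_nis(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                CDEBUG(D_NET, "peer NI %s\n",
                       libcfs_nid2str(lpni->lpni_nid));
}
#endif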

/* Call with the ln_api_mutex held */
int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
{
        struct lnet_process_id id;
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        __u32 count = 0;
        __u32 size = 0;
        int lncpt;
        int cpt;
        __u32 i;
        int rc;

        rc = -ESHUTDOWN;
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                goto done;

        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);

        /*
         * Count the number of peers, and return E2BIG if the buffer
         * is too small. We'll also return the desired size.
         */
        rc = -E2BIG;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                count += ptable->pt_peers;
        }
        size = count * sizeof(*ids);
        if (size > *sizep)
                goto done;

        /*
         * Walk the peer lists and copy out the primary nids.
         * This is safe because the peer lists are only modified
         * while the ln_api_mutex is held. So we don't need to
         * hold the lnet_net_lock as well, and can therefore
         * directly call copy_to_user().
         */
        rc = -EFAULT;
        memset(&id, 0, sizeof(id));
        id.pid = LNET_PID_LUSTRE;
        i = 0;
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (i >= count)
                                goto done;
                        id.nid = lp->lp_primary_nid;
                        if (copy_to_user(&ids[i], &id, sizeof(id)))
                                goto done;
                        i++;
                }
        }
        rc = 0;
done:
        *countp = count;
        *sizep = size;
        return rc;
}
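
/*
 * Illustrative sketch, not part of the original file: because
 * lnet_get_peer_list() returns -E2BIG and reports the required size
 * in *sizep, a caller can probe with an empty buffer first and then
 * allocate exactly what it needs. The helper name is hypothetical.
 */
#if 0
static int example_peer_list_size(u32 *sizep)
{
        u32 count = 0;
        u32 size = 0;
        int rc;

        /* Probe with a zero-sized buffer: -E2BIG means "size" now
         * holds the number of bytes the real buffer must have. */
        rc = lnet_get_peer_list(&count, &size, NULL);
        if (rc == -E2BIG || rc == 0) {
                *sizep = size;
                return 0;
        }
        return rc;
}
#endif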

/*
 * Start pushes to peers that need to be updated for a configuration
 * change on this node.
 */
void
lnet_push_update_to_peers(int force)
{
        struct lnet_peer_table *ptable;
        struct lnet_peer *lp;
        int lncpt;
        int cpt;

        lnet_net_lock(LNET_LOCK_EX);
        if (lnet_peer_discovery_disabled)
                force = 0;
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
        for (cpt = 0; cpt < lncpt; cpt++) {
                ptable = the_lnet.ln_peer_tables[cpt];
                list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
                        if (force) {
                                spin_lock(&lp->lp_lock);
                                if (lp->lp_state & LNET_PEER_MULTI_RAIL)
                                        lp->lp_state |= LNET_PEER_FORCE_PUSH;
                                spin_unlock(&lp->lp_lock);
                        }
                        if (lnet_peer_needs_push(lp))
                                lnet_peer_queue_for_discovery(lp);
                }
        }
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_dc_waitq);
}

/*
 * Test whether a ni is a preferred ni for this peer_ni, e.g. whether
 * this is a preferred point-to-point path. Call with lnet_net_lock in
 * shared mode.
 */
bool
lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        int i;

        if (lpni->lpni_pref_nnids == 0)
                return false;
        if (lpni->lpni_pref_nnids == 1)
                return lpni->lpni_pref.nid == nid;
        for (i = 0; i < lpni->lpni_pref_nnids; i++) {
                if (lpni->lpni_pref.nids[i] == nid)
                        return true;
        }
        return false;
}
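
/*
 * Illustrative note, not part of the original file: lpni_pref is a
 * union. With exactly one preferred NID the value lives inline in
 * lpni_pref.nid; only with two or more is the separately allocated
 * lpni_pref.nids array in use, which is why the test above special-
 * cases lpni_pref_nnids == 1. A hypothetical accessor following the
 * same convention:
 */
#if 0
static lnet_nid_t example_first_pref_nid(struct lnet_peer_ni *lpni)
{
        if (lpni->lpni_pref_nnids == 0)
                return LNET_NID_ANY;
        if (lpni->lpni_pref_nnids == 1)
                return lpni->lpni_pref.nid;     /* inline storage */
        return lpni->lpni_pref.nids[0];         /* allocated array */
}
#endif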

/*
 * Set a single ni as preferred, provided no preferred ni is already
 * defined. Only to be used for non-multi-rail peer_ni.
 */
int
lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
        } else if (lpni->lpni_pref_nnids > 0) {
                rc = -EPERM;
        } else if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
                lpni->lpni_pref_nnids = 1;
                lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
        return rc;
}

/*
 * Clear the preferred NID from a non-multi-rail peer_ni, provided
 * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
 */
int
lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
{
        int rc = 0;

        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
                lpni->lpni_pref_nnids = 0;
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        } else if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
        } else {
                rc = -EPERM;
        }
        spin_unlock(&lpni->lpni_lock);

        CDEBUG(D_NET, "peer %s: %d\n",
               libcfs_nid2str(lpni->lpni_nid), rc);
        return rc;
}

/*
 * Clear the preferred NIDs from a non-multi-rail peer.
 */
void
lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
{
        struct lnet_peer_ni *lpni = NULL;

        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lnet_peer_ni_clr_non_mr_pref_nid(lpni);
}

int
lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        lnet_nid_t *nids = NULL;
        lnet_nid_t *oldnids = NULL;
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        int size;
        int i;
        int rc = 0;

        if (nid == LNET_NID_ANY) {
                rc = -EINVAL;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
                rc = -EEXIST;
                goto out;
        }

        /* A non-MR node may have only one preferred NI per peer_ni */
        if (lpni->lpni_pref_nnids > 0) {
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        rc = -EPERM;
                        goto out;
                }
        }

        if (lpni->lpni_pref_nnids != 0) {
                size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
                LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
                if (!nids) {
                        rc = -ENOMEM;
                        goto out;
                }
                for (i = 0; i < lpni->lpni_pref_nnids; i++) {
                        if (lpni->lpni_pref.nids[i] == nid) {
                                LIBCFS_FREE(nids, size);
                                rc = -EEXIST;
                                goto out;
                        }
                        nids[i] = lpni->lpni_pref.nids[i];
                }
                nids[i] = nid;
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 0) {
                lpni->lpni_pref.nid = nid;
        } else {
                oldnids = lpni->lpni_pref.nids;
                lpni->lpni_pref.nids = nids;
        }
        lpni->lpni_pref_nnids++;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (oldnids) {
                size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
                /* size is already a byte count; don't scale it again */
                LIBCFS_FREE(oldnids, size);
        }
out:
        if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
                spin_lock(&lpni->lpni_lock);
                lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
                spin_unlock(&lpni->lpni_lock);
        }
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

int
lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
{
        lnet_nid_t *nids = NULL;
        lnet_nid_t *oldnids = NULL;
        struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
        int size;
        int i, j;
        int rc = 0;

        if (lpni->lpni_pref_nnids == 0) {
                rc = -ENOENT;
                goto out;
        }

        if (lpni->lpni_pref_nnids == 1) {
                if (lpni->lpni_pref.nid != nid) {
                        rc = -ENOENT;
                        goto out;
                }
        } else if (lpni->lpni_pref_nnids == 2) {
                if (lpni->lpni_pref.nids[0] != nid &&
                    lpni->lpni_pref.nids[1] != nid) {
                        rc = -ENOENT;
                        goto out;
                }
        } else {
                size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
                LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
                if (!nids) {
                        rc = -ENOMEM;
                        goto out;
                }
                for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
                        /* skip the nid being removed; keep all others */
                        if (lpni->lpni_pref.nids[i] == nid)
                                continue;
                        nids[j++] = lpni->lpni_pref.nids[i];
                }
                /* Check if we actually removed a nid. */
                if (j == lpni->lpni_pref_nnids) {
                        LIBCFS_FREE(nids, size);
                        rc = -ENOENT;
                        goto out;
                }
        }

        lnet_net_lock(LNET_LOCK_EX);
        spin_lock(&lpni->lpni_lock);
        if (lpni->lpni_pref_nnids == 1) {
                lpni->lpni_pref.nid = LNET_NID_ANY;
        } else if (lpni->lpni_pref_nnids == 2) {
                oldnids = lpni->lpni_pref.nids;
                if (oldnids[0] == nid)
                        lpni->lpni_pref.nid = oldnids[1];
                else
                        lpni->lpni_pref.nid = oldnids[0];
        } else {
                oldnids = lpni->lpni_pref.nids;
                lpni->lpni_pref.nids = nids;
        }
        lpni->lpni_pref_nnids--;
        lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
        spin_unlock(&lpni->lpni_lock);
        lnet_net_unlock(LNET_LOCK_EX);

        if (oldnids) {
                size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
                /* size is already a byte count; don't scale it again */
                LIBCFS_FREE(oldnids, size);
        }
out:
        CDEBUG(D_NET, "peer %s nid %s: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
        return rc;
}

lnet_nid_t
lnet_peer_primary_nid_locked(lnet_nid_t nid)
{
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }

        return primary_nid;
}

bool
lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
{
        if (lnet_peer_discovery_disabled)
                return true;

        if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
            (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
                return true;
        }

        return false;
}

/*
 * Peer Discovery
 */
bool
lnet_is_discovery_disabled(struct lnet_peer *lp)
{
        bool rc = false;

        spin_lock(&lp->lp_lock);
        rc = lnet_is_discovery_disabled_locked(lp);
        spin_unlock(&lp->lp_lock);

        return rc;
}

lnet_nid_t
LNetPrimaryNID(lnet_nid_t nid)
{
        struct lnet_peer *lp;
        struct lnet_peer_ni *lpni;
        lnet_nid_t primary_nid = nid;
        int rc = 0;
        int cpt;

        cpt = lnet_net_lock_current();
        lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
        if (IS_ERR(lpni)) {
                rc = PTR_ERR(lpni);
                goto out_unlock;
        }
        lp = lpni->lpni_peer_net->lpn_peer;

        while (!lnet_peer_is_uptodate(lp)) {
                rc = lnet_discover_peer_locked(lpni, cpt, true);
                if (rc)
                        goto out_decref;
                lp = lpni->lpni_peer_net->lpn_peer;

                /* Only try once if discovery is disabled */
                if (lnet_is_discovery_disabled(lp))
                        break;
        }
        primary_nid = lp->lp_primary_nid;
out_decref:
        lnet_peer_ni_decref_locked(lpni);
out_unlock:
        lnet_net_unlock(cpt);

        CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
               libcfs_nid2str(primary_nid), rc);
        return primary_nid;
}
EXPORT_SYMBOL(LNetPrimaryNID);
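
/*
 * Illustrative sketch, not part of the original file: LNetPrimaryNID()
 * may kick off discovery and wait on it, so it must be called from a
 * context that can block. On failure it falls back to returning the
 * NID it was given, so the call cannot fail from the caller's point
 * of view. The helper name is hypothetical.
 */
#if 0
static void example_primary_nid(lnet_nid_t nid)
{
        lnet_nid_t prim = LNetPrimaryNID(nid);

        CDEBUG(D_NET, "%s resolves to primary %s\n",
               libcfs_nid2str(nid), libcfs_nid2str(prim));
}
#endif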

struct lnet_peer_net *
lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
{
        struct lnet_peer_net *peer_net;
        list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
                if (peer_net->lpn_net_id == net_id)
                        return peer_net;
        }
        return NULL;
}

/*
 * Attach a peer_ni to a peer_net and peer. This function assumes
 * peer_ni is not already attached to the peer_net/peer. The peer_ni
 * may be attached to a different peer, in which case it will be
 * properly detached first. The whole operation is done atomically.
 *
 * Always returns 0.  This is the last function called from functions
 * that do return an int, so returning 0 here allows the compiler to
 * do a tail call.
 */
static int
lnet_peer_attach_peer_ni(struct lnet_peer *lp,
                                struct lnet_peer_net *lpn,
                                struct lnet_peer_ni *lpni,
                                unsigned flags)
{
        struct lnet_peer_table *ptable;

        /* Install the new peer_ni */
        lnet_net_lock(LNET_LOCK_EX);
        /* Add peer_ni to global peer table hash, if necessary. */
        if (list_empty(&lpni->lpni_hashlist)) {
                int hash = lnet_nid2peerhash(lpni->lpni_nid);

                ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
                list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
                ptable->pt_version++;
                ptable->pt_number++;
                /* This is the 1st refcount on lpni. */
                atomic_inc(&lpni->lpni_refcount);
        }

        /* Detach the peer_ni from an existing peer, if necessary. */
        if (lpni->lpni_peer_net) {
                LASSERT(lpni->lpni_peer_net != lpn);
                LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
                lnet_peer_detach_peer_ni_locked(lpni);
                lnet_peer_net_decref_locked(lpni->lpni_peer_net);
                lpni->lpni_peer_net = NULL;
        }

        /* Add peer_ni to peer_net */
        lpni->lpni_peer_net = lpn;
        list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
        lnet_peer_net_addref_locked(lpn);

        /* Add peer_net to peer */
        if (!lpn->lpn_peer) {
                lpn->lpn_peer = lp;
                list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
                lnet_peer_addref_locked(lp);
        }

        /* Add peer to global peer list, if necessary */
        ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
        if (list_empty(&lp->lp_peer_list)) {
                list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
                ptable->pt_peers++;
        }

        /* Update peer state */
        spin_lock(&lp->lp_lock);
        if (flags & LNET_PEER_CONFIGURED) {
                if (!(lp->lp_state & LNET_PEER_CONFIGURED))
                        lp->lp_state |= LNET_PEER_CONFIGURED;
        }
        if (flags & LNET_PEER_MULTI_RAIL) {
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
        }
        spin_unlock(&lp->lp_lock);

        lp->lp_nnis++;
        lnet_net_unlock(LNET_LOCK_EX);

        CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
               libcfs_nid2str(lp->lp_primary_nid),
               libcfs_nid2str(lpni->lpni_nid), flags);

        return 0;
}

/*
 * Create a new peer, with nid as its primary nid.
 *
 * Call with the lnet_api_mutex held.
 */
static int
lnet_peer_add(lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer *lp;
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(nid != LNET_NID_ANY);

        /*
         * No need for the lnet_net_lock here, because the
         * lnet_api_mutex is held.
         */
        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /* A peer with this NID already exists. */
                lp = lpni->lpni_peer_net->lpn_peer;
                lnet_peer_ni_decref_locked(lpni);
                /*
                 * This is an error if the peer was configured and the
                 * primary NID differs or an attempt is made to change
                 * the Multi-Rail flag. Otherwise the assumption is
                 * that an existing peer is being modified.
                 */
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        if (lp->lp_primary_nid != nid)
                                rc = -EEXIST;
                        else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
                                rc = -EPERM;
                        goto out;
                }
                /* Delete and recreate as a configured peer. */
                lnet_peer_del(lp);
        }

        /* Create peer, peer_net, and peer_ni. */
        rc = -ENOMEM;
        lp = lnet_peer_alloc(nid);
        if (!lp)
                goto out;
        lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
        if (!lpn)
                goto out_free_lp;
        lpni = lnet_peer_ni_alloc(nid);
        if (!lpni)
                goto out_free_lpn;

        return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpn:
        LIBCFS_FREE(lpn, sizeof(*lpn));
out_free_lp:
        LIBCFS_FREE(lp, sizeof(*lp));
out:
        CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
               libcfs_nid2str(nid), flags, rc);
        return rc;
}

/*
 * Add a NID to a peer. Call with ln_api_mutex held.
 *
 * Error codes:
 *  -EPERM:    Non-DLC addition to a DLC-configured peer.
 *  -EEXIST:   The NID was configured by DLC for a different peer.
 *  -ENOMEM:   Out of memory.
 *  -ENOTUNIQ: Adding a second peer NID on a single network on a
 *             non-multi-rail peer.
 */
static int
lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
{
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        int rc = 0;

        LASSERT(lp);
        LASSERT(nid != LNET_NID_ANY);

        /* A configured peer can only be updated through configuration. */
        if (!(flags & LNET_PEER_CONFIGURED)) {
                if (lp->lp_state & LNET_PEER_CONFIGURED) {
                        rc = -EPERM;
                        goto out;
                }
        }

        /*
         * The MULTI_RAIL flag can be set but not cleared, because
         * that would leave the peer struct in an invalid state.
         */
        if (flags & LNET_PEER_MULTI_RAIL) {
                spin_lock(&lp->lp_lock);
                if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                        lp->lp_state |= LNET_PEER_MULTI_RAIL;
                        lnet_peer_clr_non_mr_pref_nids(lp);
                }
                spin_unlock(&lp->lp_lock);
        } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
                rc = -EPERM;
                goto out;
        }

        lpni = lnet_find_peer_ni_locked(nid);
        if (lpni) {
                /*
                 * A peer_ni already exists. This is only a problem if
                 * it is not connected to this peer and was configured
                 * by DLC.
                 */
                lnet_peer_ni_decref_locked(lpni);
                if (lpni->lpni_peer_net->lpn_peer == lp)
                        goto out;
                if (lnet_peer_ni_is_configured(lpni)) {
                        rc = -EEXIST;
                        goto out;
                }
                /* If this is the primary NID, destroy the peer. */
                if (lnet_peer_ni_is_primary(lpni)) {
                        struct lnet_peer *rtr_lp =
                          lpni->lpni_peer_net->lpn_peer;
                        int rtr_refcount = rtr_lp->lp_rtr_refcount;
                        /*
                         * if we're trying to delete a router it means
                         * we're moving this peer NI to a new peer so must
                         * transfer router properties to the new peer
                         */
                        if (rtr_refcount > 0) {
                                flags |= LNET_PEER_RTR_NI_FORCE_DEL;
                                lnet_rtr_transfer_to_peer(rtr_lp, lp);
                        }
                        lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
                        lpni = lnet_peer_ni_alloc(nid);
                        if (!lpni) {
                                rc = -ENOMEM;
                                goto out;
                        }
                }
        } else {
                lpni = lnet_peer_ni_alloc(nid);
                if (!lpni) {
                        rc = -ENOMEM;
                        goto out;
                }
        }

        /*
         * Get the peer_net. Check that we're not adding a second
         * peer_ni on a peer_net of a non-multi-rail peer.
         */
        lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
        if (!lpn) {
                lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
                if (!lpn) {
                        rc = -ENOMEM;
                        goto out_free_lpni;
                }
        } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
                rc = -ENOTUNIQ;
                goto out_free_lpni;
        }

        return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);

out_free_lpni:
        /* If the peer_ni was allocated above its peer_net pointer is NULL */
        if (!lpni->lpni_peer_net)
                LIBCFS_FREE(lpni, sizeof(*lpni));
out:
        CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
               libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
               flags, rc);
        return rc;
}
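
/*
 * Illustrative sketch, not part of the original file: a DLC-style
 * caller adding a NID to a peer and relying on the error codes
 * documented above. The helper name is hypothetical.
 */
#if 0
static int example_add_nid(struct lnet_peer *lp, lnet_nid_t nid, bool mr)
{
        unsigned flags = LNET_PEER_CONFIGURED;

        if (mr)
                flags |= LNET_PEER_MULTI_RAIL;

        /* May return -EPERM, -EEXIST, -ENOMEM or -ENOTUNIQ as above */
        return lnet_peer_add_nid(lp, nid, flags);
}
#endif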
1497
1498 /*
1499  * Update the primary NID of a peer, if possible.
1500  *
1501  * Call with the lnet_api_mutex held.
1502  */
1503 static int
1504 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1505 {
1506         lnet_nid_t old = lp->lp_primary_nid;
1507         int rc = 0;
1508
1509         if (lp->lp_primary_nid == nid)
1510                 goto out;
1511         rc = lnet_peer_add_nid(lp, nid, flags);
1512         if (rc)
1513                 goto out;
1514         lp->lp_primary_nid = nid;
1515 out:
1516         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1517                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1518         return rc;
1519 }
1520
1521 /*
1522  * lpni creation initiated due to traffic either sending or receiving.
1523  */
1524 static int
1525 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1526 {
1527         struct lnet_peer *lp;
1528         struct lnet_peer_net *lpn;
1529         struct lnet_peer_ni *lpni;
1530         /*
1531          * Assume peer is Multi-Rail capable and let discovery find out
1532          * otherwise.
1533          */
1534         unsigned flags = LNET_PEER_MULTI_RAIL;
1535         int rc = 0;
1536
1537         if (nid == LNET_NID_ANY) {
1538                 rc = -EINVAL;
1539                 goto out;
1540         }
1541
1542         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1543         lpni = lnet_find_peer_ni_locked(nid);
1544         if (lpni) {
1545                 /*
1546                  * We must have raced with another thread. Since we
1547                  * know next to nothing about a peer_ni created by
1548                  * traffic, we just assume everything is ok and
1549                  * return.
1550                  */
1551                 lnet_peer_ni_decref_locked(lpni);
1552                 goto out;
1553         }
1554
1555         /* Create peer, peer_net, and peer_ni. */
1556         rc = -ENOMEM;
1557         lp = lnet_peer_alloc(nid);
1558         if (!lp)
1559                 goto out;
1560         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1561         if (!lpn)
1562                 goto out_free_lp;
1563         lpni = lnet_peer_ni_alloc(nid);
1564         if (!lpni)
1565                 goto out_free_lpn;
1566         if (pref != LNET_NID_ANY)
1567                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1568
1569         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1570
1571 out_free_lpn:
1572         LIBCFS_FREE(lpn, sizeof(*lpn));
1573 out_free_lp:
1574         LIBCFS_FREE(lp, sizeof(*lp));
1575 out:
1576         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1577         return rc;
1578 }
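/*
 * The function above follows the kernel's usual goto-unwind pattern:
 * each allocation gets an error label that frees everything allocated
 * so far, in reverse order, while the success path hands all three
 * objects to lnet_peer_attach_peer_ni(). A condensed sketch of the
 * shape, with hypothetical names:
 *
 *	a = alloc_a();
 *	if (!a)
 *		goto out;
 *	b = alloc_b();
 *	if (!b)
 *		goto out_free_a;
 *	return consume(a, b);
 * out_free_a:
 *	LIBCFS_FREE(a, sizeof(*a));
 * out:
 *	return -ENOMEM;
 */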
1579
1580 /*
1581  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1582  *
1583  * This API handles the following combinations:
1584  *   Create a peer with its primary NI if only the prim_nid is provided
1585  *   Add a NID to a peer identified by the prim_nid. The peer identified
1586  *   by the prim_nid must already exist.
1587  *   The peer being created may be non-MR.
1588  *
1589  * The caller must hold ln_api_mutex. This prevents the peer from
1590  * being created/modified/deleted by a different thread.
1591  */
1592 int
1593 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1594 {
1595         struct lnet_peer *lp = NULL;
1596         struct lnet_peer_ni *lpni;
1597         unsigned flags;
1598
1599         /* The prim_nid must always be specified */
1600         if (prim_nid == LNET_NID_ANY)
1601                 return -EINVAL;
1602
1603         flags = LNET_PEER_CONFIGURED;
1604         if (mr)
1605                 flags |= LNET_PEER_MULTI_RAIL;
1606
1607         /*
1608          * If nid isn't specified, we must create a new peer with
1609          * prim_nid as its primary nid.
1610          */
1611         if (nid == LNET_NID_ANY)
1612                 return lnet_peer_add(prim_nid, flags);
1613
1614         /* Look up the prim_nid, which must exist. */
1615         lpni = lnet_find_peer_ni_locked(prim_nid);
1616         if (!lpni)
1617                 return -ENOENT;
1618         lnet_peer_ni_decref_locked(lpni);
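        /*
         * Using lpni after the decref is safe here: the caller holds
         * ln_api_mutex, which prevents the peer from being deleted
         * concurrently, so the lookup result cannot go away.
         */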
1619         lp = lpni->lpni_peer_net->lpn_peer;
1620
1621         /* Peer must have been configured. */
1622         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1623                 CDEBUG(D_NET, "peer %s was not configured\n",
1624                        libcfs_nid2str(prim_nid));
1625                 return -ENOENT;
1626         }
1627
1628         /* Primary NID must match */
1629         if (lp->lp_primary_nid != prim_nid) {
1630                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1631                        libcfs_nid2str(prim_nid),
1632                        libcfs_nid2str(lp->lp_primary_nid));
1633                 return -ENODEV;
1634         }
1635
1636         /* Multi-Rail flag must match. */
1637         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1638                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1639                        libcfs_nid2str(prim_nid));
1640                 return -EPERM;
1641         }
1642
1643         return lnet_peer_add_nid(lp, nid, flags);
1644 }
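/*
 * Usage sketch, with hypothetical NID values: the combinations listed
 * in the comment above map onto calls such as:
 *
 *	// create a new Multi-Rail peer with prim_nid as primary NID
 *	rc = lnet_add_peer_ni(prim_nid, LNET_NID_ANY, true);
 *
 *	// add another NID to the existing peer identified by prim_nid
 *	rc = lnet_add_peer_ni(prim_nid, nid, true);
 */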
1645
1646 /*
1647  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1648  *
1649  * This API handles the following combinations:
1650  *   Delete a NID from a peer if both prim_nid and nid are provided.
1651  *   Delete a peer if only prim_nid is provided.
1652  *   Delete a whole peer if the nid provided is its primary nid.
1653  *
1654  * The caller must hold ln_api_mutex. This prevents the peer from
1655  * being modified/deleted by a different thread.
1656  */
1657 int
1658 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1659 {
1660         struct lnet_peer *lp;
1661         struct lnet_peer_ni *lpni;
1662         unsigned flags;
1663
1664         if (prim_nid == LNET_NID_ANY)
1665                 return -EINVAL;
1666
1667         lpni = lnet_find_peer_ni_locked(prim_nid);
1668         if (!lpni)
1669                 return -ENOENT;
1670         lnet_peer_ni_decref_locked(lpni);
1671         lp = lpni->lpni_peer_net->lpn_peer;
1672
1673         if (prim_nid != lp->lp_primary_nid) {
1674                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1675                        libcfs_nid2str(prim_nid),
1676                        libcfs_nid2str(lp->lp_primary_nid));
1677                 return -ENODEV;
1678         }
1679
1680         lnet_net_lock(LNET_LOCK_EX);
1681         if (lp->lp_rtr_refcount > 0) {
1682                 lnet_net_unlock(LNET_LOCK_EX);
1683                 CERROR("%s is a router and cannot be deleted\n",
1684                        libcfs_nid2str(prim_nid));
1685                 return -EBUSY;
1686         }
1687         lnet_net_unlock(LNET_LOCK_EX);
1688
1689         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1690                 return lnet_peer_del(lp);
1691
1692         flags = LNET_PEER_CONFIGURED;
1693         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1694                 flags |= LNET_PEER_MULTI_RAIL;
1695
1696         return lnet_peer_del_nid(lp, nid, flags);
1697 }
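/*
 * Usage sketch, mirroring the combinations listed in the comment
 * above (NID values hypothetical):
 *
 *	rc = lnet_del_peer_ni(prim_nid, nid);		// drop one NID
 *	rc = lnet_del_peer_ni(prim_nid, LNET_NID_ANY);	// drop the peer
 *	rc = lnet_del_peer_ni(prim_nid, prim_nid);	// also drops the peer
 */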
1698
1699 void
1700 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
1701 {
1702         struct lnet_peer_table *ptable;
1703         struct lnet_peer_net *lpn;
1704
1705         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1706
1707         LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
1708         LASSERT(list_empty(&lpni->lpni_txq));
1709         LASSERT(lpni->lpni_txqnob == 0);
1710         LASSERT(list_empty(&lpni->lpni_peer_nis));
1711         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1712
1713         lpn = lpni->lpni_peer_net;
1714         lpni->lpni_peer_net = NULL;
1715         lpni->lpni_net = NULL;
1716
1717         /* remove the peer ni from the zombie list */
1718         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1719         spin_lock(&ptable->pt_zombie_lock);
1720         list_del_init(&lpni->lpni_hashlist);
1721         ptable->pt_zombies--;
1722         spin_unlock(&ptable->pt_zombie_lock);
1723
1724         if (lpni->lpni_pref_nnids > 1) {
1725                 LIBCFS_FREE(lpni->lpni_pref.nids,
1726                         sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
1727         }
1728         LIBCFS_FREE(lpni, sizeof(*lpni));
1729
1730         lnet_peer_net_decref_locked(lpn);
1731 }
1732
1733 struct lnet_peer_ni *
1734 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1735 {
1736         struct lnet_peer_ni *lpni = NULL;
1737         int rc;
1738
1739         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1740                 return ERR_PTR(-ESHUTDOWN);
1741
1742         /*
1743          * Check whether a peer_ni already exists.
1744          * If so, just return it.
1745          */
1746         lpni = lnet_find_peer_ni_locked(nid);
1747         if (lpni)
1748                 return lpni;
1749
1750         lnet_net_unlock(cpt);
1751
1752         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1753         if (rc) {
1754                 lpni = ERR_PTR(rc);
1755                 goto out_net_relock;
1756         }
1757
1758         lpni = lnet_find_peer_ni_locked(nid);
1759         LASSERT(lpni);
1760
1761 out_net_relock:
1762         lnet_net_lock(cpt);
1763
1764         return lpni;
1765 }
1766
1767 /*
1768  * Get a peer_ni for the given nid, create it if necessary. Takes a
1769  * hold on the peer_ni.
1770  */
1771 struct lnet_peer_ni *
1772 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1773 {
1774         struct lnet_peer_ni *lpni = NULL;
1775         int rc;
1776
1777         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1778                 return ERR_PTR(-ESHUTDOWN);
1779
1780         /*
1781          * Check whether a peer_ni already exists.
1782          * If so, just return it.
1783          */
1784         lpni = lnet_find_peer_ni_locked(nid);
1785         if (lpni)
1786                 return lpni;
1787
1788         /*
1789          * Slow path:
1790          * Use the ln_api_mutex to serialize the creation of the peer_ni
1791          * and the creation/deletion of the local ni/net. When a local NI
1792          * is created, any peer_nis that exist on that network need to be
1793          * traversed and updated. When a local NI is deleted, which could
1794          * result in a network being deleted, all peer_nis on that network
1795          * need to be removed as well.
1796          *
1797          * Creation through traffic should also be serialized with
1798          * creation through DLC.
1799          */
1800         lnet_net_unlock(cpt);
1801         mutex_lock(&the_lnet.ln_api_mutex);
1802         /*
1803          * Shutdown is only set while holding the ln_api_mutex, so a
1804          * single check here is sufficient.
1805          */
1806         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1807                 lpni = ERR_PTR(-ESHUTDOWN);
1808                 goto out_mutex_unlock;
1809         }
1810
1811         rc = lnet_peer_ni_traffic_add(nid, pref);
1812         if (rc) {
1813                 lpni = ERR_PTR(rc);
1814                 goto out_mutex_unlock;
1815         }
1816
1817         lpni = lnet_find_peer_ni_locked(nid);
1818         LASSERT(lpni);
1819
1820 out_mutex_unlock:
1821         mutex_unlock(&the_lnet.ln_api_mutex);
1822         lnet_net_lock(cpt);
1823
1824         /* Lock has been dropped, check again for shutdown. */
1825         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1826                 if (!IS_ERR(lpni))
1827                         lnet_peer_ni_decref_locked(lpni);
1828                 lpni = ERR_PTR(-ESHUTDOWN);
1829         }
1830
1831         return lpni;
1832 }
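/*
 * Sketch of the expected caller pattern: on failure the function above
 * returns an ERR_PTR(), never NULL, so a hypothetical caller must test
 * with IS_ERR() rather than a NULL check:
 *
 *	lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
 *	if (IS_ERR(lpni))
 *		return PTR_ERR(lpni);
 *	// ... use lpni, then drop the hold taken for us ...
 *	lnet_peer_ni_decref_locked(lpni);
 */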
1833
1834 bool
1835 lnet_peer_gw_discovery(struct lnet_peer *lp)
1836 {
1837         bool rc = false;
1838
1839         spin_lock(&lp->lp_lock);
1840         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
1841                 rc = true;
1842         spin_unlock(&lp->lp_lock);
1843
1844         return rc;
1845 }
1846
1847 /*
1848  * Is a peer uptodate from the point of view of discovery?
1849  *
1850  * If it is currently being processed, obviously not.
1851  * A forced Ping or Push is also handled by the discovery thread.
1852  *
1853  * Otherwise look at whether the peer needs rediscovering.
1854  */
1855 bool
1856 lnet_peer_is_uptodate(struct lnet_peer *lp)
1857 {
1858         bool rc;
1859
1860         spin_lock(&lp->lp_lock);
1861         if (lp->lp_state & (LNET_PEER_DISCOVERING |
1862                             LNET_PEER_FORCE_PING |
1863                             LNET_PEER_FORCE_PUSH)) {
1864                 rc = false;
1865         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
1866                 rc = false;
1867         } else if (lnet_peer_needs_push(lp)) {
1868                 rc = false;
1869         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
1870                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
1871                         rc = true;
1872                 else
1873                         rc = false;
1874         } else {
1875                 rc = false;
1876         }
1877         spin_unlock(&lp->lp_lock);
1878
1879         return rc;
1880 }
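/*
 * The ladder above collapses to a single predicate: a peer is up to
 * date only when it is DISCOVERED with NIDS_UPTODATE and no discovery
 * work is pending, forced, or needed. An equivalent sketch:
 *
 *	rc = !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *			       LNET_PEER_FORCE_PING |
 *			       LNET_PEER_FORCE_PUSH |
 *			       LNET_PEER_REDISCOVER)) &&
 *	     !lnet_peer_needs_push(lp) &&
 *	     (lp->lp_state & LNET_PEER_DISCOVERED) &&
 *	     (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
 */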
1881
1882 /*
1883  * Queue a peer for the attention of the discovery thread.  Call with
1884  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
1885  * -EALREADY if the peer was already queued.
1886  */
1887 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
1888 {
1889         int rc;
1890
1891         spin_lock(&lp->lp_lock);
1892         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
1893                 lp->lp_state |= LNET_PEER_DISCOVERING;
1894         spin_unlock(&lp->lp_lock);
1895         if (list_empty(&lp->lp_dc_list)) {
1896                 lnet_peer_addref_locked(lp);
1897                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
1898                 wake_up(&the_lnet.ln_dc_waitq);
1899                 rc = 0;
1900         } else {
1901                 rc = -EALREADY;
1902         }
1903
1904         CDEBUG(D_NET, "Queue peer %s: %d\n",
1905                libcfs_nid2str(lp->lp_primary_nid), rc);
1906
1907         return rc;
1908 }
1909
1910 /*
1911  * Discovery of a peer is complete. Wake all waiters on the peer.
1912  * Call with lnet_net_lock/EX held.
1913  */
1914 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
1915 {
1916         struct lnet_msg *msg, *tmp;
1917         int rc = 0;
1918         struct list_head pending_msgs;
1919
1920         INIT_LIST_HEAD(&pending_msgs);
1921
1922         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
1923                libcfs_nid2str(lp->lp_primary_nid));
1924
1925         list_del_init(&lp->lp_dc_list);
1926         spin_lock(&lp->lp_lock);
1927         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
1928         spin_unlock(&lp->lp_lock);
1929         wake_up_all(&lp->lp_dc_waitq);
1930
1931         if (lp->lp_rtr_refcount > 0)
1932                 lnet_router_discovery_complete(lp);
1933
1934         lnet_net_unlock(LNET_LOCK_EX);
1935
1936         /* iterate through all pending messages and send them again */
1937         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
1938                 list_del_init(&msg->msg_list);
1939                 if (lp->lp_dc_error) {
1940                         lnet_finalize(msg, lp->lp_dc_error);
1941                         continue;
1942                 }
1943
1944                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
1945                        lnet_msgtyp2str(msg->msg_type),
1946                        libcfs_id2str(msg->msg_target));
1947                 rc = lnet_send(msg->msg_src_nid_param, msg,
1948                                msg->msg_rtr_nid_param);
1949                 if (rc < 0) {
1950                         CNETERR("Error sending %s to %s: %d\n",
1951                                lnet_msgtyp2str(msg->msg_type),
1952                                libcfs_id2str(msg->msg_target), rc);
1953                         lnet_finalize(msg, rc);
1954                 }
1955         }
1956         lnet_net_lock(LNET_LOCK_EX);
1957         lnet_peer_decref_locked(lp);
1958 }
1959
1960 /*
1961  * Handle inbound push.
1962  * Like any event handler, called with lnet_res_lock/CPT held.
1963  */
1964 void lnet_peer_push_event(struct lnet_event *ev)
1965 {
1966         struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1967         struct lnet_peer *lp;
1968
1969         /* lnet_find_peer() adds a refcount */
1970         lp = lnet_find_peer(ev->source.nid);
1971         if (!lp) {
1972                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
1973                        libcfs_nid2str(ev->initiator.nid),
1974                        libcfs_nid2str(ev->source.nid));
1975                 return;
1976         }
1977
1978         /* Ensure peer state remains consistent while we modify it. */
1979         spin_lock(&lp->lp_lock);
1980
1981         /*
1982          * If an error occurred, the contents of the message cannot be
1983          * used. Clear the NIDS_UPTODATE flag and set FORCE_PING to
1984          * trigger a ping.
1985          */
1986         if (ev->status) {
1987                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1988                 lp->lp_state |= LNET_PEER_FORCE_PING;
1989                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
1990                        ev->status,
1991                        libcfs_nid2str(lp->lp_primary_nid),
1992                        libcfs_nid2str(ev->source.nid));
1993                 goto out;
1994         }
1995
1996         /*
1997          * A push with invalid or corrupted info. Clear the UPTODATE
1998          * flag to trigger a ping.
1999          */
2000         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2001                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2002                 lp->lp_state |= LNET_PEER_FORCE_PING;
2003                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2004                        libcfs_nid2str(lp->lp_primary_nid));
2005                 goto out;
2006         }
2007
2008         /*
2009          * Make sure we'll allocate the correct size ping buffer when
2010          * pinging the peer.
2011          */
2012         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2013                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2014
2015         /*
2016          * A non-Multi-Rail peer is not supposed to be capable of
2017          * sending a push.
2018          */
2019         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2020                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2021                        libcfs_nid2str(lp->lp_primary_nid));
2022                 goto out;
2023         }
2024
2025         /*
2026          * Check the MULTIRAIL flag. Complain if the peer was DLC
2027          * configured without it.
2028          */
2029         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
2030                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2031                         CERROR("Push says %s is Multi-Rail, DLC says not\n",
2032                                libcfs_nid2str(lp->lp_primary_nid));
2033                 } else {
2034                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2035                         lnet_peer_clr_non_mr_pref_nids(lp);
2036                 }
2037         }
2038
2039         /*
2040          * The peer may have discovery disabled at its end. Set
2041          * NO_DISCOVERY as appropriate.
2042          */
2043         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2044                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2045                        libcfs_nid2str(lp->lp_primary_nid));
2046                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2047         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2048                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2049                        libcfs_nid2str(lp->lp_primary_nid));
2050                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2051         }
2052
2053         /*
2054          * Check for truncation of the Put message. Clear the
2055          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2056          * and tell discovery to allocate a bigger buffer.
2057          */
2058         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2059                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2060                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2061                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2062                 lp->lp_state |= LNET_PEER_FORCE_PING;
2063                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
2064                        libcfs_nid2str(lp->lp_primary_nid),
2065                        pbuf->pb_info.pi_nnis);
2066                 goto out;
2067         }
2068
2069         /* always assume new data */
2070         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2071         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2072
2073         /*
2074          * If there is data present that hasn't been processed yet,
2075          * we'll replace it if the Put contained newer data and it
2076          * fits. We're racing with a Ping or earlier Push in this
2077          * case.
2078          */
2079         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2080                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2081                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2082                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
2083                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2084                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2085                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2086                               libcfs_nid2str(lp->lp_primary_nid),
2087                               LNET_PING_BUFFER_SEQNO(pbuf),
2088                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2089                 }
2090                 goto out;
2091         }
2092
2093         /*
2094          * Allocate a buffer to copy the data. On a failure we drop
2095          * the Push and set FORCE_PING to force the discovery
2096          * thread to fix the problem by pinging the peer.
2097          */
2098         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2099         if (!lp->lp_data) {
2100                 lp->lp_state |= LNET_PEER_FORCE_PING;
2101                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2102                        libcfs_nid2str(lp->lp_primary_nid),
2103                        LNET_PING_BUFFER_SEQNO(pbuf));
2104                 goto out;
2105         }
2106
2107         /* Success */
2108         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2109                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2110         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2111         CDEBUG(D_NET, "Received Push %s %u\n",
2112                libcfs_nid2str(lp->lp_primary_nid),
2113                LNET_PING_BUFFER_SEQNO(pbuf));
2114
2115 out:
2116         /*
2117          * Queue the peer for discovery if not already queued; if it was
2118          * already queued, force it to the front of the request queue and
2119          * wake the discovery thread, because its status has changed.
2120          */
2121         spin_unlock(&lp->lp_lock);
2122         lnet_net_lock(LNET_LOCK_EX);
2123         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2124                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2125                 wake_up(&the_lnet.ln_dc_waitq);
2126         }
2127         /* Drop refcount from lookup */
2128         lnet_peer_decref_locked(lp);
2129         lnet_net_unlock(LNET_LOCK_EX);
2130 }
2131
2132 /*
2133  * Clear the discovery error state, unless we're already discovering
2134  * this peer, in which case the error is current.
2135  */
2136 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2137 {
2138         spin_lock(&lp->lp_lock);
2139         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2140                 lp->lp_dc_error = 0;
2141         spin_unlock(&lp->lp_lock);
2142 }
2143
2144 /*
2145  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2146  * dropped/retaken within this function. An lnet_peer_ni is passed in
2147  * because discovery could tear down an lnet_peer.
2148  */
2149 int
2150 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2151 {
2152         DEFINE_WAIT(wait);
2153         struct lnet_peer *lp;
2154         int rc = 0;
2155         int count = 0;
2156
2157 again:
2158         lnet_net_unlock(cpt);
2159         lnet_net_lock(LNET_LOCK_EX);
2160         lp = lpni->lpni_peer_net->lpn_peer;
2161         lnet_peer_clear_discovery_error(lp);
2162
2163         /*
2164          * We're willing to be interrupted. The lpni can become a
2165          * zombie if we race with DLC, so we must check for that.
2166          */
2167         for (;;) {
2168                 /* Keep lp alive when the lnet_net_lock is unlocked */
2169                 lnet_peer_addref_locked(lp);
2170                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2171                 if (signal_pending(current))
2172                         break;
2173                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2174                         break;
2175                 /*
2176                  * Don't repeat discovery if discovery is disabled. This is
2177                  * done to ensure we can use discovery as a standard ping as
2178                  * well, for backwards compatibility with routers that do
2179                  * not support discovery or have it disabled.
2180                  */
2181                 if (lnet_is_discovery_disabled(lp) && count > 0)
2182                         break;
2183                 if (lp->lp_dc_error)
2184                         break;
2185                 if (lnet_peer_is_uptodate(lp))
2186                         break;
2187                 lnet_peer_queue_for_discovery(lp);
2188                 count++;
2189                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2190
2191                 /*
2192                  * If caller requested a non-blocking operation then
2193                  * return immediately. Once discovery is complete any
2194                  * pending messages that were stopped due to discovery
2195                  * will be transmitted.
2196                  */
2197                 if (!block)
2198                         break;
2199
2200                 lnet_net_unlock(LNET_LOCK_EX);
2201                 schedule();
2202                 finish_wait(&lp->lp_dc_waitq, &wait);
2203                 lnet_net_lock(LNET_LOCK_EX);
2204                 lnet_peer_decref_locked(lp);
2205                 /* Peer may have changed */
2206                 lp = lpni->lpni_peer_net->lpn_peer;
2207         }
2208         finish_wait(&lp->lp_dc_waitq, &wait);
2209
2210         lnet_net_unlock(LNET_LOCK_EX);
2211         lnet_net_lock(cpt);
2212         lnet_peer_decref_locked(lp);
2213         /*
2214          * The peer may have changed, so re-check and rediscover if that turns
2215          * out to have been the case. The reference count on lp ensured that
2216          * even if it was unlinked from lpni the memory could not be recycled.
2217          * Thus the check below is sufficient to determine whether the peer
2218          * changed. If the peer changed, then lp must not be dereferenced.
2219          */
2220         if (lp != lpni->lpni_peer_net->lpn_peer)
2221                 goto again;
2222
2223         if (signal_pending(current))
2224                 rc = -EINTR;
2225         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2226                 rc = -ESHUTDOWN;
2227         else if (lp->lp_dc_error)
2228                 rc = lp->lp_dc_error;
2229         else if (!block)
2230                 CDEBUG(D_NET, "non-blocking discovery\n");
2231         else if (!lnet_peer_is_uptodate(lp) && !lnet_is_discovery_disabled(lp))
2232                 goto again;
2233
2234         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2235                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2236                libcfs_nid2str(lpni->lpni_nid), rc,
2237                (!block) ? "pending discovery" : "discovery complete");
2238
2239         return rc;
2240 }
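/*
 * Sketch of the two calling modes (caller hypothetical): with
 * block=true the call sleeps until discovery completes or fails; with
 * block=false it only queues the peer, and any messages parked on
 * lp_dc_pendq are sent once discovery completes.
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);   // wait here
 *	rc = lnet_discover_peer_locked(lpni, cpt, false);  // queue only
 */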
2241
2242 /* Handle an incoming ack for a push. */
2243 static void
2244 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2245 {
2246         struct lnet_ping_buffer *pbuf;
2247
2248         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
2249         spin_lock(&lp->lp_lock);
2250         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2251         lp->lp_push_error = ev->status;
2252         if (ev->status)
2253                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2254         else
2255                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2256         spin_unlock(&lp->lp_lock);
2257
2258         CDEBUG(D_NET, "peer %s ev->status %d\n",
2259                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2260 }
2261
2262 /* Handle a Reply message. This is the reply to a Ping message. */
2263 static void
2264 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2265 {
2266         struct lnet_ping_buffer *pbuf;
2267         int rc;
2268
2269         spin_lock(&lp->lp_lock);
2270
2271         /*
2272          * If an error occurred, the contents of the message cannot be
2273          * used. Set PING_FAILED to trigger a retry.
2274          */
2275         if (ev->status) {
2276                 lp->lp_state |= LNET_PEER_PING_FAILED;
2277                 lp->lp_ping_error = ev->status;
2278                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2279                        ev->status,
2280                        libcfs_nid2str(lp->lp_primary_nid),
2281                        libcfs_nid2str(ev->source.nid));
2282                 goto out;
2283         }
2284
2285         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
2286         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2287                 lnet_swap_pinginfo(pbuf);
2288
2289         /*
2290          * A reply with invalid or corrupted info. Set PING_FAILED to
2291          * trigger a retry.
2292          */
2293         rc = lnet_ping_info_validate(&pbuf->pb_info);
2294         if (rc) {
2295                 lp->lp_state |= LNET_PEER_PING_FAILED;
2296                 lp->lp_ping_error = 0;
2297                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2298                        libcfs_nid2str(lp->lp_primary_nid), rc);
2299                 goto out;
2300         }
2301
2302
2303         /*
2304          * Only enable the multi-rail feature on the peer if both sides
2305          * of the connection have discovery on.
2306          */
2307         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2308                 CDEBUG(D_NET, "Peer %s has Multi-Rail feature enabled\n",
2309                        libcfs_nid2str(lp->lp_primary_nid));
2310                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2311         } else {
2312                 CDEBUG(D_NET, "Peer %s has Multi-Rail feature disabled\n",
2313                        libcfs_nid2str(lp->lp_primary_nid));
2314                 lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2315         }
2316
2317         /*
2318          * The peer may have discovery disabled at its end. Set
2319          * NO_DISCOVERY as appropriate.
2320          */
2321         if ((pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY) &&
2322             !lnet_peer_discovery_disabled) {
2323                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2324                        libcfs_nid2str(lp->lp_primary_nid));
2325                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2326         } else {
2327                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2328                        libcfs_nid2str(lp->lp_primary_nid));
2329                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2330         }
2331
2332         /*
2333          * Update the MULTI_RAIL flag based on the reply. If the peer
2334          * was configured with DLC then the setting should match what
2335          * DLC put in.
2336          */
2337         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2338                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2339                         /* Everything's fine */
2340                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2341                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2342                               libcfs_nid2str(lp->lp_primary_nid));
2343                 } else {
2344                         /*
2345                          * if discovery is disabled then we don't want to
2346                          * update the state of the peer. All we'll do is
2347                          * update the peer_nis which were reported back in
2348                          * the initial ping
2349                          */
2350
2351                         if (!lnet_is_discovery_disabled_locked(lp)) {
2352                                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2353                                 lnet_peer_clr_non_mr_pref_nids(lp);
2354                         }
2355                 }
2356         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2357                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2358                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2359                               libcfs_nid2str(lp->lp_primary_nid));
2360                 } else {
2361                         CERROR("Multi-Rail state vanished from %s\n",
2362                                libcfs_nid2str(lp->lp_primary_nid));
2363                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2364                 }
2365         }
2366
2367         /*
2368          * Make sure we'll allocate the correct size ping buffer when
2369          * pinging the peer.
2370          */
2371         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2372                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2373
2374         /*
2375          * Check for truncation of the Reply. Clear PING_SENT and set
2376          * PING_FAILED to trigger a retry.
2377          */
2378         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2379                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2380                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2381                 lp->lp_state |= LNET_PEER_PING_FAILED;
2382                 lp->lp_ping_error = 0;
2383                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2384                        libcfs_nid2str(lp->lp_primary_nid),
2385                        pbuf->pb_info.pi_nnis);
2386                 goto out;
2387         }
2388
2389         /*
2390          * Check the sequence numbers in the reply. These are only
2391          * available if the reply came from a Multi-Rail peer.
2392          */
2393         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2394             pbuf->pb_info.pi_nnis > 1 &&
2395             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2396                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2397                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2398                                 libcfs_nid2str(lp->lp_primary_nid),
2399                                 LNET_PING_BUFFER_SEQNO(pbuf),
2400                                 lp->lp_peer_seqno);
2401
2402                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2403         }
2404
2405         /* We're happy with the state of the data in the buffer. */
2406         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2407                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno, lp->lp_state);
2408         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2409                 lnet_ping_buffer_decref(lp->lp_data);
2410         else
2411                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2412         lnet_ping_buffer_addref(pbuf);
2413         lp->lp_data = pbuf;
2414 out:
2415         lp->lp_state &= ~LNET_PEER_PING_SENT;
2416         spin_unlock(&lp->lp_lock);
2417
2418         lnet_net_lock(LNET_LOCK_EX);
2419         /*
2420          * If this peer is a gateway, call the routing callback to
2421          * handle the ping reply
2422          */
2423         if (lp->lp_rtr_refcount > 0)
2424                 lnet_router_discovery_ping_reply(lp);
2425         lnet_net_unlock(LNET_LOCK_EX);
2426 }
2427
2428 /*
2429  * Send event handling. Only matters for error cases, where we clean
2430  * up state on the peer and peer_ni that would otherwise be updated in
2431  * the REPLY event handler for a successful Ping, and the ACK event
2432  * handler for a successful Push.
2433  */
2434 static int
2435 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2436 {
2437         int rc = 0;
2438
2439         if (!ev->status)
2440                 goto out;
2441
2442         spin_lock(&lp->lp_lock);
2443         if (ev->msg_type == LNET_MSG_GET) {
2444                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2445                 lp->lp_state |= LNET_PEER_PING_FAILED;
2446                 lp->lp_ping_error = ev->status;
2447         } else { /* ev->msg_type == LNET_MSG_PUT */
2448                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2449                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2450                 lp->lp_push_error = ev->status;
2451         }
2452         spin_unlock(&lp->lp_lock);
2453         rc = LNET_REDISCOVER_PEER;
2454 out:
2455         CDEBUG(D_NET, "%s Send to %s: %d\n",
2456                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2457                 libcfs_nid2str(ev->target.nid), rc);
2458         return rc;
2459 }
2460
2461 /*
2462  * Unlink event handling. This event is only seen if a call to
2463  * LNetMDUnlink() caused the MD to be unlinked. If this call was
2464  * made after the MD was set up in LNetGet() or LNetPut() then we
2465  * assume the Ping or Push timed out.
2466  */
2467 static void
2468 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2469 {
2470         spin_lock(&lp->lp_lock);
2471         /* We've passed through LNetGet() */
2472         if (lp->lp_state & LNET_PEER_PING_SENT) {
2473                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2474                 lp->lp_state |= LNET_PEER_PING_FAILED;
2475                 lp->lp_ping_error = -ETIMEDOUT;
2476                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2477                         libcfs_nid2str(lp->lp_primary_nid));
2478         }
2479         /* We've passed through LNetPut() */
2480         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2481                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2482                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2483                 lp->lp_push_error = -ETIMEDOUT;
2484                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2485                         libcfs_nid2str(lp->lp_primary_nid));
2486         }
2487         spin_unlock(&lp->lp_lock);
2488 }
2489
2490 /*
2491  * Event handler for the discovery EQ.
2492  *
2493  * Called with lnet_res_lock(cpt) held. The cpt is the
2494  * lnet_cpt_of_cookie() of the md handle cookie.
2495  */
2496 static void lnet_discovery_event_handler(struct lnet_event *event)
2497 {
2498         struct lnet_peer *lp = event->md.user_ptr;
2499         struct lnet_ping_buffer *pbuf;
2500         int rc;
2501
2502         /* discovery needs to take another look */
2503         rc = LNET_REDISCOVER_PEER;
2504
2505         CDEBUG(D_NET, "Received event: %d\n", event->type);
2506
2507         switch (event->type) {
2508         case LNET_EVENT_ACK:
2509                 lnet_discovery_event_ack(lp, event);
2510                 break;
2511         case LNET_EVENT_REPLY:
2512                 lnet_discovery_event_reply(lp, event);
2513                 break;
2514         case LNET_EVENT_SEND:
2515                 /* Only send failure triggers a retry. */
2516                 rc = lnet_discovery_event_send(lp, event);
2517                 break;
2518         case LNET_EVENT_UNLINK:
2519                 /* LNetMDUnlink() was called */
2520                 lnet_discovery_event_unlink(lp, event);
2521                 break;
2522         default:
2523                 /* Invalid events. */
2524                 LBUG();
2525         }
2526         lnet_net_lock(LNET_LOCK_EX);
2527         if (event->unlinked) {
2528                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
2529                 lnet_ping_buffer_decref(pbuf);
2530                 lnet_peer_decref_locked(lp);
2531         }
2532
2533         /* put peer back at end of request queue, if discovery not already
2534          * done */
2535         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp)) {
2536                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2537                 wake_up(&the_lnet.ln_dc_waitq);
2538         }
2539         lnet_net_unlock(LNET_LOCK_EX);
2540 }
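/*
 * Summary of the dispatch above:
 *
 *	ACK    - completion of a Push    (lnet_discovery_event_ack)
 *	REPLY  - completion of a Ping    (lnet_discovery_event_reply)
 *	SEND   - only failures matter; a failed send clears the
 *		 PING/PUSH_SENT state and requests rediscovery
 *	UNLINK - LNetMDUnlink() ran; treated as a Ping/Push timeout
 */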
2541
2542 /*
2543  * Build a peer from incoming data.
2544  *
2545  * The NIDs in the incoming data are supposed to be structured as follows:
2546  *  - loopback
2547  *  - primary NID
2548  *  - other NIDs in same net
2549  *  - NIDs in second net
2550  *  - NIDs in third net
2551  *  - ...
2552  * This is due to the way the list of NIDs in the data is created.
2553  *
2554  * Note that this function will mark the peer uptodate unless an
2555  * ENOMEM is encountered. All other errors are due to a conflict
2556  * between the DLC configuration and what discovery sees. We treat DLC
2557  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2558  * peer from becoming stuck in discovery.
2559  */
2560 static int lnet_peer_merge_data(struct lnet_peer *lp,
2561                                 struct lnet_ping_buffer *pbuf)
2562 {
2563         struct lnet_peer_ni *lpni;
2564         lnet_nid_t *curnis = NULL;
2565         struct lnet_ni_status *addnis = NULL;
2566         lnet_nid_t *delnis = NULL;
2567         unsigned flags;
2568         int ncurnis;
2569         int naddnis;
2570         int ndelnis;
2571         int nnis = 0;
2572         int i;
2573         int j;
2574         int rc;
2575
2576         flags = LNET_PEER_DISCOVERED;
2577         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2578                 flags |= LNET_PEER_MULTI_RAIL;
2579
2580         /*
2581          * Cache the routing feature for the peer: whether it is enabled
2582          * or disabled, as reported by the remote peer.
2583          */
2584         spin_lock(&lp->lp_lock);
2585         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
2586                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
2587         else
2588                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
2589         spin_unlock(&lp->lp_lock);
2590
2591         nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
2592         LIBCFS_ALLOC(curnis, nnis * sizeof(*curnis));
2593         LIBCFS_ALLOC(addnis, nnis * sizeof(*addnis));
2594         LIBCFS_ALLOC(delnis, nnis * sizeof(*delnis));
2595         if (!curnis || !addnis || !delnis) {
2596                 rc = -ENOMEM;
2597                 goto out;
2598         }
2599         ncurnis = 0;
2600         naddnis = 0;
2601         ndelnis = 0;
2602
2603         /* Construct the list of NIDs present in peer. */
2604         lpni = NULL;
2605         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2606                 curnis[ncurnis++] = lpni->lpni_nid;
2607
2608         /*
2609          * Check for NIDs in pbuf not present in curnis[].
2610          * The loop starts at 1 to skip the loopback NID.
2611          */
2612         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2613                 for (j = 0; j < ncurnis; j++)
2614                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2615                                 break;
2616                 if (j == ncurnis)
2617                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i];
2618         }
2619         /*
2620          * Check for NIDs in curnis[] not present in pbuf.
2621          * The nested loop starts at 1 to skip the loopback NID.
2622          *
2623          * But never add the loopback NID to delnis[]: if it is
2624          * present in curnis[] then this peer is for this node.
2625          */
2626         for (i = 0; i < ncurnis; i++) {
2627                 if (LNET_NETTYP(LNET_NIDNET(curnis[i])) == LOLND)
2628                         continue;
2629                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++) {
2630                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid) {
2631                                 /*
2632                                  * update the information we cache for the
2633                                  * peer with the latest information we
2634                                  * received
2635                                  */
2636                                 lpni = lnet_find_peer_ni_locked(curnis[i]);
2637                                 if (lpni) {
2638                                         lpni->lpni_ns_status = pbuf->pb_info.pi_ni[j].ns_status;
2639                                         lnet_peer_ni_decref_locked(lpni);
2640                                 }
2641                                 break;
2642                         }
2643                 }
2644                 if (j == pbuf->pb_info.pi_nnis)
2645                         delnis[ndelnis++] = curnis[i];
2646         }
2647
2648         /*
2649          * If we get here and discovery is disabled then we don't want
2650          * to add or delete any NIs. We have just updated the ones we
2651          * have some information on, and we call it a day.
2652          */
2653         rc = 0;
2654         if (lnet_is_discovery_disabled(lp))
2655                 goto out;
2656
2657         for (i = 0; i < naddnis; i++) {
2658                 rc = lnet_peer_add_nid(lp, addnis[i].ns_nid, flags);
2659                 if (rc) {
2660                         CERROR("Error adding NID %s to peer %s: %d\n",
2661                                libcfs_nid2str(addnis[i].ns_nid),
2662                                libcfs_nid2str(lp->lp_primary_nid), rc);
2663                         if (rc == -ENOMEM)
2664                                 goto out;
2665                 }
2666                 lpni = lnet_find_peer_ni_locked(addnis[i].ns_nid);
2667                 if (lpni) {
2668                         lpni->lpni_ns_status = addnis[i].ns_status;
2669                         lnet_peer_ni_decref_locked(lpni);
2670                 }
2671         }
2672
2673         for (i = 0; i < ndelnis; i++) {
2674                 /*
2675                  * For routers it's okay to delete the primary_nid because
2676                  * the upper layers don't really rely on it. So if we're
2677                  * being told that the router changed its primary_nid,
2678                  * then it's okay to delete it.
2679                  */
2680                 if (lp->lp_rtr_refcount > 0)
2681                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
2682                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2683                 if (rc) {
2684                         CERROR("Error deleting NID %s from peer %s: %d\n",
2685                                libcfs_nid2str(delnis[i]),
2686                                libcfs_nid2str(lp->lp_primary_nid), rc);
2687                         if (rc == -ENOMEM)
2688                                 goto out;
2689                 }
2690         }
2691         /*
2692          * Errors other than -ENOMEM are due to peers having been
2693          * configured with DLC. Ignore these because DLC overrides
2694          * Discovery.
2695          */
2696         rc = 0;
2697 out:
2698         LIBCFS_FREE(curnis, nnis * sizeof(*curnis));
2699         LIBCFS_FREE(addnis, nnis * sizeof(*addnis));
2700         LIBCFS_FREE(delnis, nnis * sizeof(*delnis));
2701         lnet_ping_buffer_decref(pbuf);
2702         CDEBUG(D_NET, "peer %s (%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
2703
2704         if (rc) {
2705                 spin_lock(&lp->lp_lock);
2706                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2707                 lp->lp_state |= LNET_PEER_FORCE_PING;
2708                 spin_unlock(&lp->lp_lock);
2709         }
2710         return rc;
2711 }
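/*
 * The reconciliation above is two set differences, sketched here with
 * the loopback entry excluded and the ns_status refresh omitted:
 *
 *	addnis = { n in pbuf   : n not in curnis }
 *	delnis = { n in curnis : n not in pbuf }
 *	for each n in addnis: lnet_peer_add_nid(lp, n, flags);
 *	for each n in delnis: lnet_peer_del_nid(lp, n, flags);
 */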
2712
2713 /*
2714  * The data in pbuf says lp is its primary peer, but the data was
2715  * received by a different peer. Try to update lp with the data.
2716  */
2717 static int
2718 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2719 {
2720         struct lnet_handle_md mdh;
2721
2722         /* Queue lp for discovery, and force it on the request queue. */
2723         lnet_net_lock(LNET_LOCK_EX);
2724         if (lnet_peer_queue_for_discovery(lp))
2725                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2726         lnet_net_unlock(LNET_LOCK_EX);
2727
2728         LNetInvalidateMDHandle(&mdh);
2729
2730         /*
2731          * Decide whether we can move the peer to the DATA_PRESENT state.
2732          *
2733          * We replace stale data for a multi-rail peer, repair PING_FAILED
2734          * status, and preempt FORCE_PING.
2735          *
2736          * If after that we have DATA_PRESENT, we merge it into this peer.
2737          */
2738         spin_lock(&lp->lp_lock);
2739         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2740                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2741                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2742                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2743                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2744                         lnet_ping_buffer_decref(pbuf);
2745                         pbuf = lp->lp_data;
2746                         lp->lp_data = NULL;
2747                 }
2748         }
2749         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2750                 lnet_ping_buffer_decref(lp->lp_data);
2751                 lp->lp_data = NULL;
2752                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2753         }
2754         if (lp->lp_state & LNET_PEER_PING_FAILED) {
2755                 mdh = lp->lp_ping_mdh;
2756                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2757                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2758                 lp->lp_ping_error = 0;
2759         }
2760         if (lp->lp_state & LNET_PEER_FORCE_PING)
2761                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2762         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2763         spin_unlock(&lp->lp_lock);
2764
2765         if (!LNetMDHandleIsInvalid(mdh))
2766                 LNetMDUnlink(mdh);
2767
2768         if (pbuf)
2769                 return lnet_peer_merge_data(lp, pbuf);
2770
2771         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2772         return 0;
2773 }
2774
2775 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2776 {
2777         int i;
2778
2779         for (i = 0; i < pinfo->pi_nnis; i++) {
2780                 if (pinfo->pi_ni[i].ns_nid == nid)
2781                         return true;
2782         }
2783
2784         return false;
2785 }
2786
2787 /*
2788  * Update a peer using the data received.
2789  */
2790 static int lnet_peer_data_present(struct lnet_peer *lp)
2791 __must_hold(&lp->lp_lock)
2792 {
2793         struct lnet_ping_buffer *pbuf;
2794         struct lnet_peer_ni *lpni;
2795         lnet_nid_t nid = LNET_NID_ANY;
2796         unsigned flags;
2797         int rc = 0;
2798
2799         pbuf = lp->lp_data;
2800         lp->lp_data = NULL;
2801         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2802         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2803         spin_unlock(&lp->lp_lock);
2804
2805         /*
2806          * Modifications of peer structures are done while holding the
2807          * ln_api_mutex. A global lock is required because we may be
2808          * modifying multiple peer structures, and a mutex greatly
2809          * simplifies memory management.
2810          *
2811          * The actual changes to the data structures must also protect
2812          * against concurrent lookups, for which the lnet_net_lock in
2813          * LNET_LOCK_EX mode is used.
2814          */
2815         mutex_lock(&the_lnet.ln_api_mutex);
2816         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2817                 rc = -ESHUTDOWN;
2818                 goto out;
2819         }
2820
2821         /*
2822          * If this peer is not on the peer list then it is being torn
2823          * down, and our reference count may be all that is keeping it
2824          * alive. Don't do any work on it.
2825          */
2826         if (list_empty(&lp->lp_peer_list))
2827                 goto out;
2828
2829         flags = LNET_PEER_DISCOVERED;
2830         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2831                 flags |= LNET_PEER_MULTI_RAIL;
2832
2833         /*
2834          * Check whether the primary NID in the message matches the
2835          * primary NID of the peer. If it does, update the peer; if
2836          * it does not, check whether there is already a peer with
2837          * that primary NID. If no such peer exists, try to update
2838          * the primary NID of the current peer (allowed if it was
2839          * created due to message traffic) and complete the update.
2840          * If the peer did exist, hand off the data to it.
2841          *
2842          * The peer for the loopback interface is a special case: this
2843          * is the peer for the local node, and we want to set its
2844          * primary NID to the correct value here. Moreover, this peer
2845          * can show up with only the loopback NID in the ping buffer.
2846          */
2847         if (pbuf->pb_info.pi_nnis <= 1)
2848                 goto out;
2849         nid = pbuf->pb_info.pi_ni[1].ns_nid;
2850         if (LNET_NETTYP(LNET_NIDNET(lp->lp_primary_nid)) == LOLND) {
2851                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2852                 if (!rc)
2853                         rc = lnet_peer_merge_data(lp, pbuf);
2854         /*
2855          * If the primary nid of the peer is present in the ping info
2856          * returned from the peer, but it's not the primary nid we have
2857          * cached locally, and discovery is disabled, then we don't want
2858          * to update our local peer info by adding or removing NIDs; we
2859          * just want to update the status of the nids we currently have
2860          * recorded in that peer.
2861          */
2862         } else if (lp->lp_primary_nid == nid ||
2863                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
2864                     lnet_is_discovery_disabled(lp))) {
2865                 rc = lnet_peer_merge_data(lp, pbuf);
2866         } else {
2867                 lpni = lnet_find_peer_ni_locked(nid);
2868                 if (!lpni) {
2869                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
2870                         if (rc) {
2871                                 CERROR("Primary NID error %s versus %s: %d\n",
2872                                        libcfs_nid2str(lp->lp_primary_nid),
2873                                        libcfs_nid2str(nid), rc);
2874                         } else {
2875                                 rc = lnet_peer_merge_data(lp, pbuf);
2876                         }
2877                 } else {
2878                         struct lnet_peer *new_lp;
2879                         new_lp = lpni->lpni_peer_net->lpn_peer;
2880                         /*
2881                          * if lp has discovery/MR enabled that means new_lp
2882                          * If lp has discovery/MR enabled, then new_lp
2883                          * should have discovery/MR enabled as well, since
2884                          * it's the same peer, which we're about to merge.
2885                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
2886                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2887                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2888                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
2889
2890                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
2891                         lnet_consolidate_routes_locked(lp, new_lp);
2892                         lnet_peer_ni_decref_locked(lpni);
2893                 }
2894         }
2895 out:
2896         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc,
2897                lp->lp_state);
2898         mutex_unlock(&the_lnet.ln_api_mutex);
2899
2900         spin_lock(&lp->lp_lock);
2901         /* Tell discovery to re-check the peer immediately. */
2902         if (!rc)
2903                 rc = LNET_REDISCOVER_PEER;
2904         return rc;
2905 }
2906
2907 /*
2908  * A ping failed. Clear the PING_FAILED state and set the
2909  * FORCE_PING state, to ensure a retry even if discovery is
2910  * disabled. This avoids being left with incorrect state.
2911  */
2912 static int lnet_peer_ping_failed(struct lnet_peer *lp)
2913 __must_hold(&lp->lp_lock)
2914 {
2915         struct lnet_handle_md mdh;
2916         int rc;
2917
2918         mdh = lp->lp_ping_mdh;
2919         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2920         lp->lp_state &= ~LNET_PEER_PING_FAILED;
2921         lp->lp_state |= LNET_PEER_FORCE_PING;
2922         rc = lp->lp_ping_error;
2923         lp->lp_ping_error = 0;
2924         spin_unlock(&lp->lp_lock);
2925
2926         if (!LNetMDHandleIsInvalid(mdh))
2927                 LNetMDUnlink(mdh);
2928
2929         CDEBUG(D_NET, "peer %s:%d\n",
2930                libcfs_nid2str(lp->lp_primary_nid), rc);
2931
2932         spin_lock(&lp->lp_lock);
2933         return rc ? rc : LNET_REDISCOVER_PEER;
2934 }
2935
2936 /*
2937  * Select NID to send a Ping or Push to.
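      *
      * A directly connected NID is preferred; failing that, a NID
      * reachable through a configured route is used, and LNET_NID_ANY
      * means the peer is unreachable. A minimal sketch of the calling
      * pattern, as used by the Ping and Push paths below (the peer NI
      * lists may only be walked under the net lock):
      *
      *     cpt = lnet_net_lock_current();
      *     nid = lnet_peer_select_nid(lp);
      *     lnet_net_unlock(cpt);
      *     if (nid == LNET_NID_ANY)
      *             return -EHOSTUNREACH;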
2938  */
2939 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
2940 {
2941         struct lnet_peer_ni *lpni;
2942
2943         /* Look for a direct-connected NID for this peer. */
2944         lpni = NULL;
2945         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2946                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
2947                         continue;
2948                 break;
2949         }
2950         if (lpni)
2951                 return lpni->lpni_nid;
2952
2953         /* Look for a routed-connected NID for this peer. */
2954         lpni = NULL;
2955         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2956                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
2957                         continue;
2958                 break;
2959         }
2960         if (lpni)
2961                 return lpni->lpni_nid;
2962
2963         return LNET_NID_ANY;
2964 }
2965
2966 /* Active side of ping. */
2967 static int lnet_peer_send_ping(struct lnet_peer *lp)
2968 __must_hold(&lp->lp_lock)
2969 {
2970         lnet_nid_t pnid;
2971         int nnis;
2972         int rc;
2973         int cpt;
2974
2975         lp->lp_state |= LNET_PEER_PING_SENT;
2976         lp->lp_state &= ~LNET_PEER_FORCE_PING;
2977         spin_unlock(&lp->lp_lock);
2978
2979         cpt = lnet_net_lock_current();
2980         /* Refcount for MD. */
2981         lnet_peer_addref_locked(lp);
2982         pnid = lnet_peer_select_nid(lp);
2983         lnet_net_unlock(cpt);
2984
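             /*
              * Size the ping REPLY buffer for at least LNET_INTERFACES_MIN
              * NIDs, or for lp_data_nnis if the peer's last data reported
              * more interfaces than that.
              */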
2985         nnis = MAX(lp->lp_data_nnis, LNET_INTERFACES_MIN);
2986
2987         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
2988                             the_lnet.ln_dc_eqh, false);
2989
2990         /*
2991          * A positive rc means LNetMDBind() failed inside lnet_send_ping():
2992          * drop the peer refcount taken above ourselves. Any other failure
2993          * calls LNetMDUnlink(), whose unlink event eventually drops it.
2994          */
2995         if (rc > 0) {
2996                 lnet_net_lock(cpt);
2997                 lnet_peer_decref_locked(lp);
2998                 lnet_net_unlock(cpt);
2999                 rc = -rc; /* change the rc to negative value */
3000                 goto fail_error;
3001         } else if (rc < 0) {
3002                 goto fail_error;
3003         }
3004
3005         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3006
3007         spin_lock(&lp->lp_lock);
3008         return 0;
3009
3010 fail_error:
3011         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
3012         /*
3013          * The errors that get us here are considered hard errors and
3014          * cause Discovery to terminate. So we clear PING_SENT, but do
3015          * not set either PING_FAILED or FORCE_PING. In fact we need
3016          * to clear PING_FAILED, because the unlink event handler will
3017          * have set it if we called LNetMDUnlink() above.
3018          */
3019         spin_lock(&lp->lp_lock);
3020         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3021         return rc;
3022 }
3023
3024 /*
3025  * A push failed. This function exists because you cannot call
3026  * LNetMDUnlink() from an event handler; the unlink is done here.
3027  */
3028 static int lnet_peer_push_failed(struct lnet_peer *lp)
3029 __must_hold(&lp->lp_lock)
3030 {
3031         struct lnet_handle_md mdh;
3032         int rc;
3033
3034         mdh = lp->lp_push_mdh;
3035         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3036         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3037         rc = lp->lp_push_error;
3038         lp->lp_push_error = 0;
3039         spin_unlock(&lp->lp_lock);
3040
3041         if (!LNetMDHandleIsInvalid(mdh))
3042                 LNetMDUnlink(mdh);
3043
3044         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3045         spin_lock(&lp->lp_lock);
3046         return rc ? rc : LNET_REDISCOVER_PEER;
3047 }
3048
3049 /* Active side of push. */
3050 static int lnet_peer_send_push(struct lnet_peer *lp)
3051 __must_hold(&lp->lp_lock)
3052 {
3053         struct lnet_ping_buffer *pbuf;
3054         struct lnet_process_id id;
3055         struct lnet_md md;
3056         int cpt;
3057         int rc;
3058
3059         /* Don't push to a non-multi-rail peer. */
3060         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3061                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3062                 return 0;
3063         }
3064
3065         lp->lp_state |= LNET_PEER_PUSH_SENT;
3066         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3067         spin_unlock(&lp->lp_lock);
3068
3069         cpt = lnet_net_lock_current();
3070         pbuf = the_lnet.ln_ping_target;
3071         lnet_ping_buffer_addref(pbuf);
3072         lnet_net_unlock(cpt);
3073
3074         /* Push source MD */
3075         md.start     = &pbuf->pb_info;
3076         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
3077         md.threshold = 2; /* Put/Ack */
3078         md.max_size  = 0;
3079         md.options   = 0;
3080         md.eq_handle = the_lnet.ln_dc_eqh;
3081         md.user_ptr  = lp;
3082
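             /*
              * Events on this MD are delivered through the discovery EQ,
              * i.e. to lnet_discovery_event_handler(), with the peer
              * attached as user_ptr.
              */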
3083         rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
3084         if (rc) {
3085                 lnet_ping_buffer_decref(pbuf);
3086                 CERROR("Can't bind push source MD: %d\n", rc);
3087                 goto fail_error;
3088         }
3089         cpt = lnet_net_lock_current();
3090         /* Refcount for MD. */
3091         lnet_peer_addref_locked(lp);
3092         id.pid = LNET_PID_LUSTRE;
3093         id.nid = lnet_peer_select_nid(lp);
3094         lnet_net_unlock(cpt);
3095
3096         if (id.nid == LNET_NID_ANY) {
3097                 rc = -EHOSTUNREACH;
3098                 goto fail_unlink;
3099         }
3100
3101         rc = LNetPut(LNET_NID_ANY, lp->lp_push_mdh,
3102                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
3103                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3104
3105         if (rc)
3106                 goto fail_unlink;
3107
3108         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3109
3110         spin_lock(&lp->lp_lock);
3111         return 0;
3112
3113 fail_unlink:
3114         LNetMDUnlink(lp->lp_push_mdh);
3115         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3116 fail_error:
3117         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nid2str(lp->lp_primary_nid), lp, rc);
3118         /*
3119          * The errors that get us here are considered hard errors and
3120          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3121          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3122          * because the unlink event handler will have set it if we
3123          * called LNetMDUnlink() above.
3124          */
3125         spin_lock(&lp->lp_lock);
3126         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3127         return rc;
3128 }
3129
3130 /*
3131  * An unrecoverable error was encountered during discovery.
3132  * Set error status in peer and abort discovery.
3133  */
3134 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
3135 {
3136         CDEBUG(D_NET, "Discovery error %s: %d\n",
3137                libcfs_nid2str(lp->lp_primary_nid), error);
3138
3139         spin_lock(&lp->lp_lock);
3140         lp->lp_dc_error = error;
3141         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3142         lp->lp_state |= LNET_PEER_REDISCOVER;
3143         spin_unlock(&lp->lp_lock);
3144 }
3145
3146 /*
3147  * Mark the peer as discovered.
3148  */
3149 static int lnet_peer_discovered(struct lnet_peer *lp)
3150 __must_hold(&lp->lp_lock)
3151 {
3152         lp->lp_state |= LNET_PEER_DISCOVERED;
3153         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3154                           LNET_PEER_REDISCOVER);
3155
3156         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3157
3158         return 0;
3159 }
3160
3161
3162 /*
3163  * Discovering this peer is taking too long. Cancel any Ping or Push
3164  * that discovery is waiting on by unlinking the relevant MDs. The
3165  * lnet_discovery_event_handler() will proceed from here and complete
3166  * the cleanup.
3167  */
3168 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3169 {
3170         struct lnet_handle_md ping_mdh;
3171         struct lnet_handle_md push_mdh;
3172
3173         LNetInvalidateMDHandle(&ping_mdh);
3174         LNetInvalidateMDHandle(&push_mdh);
3175
3176         spin_lock(&lp->lp_lock);
3177         if (lp->lp_state & LNET_PEER_PING_SENT) {
3178                 ping_mdh = lp->lp_ping_mdh;
3179                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3180         }
3181         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3182                 push_mdh = lp->lp_push_mdh;
3183                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3184         }
3185         spin_unlock(&lp->lp_lock);
3186
3187         if (!LNetMDHandleIsInvalid(ping_mdh))
3188                 LNetMDUnlink(ping_mdh);
3189         if (!LNetMDHandleIsInvalid(push_mdh))
3190                 LNetMDUnlink(push_mdh);
3191 }
3192
3193 /*
3194  * Wait for work to be queued or some other change that must be
3195  * attended to. Returns non-zero if the discovery thread should shut
3196  * down.
3197  */
3198 static int lnet_peer_discovery_wait_for_work(void)
3199 {
3200         int cpt;
3201         int rc = 0;
3202
3203         DEFINE_WAIT(wait);
3204
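             /*
              * Open-coded interruptible wait: all four wake-up conditions
              * are re-checked under the net lock, and we sleep for at most
              * one second at a time so that peers stuck on the working
              * queue can be noticed (see the comment below).
              */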
3205         cpt = lnet_net_lock_current();
3206         for (;;) {
3207                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3208                                 TASK_INTERRUPTIBLE);
3209                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3210                         break;
3211                 if (lnet_push_target_resize_needed())
3212                         break;
3213                 if (!list_empty(&the_lnet.ln_dc_request))
3214                         break;
3215                 if (!list_empty(&the_lnet.ln_msg_resend))
3216                         break;
3217                 lnet_net_unlock(cpt);
3218
3219                 /*
3220                  * Wake up at least once per second to check for peers
3221                  * that have been stuck on the working queue for longer
3222                  * than the peer timeout.
3223                  */
3224                 schedule_timeout(cfs_time_seconds(1));
3225                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3226                 cpt = lnet_net_lock_current();
3227         }
3228         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3229
3230         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3231                 rc = -ESHUTDOWN;
3232
3233         lnet_net_unlock(cpt);
3234
3235         CDEBUG(D_NET, "woken: %d\n", rc);
3236
3237         return rc;
3238 }
3239
3240 /*
3241  * Messages that were pending on a destroyed peer are put on a global
3242  * resend list. The discovery thread checks this list when it wakes up
3243  * and resends the messages on it. These messages can still be sendable
3244  * if the lpni that initially caused the message to be re-queued was
3245  * transferred to another peer.
3246  *
3247  * It is possible that LNet could be shut down while we're iterating
3248  * through the list. lnet_shutdown_lndnets() will attempt to access the
3249  * resend list, but will have to wait until the spinlock is released, by
3250  * which time there shouldn't be any more messages on the resend list.
3251  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3252  * for the messages so they can be released. The other case is that
3253  * lnet_shutdown_lndnets() can finalize all the messages before this
3254  * function can visit the resend list, in which case this function will be
3255  * a no-op.
3256  */
3257 static void lnet_resend_msgs(void)
3258 {
3259         struct lnet_msg *msg, *tmp;
3260         struct list_head resend;
3261         int rc;
3262
3263         INIT_LIST_HEAD(&resend);
3264
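             /*
              * Splice the whole resend list onto a private list under the
              * spinlock, then drain it with the lock dropped, so that
              * lnet_send() and lnet_finalize() are never called while
              * holding ln_msg_resend_lock.
              */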
3265         spin_lock(&the_lnet.ln_msg_resend_lock);
3266         list_splice(&the_lnet.ln_msg_resend, &resend);
3267         spin_unlock(&the_lnet.ln_msg_resend_lock);
3268
3269         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3270                 list_del_init(&msg->msg_list);
3271                 rc = lnet_send(msg->msg_src_nid_param, msg,
3272                                msg->msg_rtr_nid_param);
3273                 if (rc < 0) {
3274                         CNETERR("Error sending %s to %s: %d\n",
3275                                lnet_msgtyp2str(msg->msg_type),
3276                                libcfs_id2str(msg->msg_target), rc);
3277                         lnet_finalize(msg, rc);
3278                 }
3279         }
3280 }
3281
3282 /* The discovery thread. */
3283 static int lnet_peer_discovery(void *arg)
3284 {
3285         struct lnet_peer *lp;
3286         int rc;
3287
3288         wait_for_completion(&the_lnet.ln_started);
3289
3290         CDEBUG(D_NET, "started\n");
3291         cfs_block_allsigs();
3292
3293         for (;;) {
3294                 if (lnet_peer_discovery_wait_for_work())
3295                         break;
3296
3297                 lnet_resend_msgs();
3298
3299                 if (lnet_push_target_resize_needed())
3300                         lnet_push_target_resize();
3301
3302                 lnet_net_lock(LNET_LOCK_EX);
3303                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3304                         break;
3305
3306                 /*
3307                  * Process all incoming discovery work requests.  When
3308                  * discovery must wait on a peer to change state, it
3309                  * is added to the tail of the ln_dc_working queue. A
3310                  * timestamp keeps track of when the peer was added,
3311                  * so we can time out discovery requests that take too
3312                  * long.
3313                  */
3314                 while (!list_empty(&the_lnet.ln_dc_request)) {
3315                         lp = list_first_entry(&the_lnet.ln_dc_request,
3316                                               struct lnet_peer, lp_dc_list);
3317                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3318                         /*
3319                          * set the time the peer was put on the dc_working
3320                          * queue. It shouldn't remain on the queue
3321                          * forever, in case the GET message (for ping)
3322                          * doesn't get a REPLY or the PUT message (for
3323                          * push) doesn't get an ACK.
3324                          */
3325                         lp->lp_last_queued = ktime_get_real_seconds();
3326                         lnet_net_unlock(LNET_LOCK_EX);
3327
3328                         /*
3329                          * Select an action depending on the state of
3330                          * the peer and whether discovery is disabled.
3331                          * The check whether discovery is disabled is
3332                          * done after the code that handles processing
3333                          * for arrived data, cleanup for failures, and
3334                          * forcing a Ping or Push.
3335                          */
3336                         spin_lock(&lp->lp_lock);
3337                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3338                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3339                                 lp->lp_state);
3340                         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3341                                 rc = lnet_peer_data_present(lp);
3342                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3343                                 rc = lnet_peer_ping_failed(lp);
3344                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3345                                 rc = lnet_peer_push_failed(lp);
3346                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3347                                 rc = lnet_peer_send_ping(lp);
3348                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3349                                 rc = lnet_peer_send_push(lp);
3350                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3351                                 rc = lnet_peer_send_ping(lp);
3352                         else if (lnet_peer_needs_push(lp))
3353                                 rc = lnet_peer_send_push(lp);
3354                         else
3355                                 rc = lnet_peer_discovered(lp);
3356                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
3357                                 libcfs_nid2str(lp->lp_primary_nid), lp,
3358                                 lp->lp_state, rc);
3359                         spin_unlock(&lp->lp_lock);
3360
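                             /*
                              * LNET_REDISCOVER_PEER (> 0) requeues the peer
                              * for another pass, rc < 0 is a hard discovery
                              * error, and rc == 0 either leaves the peer
                              * waiting on a Ping/Push event or, if the
                              * DISCOVERING bit was cleared, completes
                              * discovery below.
                              */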
3361                         lnet_net_lock(LNET_LOCK_EX);
3362                         if (rc == LNET_REDISCOVER_PEER) {
3363                                 list_move(&lp->lp_dc_list,
3364                                           &the_lnet.ln_dc_request);
3365                         } else if (rc) {
3366                                 lnet_peer_discovery_error(lp, rc);
3367                         }
3368                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3369                                 lnet_peer_discovery_complete(lp);
3370                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3371                                 break;
3372                 }
3373
3374                 lnet_net_unlock(LNET_LOCK_EX);
3375         }
3376
3377         CDEBUG(D_NET, "stopping\n");
3378         /*
3379          * Clean up before telling lnet_peer_discovery_stop() that
3380          * we're done. Use wake_up() below to somewhat reduce the
3381          * size of the thundering herd if there are multiple threads
3382          * waiting on discovery of a single peer.
3383          */
3384
3385         /* Queue cleanup 1: stop all pending pings and pushes. */
3386         lnet_net_lock(LNET_LOCK_EX);
3387         while (!list_empty(&the_lnet.ln_dc_working)) {
3388                 lp = list_first_entry(&the_lnet.ln_dc_working,
3389                                       struct lnet_peer, lp_dc_list);
3390                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3391                 lnet_net_unlock(LNET_LOCK_EX);
3392                 lnet_peer_cancel_discovery(lp);
3393                 lnet_net_lock(LNET_LOCK_EX);
3394         }
3395         lnet_net_unlock(LNET_LOCK_EX);
3396
3397         /* Queue cleanup 2: wait for the expired queue to clear. */
3398         while (!list_empty(&the_lnet.ln_dc_expired))
3399                 schedule_timeout(cfs_time_seconds(1));
3400
3401         /* Queue cleanup 3: clear the request queue. */
3402         lnet_net_lock(LNET_LOCK_EX);
3403         while (!list_empty(&the_lnet.ln_dc_request)) {
3404                 lp = list_first_entry(&the_lnet.ln_dc_request,
3405                                       struct lnet_peer, lp_dc_list);
3406                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3407                 lnet_peer_discovery_complete(lp);
3408         }
3409         lnet_net_unlock(LNET_LOCK_EX);
3410
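             /* free the discovery EQ allocated in lnet_peer_discovery_start() */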
3411         LNetEQFree(the_lnet.ln_dc_eqh);
3412         LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3413
3414         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3415         wake_up(&the_lnet.ln_dc_waitq);
3416
3417         CDEBUG(D_NET, "stopped\n");
3418
3419         return 0;
3420 }
3421
3422 /* ln_api_mutex is held on entry. */
3423 int lnet_peer_discovery_start(void)
3424 {
3425         struct task_struct *task;
3426         int rc;
3427
3428         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3429                 return -EALREADY;
3430
3431         rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
3432         if (rc != 0) {
3433                 CERROR("Can't allocate discovery EQ: %d\n", rc);
3434                 return rc;
3435         }
3436
3437         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3438         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3439         if (IS_ERR(task)) {
3440                 rc = PTR_ERR(task);
3441                 CERROR("Can't start peer discovery thread: %d\n", rc);
3442
3443                 LNetEQFree(the_lnet.ln_dc_eqh);
3444                 LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3445
3446                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3447         }
3448
3449         CDEBUG(D_NET, "discovery start: %d\n", rc);
3450
3451         return rc;
3452 }
3453
3454 /* ln_api_mutex is held on entry. */
3455 void lnet_peer_discovery_stop(void)
3456 {
3457         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3458                 return;
3459
3460         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3461         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3462
3463         /* In the LNetNIInit() path we may be stopping discovery before it
3464          * entered its work loop
3465          */
3466         if (!completion_done(&the_lnet.ln_started))
3467                 complete(&the_lnet.ln_started);
3468         else
3469                 wake_up(&the_lnet.ln_dc_waitq);
3470
3471         wait_event(the_lnet.ln_dc_waitq,
3472                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3473
3474         LASSERT(list_empty(&the_lnet.ln_dc_request));
3475         LASSERT(list_empty(&the_lnet.ln_dc_working));
3476         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3477
3478         CDEBUG(D_NET, "discovery stopped\n");
3479 }
3480
3481 /* Debugging */
3482
3483 void
3484 lnet_debug_peer(lnet_nid_t nid)
3485 {
3486         char                    *aliveness = "NA";
3487         struct lnet_peer_ni     *lp;
3488         int                     cpt;
3489
3490         cpt = lnet_cpt_of_nid(nid, NULL);
3491         lnet_net_lock(cpt);
3492
3493         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3494         if (IS_ERR(lp)) {
3495                 lnet_net_unlock(cpt);
3496                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3497                 return;
3498         }
3499
3500         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3501                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
3502
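             /*
              * Columns: nid, refcount, aliveness, peer tx credit tunable,
              * rtr credits, min rtr credits, tx credits, min tx credits,
              * tx queue bytes.
              */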
3503         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3504                libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3505                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3506                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3507                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3508
3509         lnet_peer_ni_decref_locked(lp);
3510
3511         lnet_net_unlock(cpt);
3512 }
3513
3514 /* Gathering information for userspace. */
3515
3516 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3517                           char aliveness[LNET_MAX_STR_LEN],
3518                           __u32 *cpt_iter, __u32 *refcount,
3519                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3520                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3521                           __u32 *peer_tx_qnob)
3522 {
3523         struct lnet_peer_table          *peer_table;
3524         struct lnet_peer_ni             *lp;
3525         int                             j;
3526         int                             lncpt;
3527         bool                            found = false;
3528
3529         /* get the number of CPTs */
3530         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3531
3532         /* if the cpt number to be examined is >= the number of cpts in
3533          * the system then indicate that there are no more cpts to examine
3534          */
3535         if (*cpt_iter >= lncpt)
3536                 return -ENOENT;
3537
3538         /* get the current table */
3539         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3540         /* if the ptable is NULL then there are no more cpts to examine */
3541         if (peer_table == NULL)
3542                 return -ENOENT;
3543
3544         lnet_net_lock(*cpt_iter);
3545
3546         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3547                 struct list_head *peers = &peer_table->pt_hash[j];
3548
3549                 list_for_each_entry(lp, peers, lpni_hashlist) {
3550                         if (peer_index-- > 0)
3551                                 continue;
3552
3553                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3554                         if (lnet_isrouter(lp) ||
3555                             lnet_peer_aliveness_enabled(lp))
3556                                 snprintf(aliveness, LNET_MAX_STR_LEN,
3557                                          lnet_is_peer_ni_alive(lp) ? "up" : "down");
3558
3559                         *nid = lp->lpni_nid;
3560                         *refcount = atomic_read(&lp->lpni_refcount);
3561                         *ni_peer_tx_credits =
3562                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3563                         *peer_tx_credits = lp->lpni_txcredits;
3564                         *peer_rtr_credits = lp->lpni_rtrcredits;
3565                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3566                         *peer_tx_qnob = lp->lpni_txqnob;
3567
3568                         found = true;
                             break;
3569                 }
3570
3571         }
3572         lnet_net_unlock(*cpt_iter);
3573
3574         *cpt_iter = lncpt;
3575
3576         return found ? 0 : -ENOENT;
3577 }
3578
3579 /* ln_api_mutex is held, which keeps the peer list stable */
3580 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3581 {
3582         struct lnet_ioctl_element_stats *lpni_stats;
3583         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3584         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3585         struct lnet_peer_ni_credit_info *lpni_info;
3586         struct lnet_peer_ni *lpni;
3587         struct lnet_peer *lp;
3588         lnet_nid_t nid;
3589         __u32 size;
3590         int rc;
3591
3592         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3593
3594         if (!lp) {
3595                 rc = -ENOENT;
3596                 goto out;
3597         }
3598
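             /*
              * Each peer NI is exported to the user buffer as one record:
              * nid, credit info, stats, message stats, then health stats.
              * The loop below writes the fields in exactly this order, so
              * the total size is the per-record size times lp_nnis.
              */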
3599         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3600                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3601         size *= lp->lp_nnis;
3602         if (size > cfg->prcfg_size) {
3603                 cfg->prcfg_size = size;
3604                 rc = -E2BIG;
3605                 goto out_lp_decref;
3606         }
3607
3608         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3609         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3610         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3611         cfg->prcfg_count = lp->lp_nnis;
3612         cfg->prcfg_size = size;
3613         cfg->prcfg_state = lp->lp_state;
3614
3615         /* Allocate helper buffers. */
3616         rc = -ENOMEM;
3617         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3618         if (!lpni_info)
3619                 goto out_lp_decref;
3620         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3621         if (!lpni_stats)
3622                 goto out_free_info;
3623         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3624         if (!lpni_msg_stats)
3625                 goto out_free_stats;
3626         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3627         if (!lpni_hstats)
3628                 goto out_free_msg_stats;
3629
3630
3631         lpni = NULL;
3632         rc = -EFAULT;
3633         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3634                 nid = lpni->lpni_nid;
3635                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3636                         goto out_free_hstats;
3637                 bulk += sizeof(nid);
3638
3639                 memset(lpni_info, 0, sizeof(*lpni_info));
3640                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3641                 if (lnet_isrouter(lpni) ||
3642                     lnet_peer_aliveness_enabled(lpni))
3643                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3644                                 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
3645
3646                 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3647                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3648                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3649                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3650                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3651                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3652                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3653                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3654                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3655                         goto out_free_hstats;
3656                 bulk += sizeof(*lpni_info);
3657
3658                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3659                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3660                                                             LNET_STATS_TYPE_SEND);
3661                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3662                                                             LNET_STATS_TYPE_RECV);
3663                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3664                                                             LNET_STATS_TYPE_DROP);
3665                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3666                         goto out_free_hstats;
3667                 bulk += sizeof(*lpni_stats);
3668                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3669                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3670                         goto out_free_hstats;
3671                 bulk += sizeof(*lpni_msg_stats);
3672                 lpni_hstats->hlpni_network_timeout =
3673                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3674                 lpni_hstats->hlpni_remote_dropped =
3675                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3676                 lpni_hstats->hlpni_remote_timeout =
3677                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3678                 lpni_hstats->hlpni_remote_error =
3679                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3680                 lpni_hstats->hlpni_health_value =
3681                   atomic_read(&lpni->lpni_healthv);
3682                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3683                         goto out_free_hstats;
3684                 bulk += sizeof(*lpni_hstats);
3685         }
3686         rc = 0;
3687
3688 out_free_hstats:
3689         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3690 out_free_msg_stats:
3691         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3692 out_free_stats:
3693         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3694 out_free_info:
3695         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3696 out_lp_decref:
3697         lnet_peer_decref_locked(lp);
3698 out:
3699         return rc;
3700 }
3701
3702 void
3703 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3704 {
3705         /* the monitor thread could've shut down and cleaned up the queues */
3706         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3707                 return;
3708
3709         if (list_empty(&lpni->lpni_recovery) &&
3710             atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3711                 CERROR("lpni %s added to recovery queue. Health = %d\n",
3712                         libcfs_nid2str(lpni->lpni_nid),
3713                         atomic_read(&lpni->lpni_healthv));
3714                 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
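                     /* membership on the recovery queue holds a ref on the lpni */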
3715                 lnet_peer_ni_addref_locked(lpni);
3716         }
3717 }
3718
3719 /* Call with the ln_api_mutex held */
3720 void
3721 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3722 {
3723         struct lnet_peer_table *ptable;
3724         struct lnet_peer *lp;
3725         struct lnet_peer_net *lpn;
3726         struct lnet_peer_ni *lpni;
3727         int lncpt;
3728         int cpt;
3729
3730         if (the_lnet.ln_state != LNET_STATE_RUNNING)
3731                 return;
3732
3733         if (!all) {
3734                 lnet_net_lock(LNET_LOCK_EX);
3735                 lpni = lnet_find_peer_ni_locked(nid);
3736                 if (!lpni) {
3737                         lnet_net_unlock(LNET_LOCK_EX);
3738                         return;
3739                 }
3740                 atomic_set(&lpni->lpni_healthv, value);
3741                 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3742                 lnet_peer_ni_decref_locked(lpni);
3743                 lnet_net_unlock(LNET_LOCK_EX);
3744                 return;
3745         }
3746
3747         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3748
3749         /*
3750          * Walk all the peers and set the health value of each of their
3751          * peer NIs to the given value.
3752          */
3753         lnet_net_lock(LNET_LOCK_EX);
3754         for (cpt = 0; cpt < lncpt; cpt++) {
3755                 ptable = the_lnet.ln_peer_tables[cpt];
3756                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3757                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3758                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3759                                                     lpni_peer_nis) {
3760                                         atomic_set(&lpni->lpni_healthv, value);
3761                                         lnet_peer_ni_add_to_recoveryq_locked(lpni);
3762                                 }
3763                         }
3764                 }
3765         }
3766         lnet_net_unlock(LNET_LOCK_EX);
3767 }
3768