LU-14627 lnet: Ensure ref taken when queueing for discovery
fs/lustre-release.git: lnet/lnet/peer.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/lnet/peer.c
33  */
34
35 #define DEBUG_SUBSYSTEM S_LNET
36
37 #include <linux/sched.h>
38 #ifdef HAVE_SCHED_HEADERS
39 #include <linux/sched/signal.h>
40 #endif
41 #include <linux/uaccess.h>
42
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
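
/*
 * Editorial note, per the LU-14627 subject above: queueing a peer for
 * discovery hands a pointer to the discovery thread, so the queueing
 * path must take a reference on the lnet_peer to keep it alive until
 * discovery is done with it. lnet_peer_queue_for_discovery() (defined
 * later in this file) is expected to take that reference when it links
 * the peer onto the queue.
 */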
50
51 static void
52 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
53 {
54         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
55                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
56                 lnet_peer_ni_decref_locked(lpni);
57         }
58 }
59
60 void
61 lnet_peer_net_added(struct lnet_net *net)
62 {
63         struct lnet_peer_ni *lpni, *tmp;
64
65         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
66                                  lpni_on_remote_peer_ni_list) {
67
68                 if (LNET_NIDNET(lpni->lpni_nid) == net->net_id) {
69                         lpni->lpni_net = net;
70
71                         spin_lock(&lpni->lpni_lock);
72                         lpni->lpni_txcredits =
73                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
74                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
75                         lpni->lpni_rtrcredits =
76                                 lnet_peer_buffer_credits(lpni->lpni_net);
77                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
78                         spin_unlock(&lpni->lpni_lock);
79
80                         lnet_peer_remove_from_remote_list(lpni);
81                 }
82         }
83 }
84
85 static void
86 lnet_peer_tables_destroy(void)
87 {
88         struct lnet_peer_table  *ptable;
89         struct list_head        *hash;
90         int                     i;
91         int                     j;
92
93         if (!the_lnet.ln_peer_tables)
94                 return;
95
96         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
97                 hash = ptable->pt_hash;
98                 if (!hash) /* not initialized */
99                         break;
100
101                 LASSERT(list_empty(&ptable->pt_zombie_list));
102
103                 ptable->pt_hash = NULL;
104                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
105                         LASSERT(list_empty(&hash[j]));
106
107                 LIBCFS_FREE(hash, LNET_PEER_HASH_SIZE * sizeof(*hash));
108         }
109
110         cfs_percpt_free(the_lnet.ln_peer_tables);
111         the_lnet.ln_peer_tables = NULL;
112 }
113
114 int
115 lnet_peer_tables_create(void)
116 {
117         struct lnet_peer_table  *ptable;
118         struct list_head        *hash;
119         int                     i;
120         int                     j;
121
122         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
123                                                    sizeof(*ptable));
124         if (the_lnet.ln_peer_tables == NULL) {
125                 CERROR("Failed to allocate cpu-partition peer tables\n");
126                 return -ENOMEM;
127         }
128
129         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
130                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
131                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
132                 if (hash == NULL) {
133                         CERROR("Failed to create peer hash table\n");
134                         lnet_peer_tables_destroy();
135                         return -ENOMEM;
136                 }
137
138                 spin_lock_init(&ptable->pt_zombie_lock);
139                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
140
141                 INIT_LIST_HEAD(&ptable->pt_peer_list);
142
143                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
144                         INIT_LIST_HEAD(&hash[j]);
145                 ptable->pt_hash = hash; /* sign of initialization */
146         }
147
148         return 0;
149 }
150
151 static struct lnet_peer_ni *
152 lnet_peer_ni_alloc(lnet_nid_t nid)
153 {
154         struct lnet_peer_ni *lpni;
155         struct lnet_net *net;
156         int cpt;
157
158         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
159
160         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
161         if (!lpni)
162                 return NULL;
163
164         INIT_LIST_HEAD(&lpni->lpni_txq);
165         INIT_LIST_HEAD(&lpni->lpni_rtrq);
166         INIT_LIST_HEAD(&lpni->lpni_routes);
167         INIT_LIST_HEAD(&lpni->lpni_hashlist);
168         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
169         INIT_LIST_HEAD(&lpni->lpni_recovery);
170         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
171         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
172
173         spin_lock_init(&lpni->lpni_lock);
174
175         lpni->lpni_alive = !lnet_peers_start_down(); /* 1 bit!! */
176         lpni->lpni_last_alive = ktime_get_seconds(); /* assumes alive */
177         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
178         lpni->lpni_nid = nid;
179         lpni->lpni_cpt = cpt;
180         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
181
182         net = lnet_get_net_locked(LNET_NIDNET(nid));
183         lpni->lpni_net = net;
184         if (net) {
185                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
186                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
187                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
188                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
189         } else {
190                 /*
191                  * This peer_ni is not on a local network, so we
192                  * cannot add the credits here. In case the net is
193                  * added later, add the peer_ni to the remote peer ni
194                  * list so it can be easily found and revisited.
195                  */
196                 /* FIXME: per-net implementation instead? */
197                 atomic_inc(&lpni->lpni_refcount);
198                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
199                               &the_lnet.ln_remote_peer_ni_list);
200         }
201
202         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
203
204         return lpni;
205 }
206
207 static struct lnet_peer_net *
208 lnet_peer_net_alloc(__u32 net_id)
209 {
210         struct lnet_peer_net *lpn;
211
212         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
213         if (!lpn)
214                 return NULL;
215
216         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
217         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
218         lpn->lpn_net_id = net_id;
219
220         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
221
222         return lpn;
223 }
224
225 void
226 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
227 {
228         struct lnet_peer *lp;
229
230         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
231
232         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
233         LASSERT(list_empty(&lpn->lpn_peer_nis));
234         LASSERT(list_empty(&lpn->lpn_peer_nets));
235         lp = lpn->lpn_peer;
236         lpn->lpn_peer = NULL;
237         LIBCFS_FREE(lpn, sizeof(*lpn));
238
239         lnet_peer_decref_locked(lp);
240 }
241
242 static struct lnet_peer *
243 lnet_peer_alloc(lnet_nid_t nid)
244 {
245         struct lnet_peer *lp;
246
247         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
248         if (!lp)
249                 return NULL;
250
251         INIT_LIST_HEAD(&lp->lp_peer_list);
252         INIT_LIST_HEAD(&lp->lp_peer_nets);
253         INIT_LIST_HEAD(&lp->lp_dc_list);
254         INIT_LIST_HEAD(&lp->lp_dc_pendq);
255         init_waitqueue_head(&lp->lp_dc_waitq);
256         spin_lock_init(&lp->lp_lock);
257         lp->lp_primary_nid = nid;
258         lp->lp_disc_src_nid = LNET_NID_ANY;
259
260         /*
261          * Turn off discovery for loopback peer. If you're creating a peer
262          * for the loopback interface then that was initiated when we
263          * attempted to send a message over the loopback. There is no need
264          * to ever use a different interface when sending messages to
265          * myself.
266          */
267         if (nid == LNET_NID_LO_0)
268                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
269         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
270
271         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
272
273         return lp;
274 }
275
276 void
277 lnet_destroy_peer_locked(struct lnet_peer *lp)
278 {
279         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nid2str(lp->lp_primary_nid));
280
281         LASSERT(atomic_read(&lp->lp_refcount) == 0);
282         LASSERT(list_empty(&lp->lp_peer_nets));
283         LASSERT(list_empty(&lp->lp_peer_list));
284         LASSERT(list_empty(&lp->lp_dc_list));
285
286         if (lp->lp_data)
287                 lnet_ping_buffer_decref(lp->lp_data);
288
289         /*
290          * If there are messages still on the pending queue, then make
291          * sure to queue them on the ln_msg_resend list so they can be
292          * resent at a later point if the discovery thread is still
293          * running.
294          * If the discovery thread has stopped, then the wakeup will be a
295          * no-op, and it is expected that lnet_shutdown_lndnets() will
296          * eventually be called, which will traverse this list and
297          * finalize the messages on it.
298          * We cannot resend them now because we're holding the cpt lock;
299          * releasing the lock could leave an inconsistent state.
300          */
301         spin_lock(&the_lnet.ln_msg_resend_lock);
302         spin_lock(&lp->lp_lock);
303         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
304         spin_unlock(&lp->lp_lock);
305         spin_unlock(&the_lnet.ln_msg_resend_lock);
306         wake_up(&the_lnet.ln_dc_waitq);
307
308         LIBCFS_FREE(lp, sizeof(*lp));
309 }
310
311 /*
312  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
313  * that peer_net, detach the peer_net from the peer.
314  *
315  * Call with lnet_net_lock/EX held
316  */
317 static void
318 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
319 {
320         struct lnet_peer_table *ptable;
321         struct lnet_peer_net *lpn;
322         struct lnet_peer *lp;
323
324         /*
325          * Belts and suspenders: gracefully handle teardown of a
326          * partially connected peer_ni.
327          */
328         lpn = lpni->lpni_peer_net;
329
330         list_del_init(&lpni->lpni_peer_nis);
331         /*
332          * If there are no lpni's left, we detach lpn from
333          * lp_peer_nets, so it cannot be found anymore.
334          */
335         if (list_empty(&lpn->lpn_peer_nis))
336                 list_del_init(&lpn->lpn_peer_nets);
337
338         /* Update peer NID count. */
339         lp = lpn->lpn_peer;
340         lp->lp_nnis--;
341
342         /*
343          * If there are no more peer nets, make the peer unfindable
344          * via the peer_tables.
345          *
346          * Otherwise, if the peer is DISCOVERED, tell discovery to
347          * take another look at it. This is a no-op if discovery for
348          * this peer did the detaching.
349          */
350         if (list_empty(&lp->lp_peer_nets)) {
351                 list_del_init(&lp->lp_peer_list);
352                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
353                 ptable->pt_peers--;
354         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
355                 /* Discovery isn't running, nothing to do here. */
356         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
357                 lnet_peer_queue_for_discovery(lp);
358                 wake_up(&the_lnet.ln_dc_waitq);
359         }
360         CDEBUG(D_NET, "peer %s NID %s\n",
361                 libcfs_nid2str(lp->lp_primary_nid),
362                 libcfs_nid2str(lpni->lpni_nid));
363 }
364
365 /* called with lnet_net_lock LNET_LOCK_EX held */
366 static int
367 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni)
368 {
369         struct lnet_peer_table *ptable = NULL;
370
371         /* don't remove a peer_ni if it's also a gateway */
372         if (lpni->lpni_rtr_refcount > 0) {
373                 CERROR("Peer NI %s is a gateway. Can not delete it\n",
374                        libcfs_nid2str(lpni->lpni_nid));
375                 return -EBUSY;
376         }
377
378         lnet_peer_remove_from_remote_list(lpni);
379
380         /* remove peer ni from the hash list. */
381         list_del_init(&lpni->lpni_hashlist);
382
383         /*
384          * indicate the peer is being deleted so the monitor thread can
385          * remove it from the recovery queue.
386          */
387         spin_lock(&lpni->lpni_lock);
388         lpni->lpni_state |= LNET_PEER_NI_DELETING;
389         spin_unlock(&lpni->lpni_lock);
390
391         /* decrement the ref count on the peer table */
392         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
393         LASSERT(ptable->pt_number > 0);
394         ptable->pt_number--;
395
396         /*
397          * The peer_ni can no longer be found with a lookup. But there
398          * can be current users, so keep track of it on the zombie
399          * list until the reference count has gone to zero.
400          *
401          * The last reference may be lost in a place where the
402          * lnet_net_lock locks only a single cpt, and that cpt may not
403          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
404          * has its own lock.
405          */
406         spin_lock(&ptable->pt_zombie_lock);
407         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
408         ptable->pt_zombies++;
409         spin_unlock(&ptable->pt_zombie_lock);
410
411         /* no need to keep this peer_ni on the hierarchy anymore */
412         lnet_peer_detach_peer_ni_locked(lpni);
413
414         /* remove hashlist reference on peer_ni */
415         lnet_peer_ni_decref_locked(lpni);
416
417         return 0;
418 }
419
420 void lnet_peer_uninit(void)
421 {
422         struct lnet_peer_ni *lpni, *tmp;
423
424         lnet_net_lock(LNET_LOCK_EX);
425
426         /* remove all peer_nis from the remote peer and the hash list */
427         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
428                                  lpni_on_remote_peer_ni_list)
429                 lnet_peer_ni_del_locked(lpni);
430
431         lnet_peer_tables_destroy();
432
433         lnet_net_unlock(LNET_LOCK_EX);
434 }
435
436 static int
437 lnet_peer_del_locked(struct lnet_peer *peer)
438 {
439         struct lnet_peer_ni *lpni = NULL, *lpni2;
440         int rc = 0, rc2 = 0;
441
442         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(peer->lp_primary_nid));
443
444         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
445         while (lpni != NULL) {
446                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
447                 rc = lnet_peer_ni_del_locked(lpni);
448                 if (rc != 0)
449                         rc2 = rc;
450                 lpni = lpni2;
451         }
452
453         return rc2;
454 }
455
456 static int
457 lnet_peer_del(struct lnet_peer *peer)
458 {
459         lnet_net_lock(LNET_LOCK_EX);
460         lnet_peer_del_locked(peer);
461         lnet_net_unlock(LNET_LOCK_EX);
462
463         return 0;
464 }
465
466 /*
467  * Delete a NID from a peer. Call with ln_api_mutex held.
468  *
469  * Error codes:
470  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
471  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
472  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
473  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
474  */
475 static int
476 lnet_peer_del_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
477 {
478         struct lnet_peer_ni *lpni;
479         lnet_nid_t primary_nid = lp->lp_primary_nid;
480         int rc = 0;
481
482         if (!(flags & LNET_PEER_CONFIGURED)) {
483                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
484                         rc = -EPERM;
485                         goto out;
486                 }
487         }
488         lpni = lnet_find_peer_ni_locked(nid);
489         if (!lpni) {
490                 rc = -ENOENT;
491                 goto out;
492         }
493         lnet_peer_ni_decref_locked(lpni);
494         if (lp != lpni->lpni_peer_net->lpn_peer) {
495                 rc = -ECHILD;
496                 goto out;
497         }
498
499         /*
500          * This function only allows deletion of the primary NID if it
501          * is the only NID.
502          */
503         if (nid == lp->lp_primary_nid && lp->lp_nnis != 1) {
504                 rc = -EBUSY;
505                 goto out;
506         }
507
508         lnet_net_lock(LNET_LOCK_EX);
509
510         rc = lnet_peer_ni_del_locked(lpni);
511
512         lnet_net_unlock(LNET_LOCK_EX);
513
514 out:
515         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
516                libcfs_nid2str(primary_nid), libcfs_nid2str(nid), flags, rc);
517
518         return rc;
519 }
520
521 static void
522 lnet_peer_table_cleanup_locked(struct lnet_net *net,
523                                struct lnet_peer_table *ptable)
524 {
525         int                      i;
526         struct lnet_peer_ni     *next;
527         struct lnet_peer_ni     *lpni;
528         struct lnet_peer        *peer;
529
530         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
531                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
532                                          lpni_hashlist) {
533                         if (net != NULL && net != lpni->lpni_net)
534                                 continue;
535
536                         peer = lpni->lpni_peer_net->lpn_peer;
537                         if (peer->lp_primary_nid != lpni->lpni_nid) {
538                                 lnet_peer_ni_del_locked(lpni);
539                                 continue;
540                         }
541                         /*
542                          * Removing the primary NID implies removing
543                          * the entire peer. Advance next beyond any
544                          * peer_ni that belongs to the same peer.
545                          */
546                         list_for_each_entry_from(next, &ptable->pt_hash[i],
547                                                  lpni_hashlist) {
548                                 if (next->lpni_peer_net->lpn_peer != peer)
549                                         break;
550                         }
551                         lnet_peer_del_locked(peer);
552                 }
553         }
554 }
555
556 static void
557 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
558 {
559         int     i = 3;
560
561         spin_lock(&ptable->pt_zombie_lock);
562         while (ptable->pt_zombies) {
563                 spin_unlock(&ptable->pt_zombie_lock);
564
565                 if (is_power_of_2(i++)) { /* log on every 2^n pass */
566                         CDEBUG(D_WARNING,
567                                "Waiting for %d zombies on peer table\n",
568                                ptable->pt_zombies);
569                 }
570                 set_current_state(TASK_UNINTERRUPTIBLE);
571                 schedule_timeout(cfs_time_seconds(1) >> 1);
572                 spin_lock(&ptable->pt_zombie_lock);
573         }
574         spin_unlock(&ptable->pt_zombie_lock);
575 }
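
/*
 * Editorial note: the wait above polls pt_zombies every half second
 * (cfs_time_seconds(1) >> 1) and, with the pass counter starting at 3
 * and incrementing each pass, emits the D_WARNING message only on
 * power-of-two passes so a long drain does not flood the log.
 */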
576
577 static void
578 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
579                                 struct lnet_peer_table *ptable)
580 {
581         struct lnet_peer_ni     *lp;
582         struct lnet_peer_ni     *tmp;
583         lnet_nid_t              lpni_nid;
584         int                     i;
585
586         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
587                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
588                                          lpni_hashlist) {
589                         if (net != lp->lpni_net)
590                                 continue;
591
592                         if (lp->lpni_rtr_refcount == 0)
593                                 continue;
594
595                         lpni_nid = lp->lpni_nid;
596
597                         lnet_net_unlock(LNET_LOCK_EX);
598                         lnet_del_route(LNET_NIDNET(LNET_NID_ANY), lpni_nid);
599                         lnet_net_lock(LNET_LOCK_EX);
600                 }
601         }
602 }
603
604 void
605 lnet_peer_tables_cleanup(struct lnet_net *net)
606 {
607         int i;
608         struct lnet_peer_table *ptable;
609
610         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
611         /* If just deleting the peers for a NI, get rid of any routes these
612          * peers are gateways for. */
613         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
614                 lnet_net_lock(LNET_LOCK_EX);
615                 lnet_peer_table_del_rtrs_locked(net, ptable);
616                 lnet_net_unlock(LNET_LOCK_EX);
617         }
618
619         /* Start the cleanup process */
620         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
621                 lnet_net_lock(LNET_LOCK_EX);
622                 lnet_peer_table_cleanup_locked(net, ptable);
623                 lnet_net_unlock(LNET_LOCK_EX);
624         }
625
626         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
627                 lnet_peer_ni_finalize_wait(ptable);
628 }
629
630 static struct lnet_peer_ni *
631 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, lnet_nid_t nid)
632 {
633         struct list_head        *peers;
634         struct lnet_peer_ni     *lp;
635
636         LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
637
638         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
639         list_for_each_entry(lp, peers, lpni_hashlist) {
640                 if (lp->lpni_nid == nid) {
641                         lnet_peer_ni_addref_locked(lp);
642                         return lp;
643                 }
644         }
645
646         return NULL;
647 }
648
649 struct lnet_peer_ni *
650 lnet_find_peer_ni_locked(lnet_nid_t nid)
651 {
652         struct lnet_peer_ni *lpni;
653         struct lnet_peer_table *ptable;
654         int cpt;
655
656         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
657
658         ptable = the_lnet.ln_peer_tables[cpt];
659         lpni = lnet_get_peer_ni_locked(ptable, nid);
660
661         return lpni;
662 }
663
664 struct lnet_peer *
665 lnet_find_peer(lnet_nid_t nid)
666 {
667         struct lnet_peer_ni *lpni;
668         struct lnet_peer *lp = NULL;
669         int cpt;
670
671         cpt = lnet_net_lock_current();
672         lpni = lnet_find_peer_ni_locked(nid);
673         if (lpni) {
674                 lp = lpni->lpni_peer_net->lpn_peer;
675                 lnet_peer_addref_locked(lp);
676                 lnet_peer_ni_decref_locked(lpni);
677         }
678         lnet_net_unlock(cpt);
679
680         return lp;
681 }
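
/*
 * Illustrative sketch, not part of the original file: a typical
 * lnet_find_peer() caller owns the reference taken above and must
 * drop it under lnet_net_lock when done. The helper name is
 * hypothetical.
 */
static void __maybe_unused
lnet_peer_example_lookup(lnet_nid_t nid)
{
	struct lnet_peer *lp;

	lp = lnet_find_peer(nid);	/* +1 ref on the peer on success */
	if (!lp)
		return;

	CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));

	lnet_net_lock(LNET_LOCK_EX);
	lnet_peer_decref_locked(lp);	/* drop the lookup reference */
	lnet_net_unlock(LNET_LOCK_EX);
}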
682
683 struct lnet_peer_ni *
684 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
685                              struct lnet_peer_net *peer_net,
686                              struct lnet_peer_ni *prev)
687 {
688         struct lnet_peer_ni *lpni;
689         struct lnet_peer_net *net = peer_net;
690
691         if (!prev) {
692                 if (!net) {
693                         if (list_empty(&peer->lp_peer_nets))
694                                 return NULL;
695
696                         net = list_entry(peer->lp_peer_nets.next,
697                                          struct lnet_peer_net,
698                                          lpn_peer_nets);
699                 }
700                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
701                                   lpni_peer_nis);
702
703                 return lpni;
704         }
705
706         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
707                 /*
708                  * If we have reached the end of the peer ni list and a
709                  * peer net was specified, then there are no more peer
710                  * nis in that net.
711                  */
712                 if (net)
713                         return NULL;
714
715                 /*
716                  * We reached the end of this net's ni list. Move to
717                  * the next net.
718                  */
719                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
720                     &peer->lp_peer_nets)
721                         /* no more nets and no more NIs. */
722                         return NULL;
723
724                 /* get the next net */
725                 net = list_entry(prev->lpni_peer_net->lpn_peer_nets.next,
726                                  struct lnet_peer_net,
727                                  lpn_peer_nets);
728                 /* get the ni on it */
729                 lpni = list_entry(net->lpn_peer_nis.next, struct lnet_peer_ni,
730                                   lpni_peer_nis);
731
732                 return lpni;
733         }
734
735         /* there are more nis left */
736         lpni = list_entry(prev->lpni_peer_nis.next,
737                           struct lnet_peer_ni, lpni_peer_nis);
738
739         return lpni;
740 }
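
/*
 * Illustrative sketch, not part of the original file: the NULL-cursor
 * idiom for walking every NI of a peer with the iterator above (the
 * same pattern lnet_peer_clr_non_mr_pref_nids() uses below). Caller
 * must hold lnet_net_lock. The helper name is hypothetical.
 */
static void __maybe_unused
lnet_peer_example_walk_nis(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	/* prev == NULL starts the walk; a NULL return ends it */
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		CDEBUG(D_NET, "NI %s\n", libcfs_nid2str(lpni->lpni_nid));
}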
741
742 /* Call with the ln_api_mutex held */
743 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
744 {
745         struct lnet_process_id id;
746         struct lnet_peer_table *ptable;
747         struct lnet_peer *lp;
748         __u32 count = 0;
749         __u32 size = 0;
750         int lncpt;
751         int cpt;
752         __u32 i;
753         int rc;
754
755         rc = -ESHUTDOWN;
756         if (the_lnet.ln_state != LNET_STATE_RUNNING)
757                 goto done;
758
759         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
760
761         /*
762          * Count the number of peers, and return E2BIG if the buffer
763          * is too small. We'll also return the desired size.
764          */
765         rc = -E2BIG;
766         for (cpt = 0; cpt < lncpt; cpt++) {
767                 ptable = the_lnet.ln_peer_tables[cpt];
768                 count += ptable->pt_peers;
769         }
770         size = count * sizeof(*ids);
771         if (size > *sizep)
772                 goto done;
773
774         /*
775          * Walk the peer lists and copy out the primary nids.
776          * This is safe because the peer lists are only modified
777          * while the ln_api_mutex is held. So we don't need to
778          * hold the lnet_net_lock as well, and can therefore
779          * directly call copy_to_user().
780          */
781         rc = -EFAULT;
782         memset(&id, 0, sizeof(id));
783         id.pid = LNET_PID_LUSTRE;
784         i = 0;
785         for (cpt = 0; cpt < lncpt; cpt++) {
786                 ptable = the_lnet.ln_peer_tables[cpt];
787                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
788                         if (i >= count)
789                                 goto done;
790                         id.nid = lp->lp_primary_nid;
791                         if (copy_to_user(&ids[i], &id, sizeof(id)))
792                                 goto done;
793                         i++;
794                 }
795         }
796         rc = 0;
797 done:
798         *countp = count;
799         *sizep = size;
800         return rc;
801 }
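
/*
 * Editorial note on the calling convention above: a caller that does
 * not know the peer count makes a first call, gets -E2BIG, reads the
 * required byte count from *sizep (which is always set), reallocates
 * its buffer, and retries.
 */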
802
803 /*
804  * Start pushes to peers that need to be updated for a configuration
805  * change on this node.
806  */
807 void
808 lnet_push_update_to_peers(int force)
809 {
810         struct lnet_peer_table *ptable;
811         struct lnet_peer *lp;
812         int lncpt;
813         int cpt;
814
815         lnet_net_lock(LNET_LOCK_EX);
816         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
817         for (cpt = 0; cpt < lncpt; cpt++) {
818                 ptable = the_lnet.ln_peer_tables[cpt];
819                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
820                         if (force) {
821                                 spin_lock(&lp->lp_lock);
822                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
823                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
824                                 spin_unlock(&lp->lp_lock);
825                         }
826                         if (lnet_peer_needs_push(lp))
827                                 lnet_peer_queue_for_discovery(lp);
828                 }
829         }
830         lnet_net_unlock(LNET_LOCK_EX);
831         wake_up(&the_lnet.ln_dc_waitq);
832 }
833
834 /*
835  * Test whether a ni is a preferred ni for this peer_ni, i.e. whether
836  * this is a preferred point-to-point path. Call with lnet_net_lock in
837  * shared mode.
838  */
839 bool
840 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, lnet_nid_t nid)
841 {
842         int i;
843
844         if (lpni->lpni_pref_nnids == 0)
845                 return false;
846         if (lpni->lpni_pref_nnids == 1)
847                 return lpni->lpni_pref.nid == nid;
848         for (i = 0; i < lpni->lpni_pref_nnids; i++) {
849                 if (lpni->lpni_pref.nids[i] == nid)
850                         return true;
851         }
852         return false;
853 }
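
/*
 * Editorial note: lpni_pref is a small-array optimization. With
 * lpni_pref_nnids == 1 the single NID is stored inline in
 * lpni_pref.nid; with more than one, lpni_pref.nids points to an
 * allocated array (freed in lnet_destroy_peer_ni_locked() when
 * lpni_pref_nnids > 1). The checks above and the add/del helpers
 * below follow that convention.
 */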
854
855 /*
856  * Set a single ni as preferred, provided no preferred ni is already
857  * defined. Only to be used for non-multi-rail peer_ni.
858  */
859 int
860 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
861 {
862         int rc = 0;
863
864         spin_lock(&lpni->lpni_lock);
865         if (nid == LNET_NID_ANY) {
866                 rc = -EINVAL;
867         } else if (lpni->lpni_pref_nnids > 0) {
868                 rc = -EPERM;
869         } else if (lpni->lpni_pref_nnids == 0) {
870                 lpni->lpni_pref.nid = nid;
871                 lpni->lpni_pref_nnids = 1;
872                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
873         }
874         spin_unlock(&lpni->lpni_lock);
875
876         CDEBUG(D_NET, "peer %s nid %s: %d\n",
877                libcfs_nid2str(lpni->lpni_nid), libcfs_nid2str(nid), rc);
878         return rc;
879 }
880
881 /*
882  * Clear the preferred NID from a non-multi-rail peer_ni, provided
883  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
884  */
885 int
886 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
887 {
888         int rc = 0;
889
890         spin_lock(&lpni->lpni_lock);
891         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
892                 lpni->lpni_pref_nnids = 0;
893                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
894         } else if (lpni->lpni_pref_nnids == 0) {
895                 rc = -ENOENT;
896         } else {
897                 rc = -EPERM;
898         }
899         spin_unlock(&lpni->lpni_lock);
900
901         CDEBUG(D_NET, "peer %s: %d\n",
902                libcfs_nid2str(lpni->lpni_nid), rc);
903         return rc;
904 }
905
906 /*
907  * Clear the preferred NIDs from a non-multi-rail peer.
908  */
909 void
910 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
911 {
912         struct lnet_peer_ni *lpni = NULL;
913
914         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
915                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
916 }
917
918 int
919 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
920 {
921         lnet_nid_t *nids = NULL;
922         lnet_nid_t *oldnids = NULL;
923         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
924         int size;
925         int i;
926         int rc = 0;
927
928         if (nid == LNET_NID_ANY) {
929                 rc = -EINVAL;
930                 goto out;
931         }
932
933         if (lpni->lpni_pref_nnids == 1 && lpni->lpni_pref.nid == nid) {
934                 rc = -EEXIST;
935                 goto out;
936         }
937
938         /* A non-MR node may have only one preferred NI per peer_ni */
939         if (lpni->lpni_pref_nnids > 0) {
940                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
941                         rc = -EPERM;
942                         goto out;
943                 }
944         }
945
946         if (lpni->lpni_pref_nnids != 0) {
947                 size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
948                 LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
949                 if (!nids) {
950                         rc = -ENOMEM;
951                         goto out;
952                 }
953                 for (i = 0; i < lpni->lpni_pref_nnids; i++) {
954                         if (lpni->lpni_pref.nids[i] == nid) {
955                                 LIBCFS_FREE(nids, size);
956                                 rc = -EEXIST;
957                                 goto out;
958                         }
959                         nids[i] = lpni->lpni_pref.nids[i];
960                 }
961                 nids[i] = nid;
962         }
963
964         lnet_net_lock(LNET_LOCK_EX);
965         spin_lock(&lpni->lpni_lock);
966         if (lpni->lpni_pref_nnids == 0) {
967                 lpni->lpni_pref.nid = nid;
968         } else {
969                 oldnids = lpni->lpni_pref.nids;
970                 lpni->lpni_pref.nids = nids;
971         }
972         lpni->lpni_pref_nnids++;
973         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
974         spin_unlock(&lpni->lpni_lock);
975         lnet_net_unlock(LNET_LOCK_EX);
976
977         if (oldnids) {
978                 size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
979                 LIBCFS_FREE(oldnids, size); /* size is already in bytes */
980         }
981 out:
982         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
983                 spin_lock(&lpni->lpni_lock);
984                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
985                 spin_unlock(&lpni->lpni_lock);
986         }
987         CDEBUG(D_NET, "peer %s nid %s: %d\n",
988                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
989         return rc;
990 }
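
/*
 * Editorial note: the update above is copy-then-swap. The enlarged
 * NID array is built with no lock held, the pointer is swapped in
 * under lnet_net_lock(LNET_LOCK_EX) plus lpni_lock, and the old
 * array is freed only after the locks are dropped, so readers under
 * the net lock never observe a partially filled array.
 */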
991
992 int
993 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid)
994 {
995         lnet_nid_t *nids = NULL;
996         lnet_nid_t *oldnids = NULL;
997         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
998         int size;
999         int i, j;
1000         int rc = 0;
1001
1002         if (lpni->lpni_pref_nnids == 0) {
1003                 rc = -ENOENT;
1004                 goto out;
1005         }
1006
1007         if (lpni->lpni_pref_nnids == 1) {
1008                 if (lpni->lpni_pref.nid != nid) {
1009                         rc = -ENOENT;
1010                         goto out;
1011                 }
1012         } else if (lpni->lpni_pref_nnids == 2) {
1013                 if (lpni->lpni_pref.nids[0] != nid &&
1014                     lpni->lpni_pref.nids[1] != nid) {
1015                         rc = -ENOENT;
1016                         goto out;
1017                 }
1018         } else {
1019                 size = sizeof(*nids) * (lpni->lpni_pref_nnids - 1);
1020                 LIBCFS_CPT_ALLOC(nids, lnet_cpt_table(), lpni->lpni_cpt, size);
1021                 if (!nids) {
1022                         rc = -ENOMEM;
1023                         goto out;
1024                 }
1025                 for (i = 0, j = 0; i < lpni->lpni_pref_nnids; i++) {
1026                         if (lpni->lpni_pref.nids[i] != nid)
1027                                 continue;
1028                         nids[j++] = lpni->lpni_pref.nids[i];
1029                 }
1030                 /* Check if we actually removed a nid. */
1031                 if (j == lpni->lpni_pref_nnids) {
1032                         LIBCFS_FREE(nids, size);
1033                         rc = -ENOENT;
1034                         goto out;
1035                 }
1036         }
1037
1038         lnet_net_lock(LNET_LOCK_EX);
1039         spin_lock(&lpni->lpni_lock);
1040         if (lpni->lpni_pref_nnids == 1) {
1041                 lpni->lpni_pref.nid = LNET_NID_ANY;
1042         } else if (lpni->lpni_pref_nnids == 2) {
1043                 oldnids = lpni->lpni_pref.nids;
1044                 if (oldnids[0] == nid)
1045                         lpni->lpni_pref.nid = oldnids[1];
1046                 else
1047                         lpni->lpni_pref.nid = oldnids[0];
1048         } else {
1049                 oldnids = lpni->lpni_pref.nids;
1050                 lpni->lpni_pref.nids = nids;
1051         }
1052         lpni->lpni_pref_nnids--;
1053         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1054         spin_unlock(&lpni->lpni_lock);
1055         lnet_net_unlock(LNET_LOCK_EX);
1056
1057         if (oldnids) {
1058                 size = sizeof(*nids) * (lpni->lpni_pref_nnids + 1);
1059                 LIBCFS_FREE(oldnids, size); /* size is already in bytes */
1060         }
1061 out:
1062         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1063                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid), rc);
1064         return rc;
1065 }
1066
1067 lnet_nid_t
1068 lnet_peer_primary_nid_locked(lnet_nid_t nid)
1069 {
1070         struct lnet_peer_ni *lpni;
1071         lnet_nid_t primary_nid = nid;
1072
1073         lpni = lnet_find_peer_ni_locked(nid);
1074         if (lpni) {
1075                 primary_nid = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1076                 lnet_peer_ni_decref_locked(lpni);
1077         }
1078
1079         return primary_nid;
1080 }
1081
1082 bool
1083 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1084 {
1085         if (lnet_peer_discovery_disabled)
1086                 return true;
1087
1088         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1089             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1090                 return true;
1091         }
1092
1093         return false;
1094 }
1095
1096 /*
1097  * Peer Discovery
1098  */
1099 bool
1100 lnet_is_discovery_disabled(struct lnet_peer *lp)
1101 {
1102         bool rc = false;
1103
1104         spin_lock(&lp->lp_lock);
1105         rc = lnet_is_discovery_disabled_locked(lp);
1106         spin_unlock(&lp->lp_lock);
1107
1108         return rc;
1109 }
1110
1111 lnet_nid_t
1112 LNetPrimaryNID(lnet_nid_t nid)
1113 {
1114         struct lnet_peer *lp;
1115         struct lnet_peer_ni *lpni;
1116         lnet_nid_t primary_nid = nid;
1117         int rc = 0;
1118         int cpt;
1119
1120         if (nid == LNET_NID_LO_0)
1121                 return LNET_NID_LO_0;
1122
1123         cpt = lnet_net_lock_current();
1124         lpni = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
1125         if (IS_ERR(lpni)) {
1126                 rc = PTR_ERR(lpni);
1127                 goto out_unlock;
1128         }
1129         lp = lpni->lpni_peer_net->lpn_peer;
1130
1131         while (!lnet_peer_is_uptodate(lp)) {
1132                 spin_lock(&lp->lp_lock);
1133                 /* force a full discovery cycle */
1134                 lp->lp_state |= LNET_PEER_FORCE_PING | LNET_PEER_FORCE_PUSH;
1135                 spin_unlock(&lp->lp_lock);
1136
1137                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1138                 if (rc)
1139                         goto out_decref;
1140                 lp = lpni->lpni_peer_net->lpn_peer;
1141
1142                 /* Only try once if discovery is disabled */
1143                 if (lnet_is_discovery_disabled(lp))
1144                         break;
1145         }
1146         primary_nid = lp->lp_primary_nid;
1147 out_decref:
1148         lnet_peer_ni_decref_locked(lpni);
1149 out_unlock:
1150         lnet_net_unlock(cpt);
1151
1152         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nid2str(nid),
1153                libcfs_nid2str(primary_nid), rc);
1154         return primary_nid;
1155 }
1156 EXPORT_SYMBOL(LNetPrimaryNID);
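
/*
 * Illustrative sketch, not part of the original file: upper layers
 * can use the exported LNetPrimaryNID() to canonicalize a NID before
 * comparing peers; the best-known primary NID at that point is
 * returned. The helper name is hypothetical.
 */
static void __maybe_unused
lnet_example_primary(lnet_nid_t nid)
{
	lnet_nid_t prim = LNetPrimaryNID(nid);

	if (prim != nid)
		CDEBUG(D_NET, "%s is an alias of primary %s\n",
		       libcfs_nid2str(nid), libcfs_nid2str(prim));
}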
1157
1158 struct lnet_peer_net *
1159 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1160 {
1161         struct lnet_peer_net *peer_net;
1162         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1163                 if (peer_net->lpn_net_id == net_id)
1164                         return peer_net;
1165         }
1166         return NULL;
1167 }
1168
1169 /*
1170  * Attach a peer_ni to a peer_net and peer. This function assumes
1171  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1172  * may be attached to a different peer, in which case it will be
1173  * properly detached first. The whole operation is done atomically.
1174  *
1175  * Always returns 0.  This is the last function called from functions
1176  * that do return an int, so returning 0 here allows the compiler to
1177  * do a tail call.
1178  */
1179 static int
1180 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1181                                 struct lnet_peer_net *lpn,
1182                                 struct lnet_peer_ni *lpni,
1183                                 unsigned flags)
1184 {
1185         struct lnet_peer_table *ptable;
1186
1187         /* Install the new peer_ni */
1188         lnet_net_lock(LNET_LOCK_EX);
1189         /* Add peer_ni to global peer table hash, if necessary. */
1190         if (list_empty(&lpni->lpni_hashlist)) {
1191                 int hash = lnet_nid2peerhash(lpni->lpni_nid);
1192
1193                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1194                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1195                 ptable->pt_version++;
1196                 ptable->pt_number++;
1197                 /* This is the 1st refcount on lpni. */
1198                 atomic_inc(&lpni->lpni_refcount);
1199         }
1200
1201         /* Detach the peer_ni from an existing peer, if necessary. */
1202         if (lpni->lpni_peer_net) {
1203                 LASSERT(lpni->lpni_peer_net != lpn);
1204                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1205                 lnet_peer_detach_peer_ni_locked(lpni);
1206                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1207                 lpni->lpni_peer_net = NULL;
1208         }
1209
1210         /* Add peer_ni to peer_net */
1211         lpni->lpni_peer_net = lpn;
1212         list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1213         lnet_peer_net_addref_locked(lpn);
1214
1215         /* Add peer_net to peer */
1216         if (!lpn->lpn_peer) {
1217                 lpn->lpn_peer = lp;
1218                 list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1219                 lnet_peer_addref_locked(lp);
1220         }
1221
1222         /* Add peer to global peer list, if necessary */
1223         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1224         if (list_empty(&lp->lp_peer_list)) {
1225                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1226                 ptable->pt_peers++;
1227         }
1228
1229
1230         /* Update peer state */
1231         spin_lock(&lp->lp_lock);
1232         if (flags & LNET_PEER_CONFIGURED) {
1233                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1234                         lp->lp_state |= LNET_PEER_CONFIGURED;
1235         }
1236         if (flags & LNET_PEER_MULTI_RAIL) {
1237                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1238                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1239                         lnet_peer_clr_non_mr_pref_nids(lp);
1240                 }
1241         }
1242         spin_unlock(&lp->lp_lock);
1243
1244         lp->lp_nnis++;
1245         lnet_net_unlock(LNET_LOCK_EX);
1246
1247         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1248                libcfs_nid2str(lp->lp_primary_nid),
1249                libcfs_nid2str(lpni->lpni_nid), flags);
1250
1251         return 0;
1252 }
1253
1254 /*
1255  * Create a new peer, with nid as its primary nid.
1256  *
1257  * Call with the lnet_api_mutex held.
1258  */
1259 static int
1260 lnet_peer_add(lnet_nid_t nid, unsigned flags)
1261 {
1262         struct lnet_peer *lp;
1263         struct lnet_peer_net *lpn;
1264         struct lnet_peer_ni *lpni;
1265         int rc = 0;
1266
1267         LASSERT(nid != LNET_NID_ANY);
1268
1269         /*
1270          * No need for the lnet_net_lock here, because the
1271          * lnet_api_mutex is held.
1272          */
1273         lpni = lnet_find_peer_ni_locked(nid);
1274         if (lpni) {
1275                 /* A peer with this NID already exists. */
1276                 lp = lpni->lpni_peer_net->lpn_peer;
1277                 lnet_peer_ni_decref_locked(lpni);
1278                 /*
1279                  * This is an error if the peer was configured and the
1280                  * primary NID differs or an attempt is made to change
1281                  * the Multi-Rail flag. Otherwise the assumption is
1282                  * that an existing peer is being modified.
1283                  */
1284                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1285                         if (lp->lp_primary_nid != nid)
1286                                 rc = -EEXIST;
1287                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1288                                 rc = -EPERM;
1289                         goto out;
1290                 }
1291                 /* Delete and recreate as a configured peer. */
1292                 lnet_peer_del(lp);
1293         }
1294
1295         /* Create peer, peer_net, and peer_ni. */
1296         rc = -ENOMEM;
1297         lp = lnet_peer_alloc(nid);
1298         if (!lp)
1299                 goto out;
1300         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1301         if (!lpn)
1302                 goto out_free_lp;
1303         lpni = lnet_peer_ni_alloc(nid);
1304         if (!lpni)
1305                 goto out_free_lpn;
1306
1307         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1308
1309 out_free_lpn:
1310         LIBCFS_FREE(lpn, sizeof(*lpn));
1311 out_free_lp:
1312         LIBCFS_FREE(lp, sizeof(*lp));
1313 out:
1314         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1315                libcfs_nid2str(nid), flags, rc);
1316         return rc;
1317 }
1318
1319 /*
1320  * Add a NID to a peer. Call with ln_api_mutex held.
1321  *
1322  * Error codes:
1323  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1324  *  -EEXIST:   The NID was configured by DLC for a different peer.
1325  *  -ENOMEM:   Out of memory.
1326  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1327  *             non-multi-rail peer.
1328  */
1329 static int
1330 lnet_peer_add_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1331 {
1332         struct lnet_peer_net *lpn;
1333         struct lnet_peer_ni *lpni;
1334         int rc = 0;
1335
1336         LASSERT(lp);
1337         LASSERT(nid != LNET_NID_ANY);
1338
1339         /* A configured peer can only be updated through configuration. */
1340         if (!(flags & LNET_PEER_CONFIGURED)) {
1341                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1342                         rc = -EPERM;
1343                         goto out;
1344                 }
1345         }
1346
1347         /*
1348          * The MULTI_RAIL flag can be set but not cleared, because
1349          * that would leave the peer struct in an invalid state.
1350          */
1351         if (flags & LNET_PEER_MULTI_RAIL) {
1352                 spin_lock(&lp->lp_lock);
1353                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1354                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1355                         lnet_peer_clr_non_mr_pref_nids(lp);
1356                 }
1357                 spin_unlock(&lp->lp_lock);
1358         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1359                 rc = -EPERM;
1360                 goto out;
1361         }
1362
1363         lpni = lnet_find_peer_ni_locked(nid);
1364         if (lpni) {
1365                 /*
1366                  * A peer_ni already exists. This is only a problem if
1367                  * it is not connected to this peer and was configured
1368                  * by DLC.
1369                  */
1370                 lnet_peer_ni_decref_locked(lpni);
1371                 if (lpni->lpni_peer_net->lpn_peer == lp)
1372                         goto out;
1373                 if (lnet_peer_ni_is_configured(lpni)) {
1374                         rc = -EEXIST;
1375                         goto out;
1376                 }
1377                 /* If this is the primary NID, destroy the peer. */
1378                 if (lnet_peer_ni_is_primary(lpni)) {
1379                         lnet_peer_del(lpni->lpni_peer_net->lpn_peer);
1380                         lpni = lnet_peer_ni_alloc(nid);
1381                         if (!lpni) {
1382                                 rc = -ENOMEM;
1383                                 goto out;
1384                         }
1385                 }
1386         } else {
1387                 lpni = lnet_peer_ni_alloc(nid);
1388                 if (!lpni) {
1389                         rc = -ENOMEM;
1390                         goto out;
1391                 }
1392         }
1393
1394         /*
1395          * Get the peer_net. Check that we're not adding a second
1396          * peer_ni on a peer_net of a non-multi-rail peer.
1397          */
1398         lpn = lnet_peer_get_net_locked(lp, LNET_NIDNET(nid));
1399         if (!lpn) {
1400                 lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1401                 if (!lpn) {
1402                         rc = -ENOMEM;
1403                         goto out_free_lpni;
1404                 }
1405         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1406                 rc = -ENOTUNIQ;
1407                 goto out_free_lpni;
1408         }
1409
1410         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1411
1412 out_free_lpni:
1413         /* If the peer_ni was allocated above, its peer_net pointer is NULL */
1414         if (!lpni->lpni_peer_net)
1415                 LIBCFS_FREE(lpni, sizeof(*lpni));
1416 out:
1417         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1418                libcfs_nid2str(lp->lp_primary_nid), libcfs_nid2str(nid),
1419                flags, rc);
1420         return rc;
1421 }
1422
1423 /*
1424  * Update the primary NID of a peer, if possible.
1425  *
1426  * Call with the lnet_api_mutex held.
1427  */
1428 static int
1429 lnet_peer_set_primary_nid(struct lnet_peer *lp, lnet_nid_t nid, unsigned flags)
1430 {
1431         lnet_nid_t old = lp->lp_primary_nid;
1432         int rc = 0;
1433
1434         if (lp->lp_primary_nid == nid)
1435                 goto out;
1436         rc = lnet_peer_add_nid(lp, nid, flags);
1437         if (rc)
1438                 goto out;
1439         lp->lp_primary_nid = nid;
1440 out:
1441         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1442                libcfs_nid2str(old), libcfs_nid2str(nid), rc);
1443         return rc;
1444 }
1445
1446 /*
1447  * lpni creation initiated due to traffic, either sent or received.
1448  */
1449 static int
1450 lnet_peer_ni_traffic_add(lnet_nid_t nid, lnet_nid_t pref)
1451 {
1452         struct lnet_peer *lp;
1453         struct lnet_peer_net *lpn;
1454         struct lnet_peer_ni *lpni;
1455         unsigned flags = 0;
1456         int rc = 0;
1457
1458         if (nid == LNET_NID_ANY) {
1459                 rc = -EINVAL;
1460                 goto out;
1461         }
1462
1463         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1464         lpni = lnet_find_peer_ni_locked(nid);
1465         if (lpni) {
1466                 /*
1467                  * We must have raced with another thread. Since we
1468                  * know next to nothing about a peer_ni created by
1469                  * traffic, we just assume everything is ok and
1470                  * return.
1471                  */
1472                 lnet_peer_ni_decref_locked(lpni);
1473                 goto out;
1474         }
1475
1476         /* Create peer, peer_net, and peer_ni. */
1477         rc = -ENOMEM;
1478         lp = lnet_peer_alloc(nid);
1479         if (!lp)
1480                 goto out;
1481         lpn = lnet_peer_net_alloc(LNET_NIDNET(nid));
1482         if (!lpn)
1483                 goto out_free_lp;
1484         lpni = lnet_peer_ni_alloc(nid);
1485         if (!lpni)
1486                 goto out_free_lpn;
1487         if (pref != LNET_NID_ANY)
1488                 lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1489
1490         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1491
1492 out_free_lpn:
1493         LIBCFS_FREE(lpn, sizeof(*lpn));
1494 out_free_lp:
1495         LIBCFS_FREE(lp, sizeof(*lp));
1496 out:
1497         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(nid), rc);
1498         return rc;
1499 }
1500
1501 /*
1502  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1503  *
1504  * This API handles the following combinations:
1505  *   - Create a peer with its primary NI if only prim_nid is provided.
1506  *   - Add a NID to a peer identified by prim_nid; that peer must
1507  *     already exist.
1508  *   - The peer being created may be non-MR.
1509  *
1510  * The caller must hold ln_api_mutex. This prevents the peer from
1511  * being created/modified/deleted by a different thread.
1512  */
1513 int
1514 lnet_add_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid, bool mr)
1515 {
1516         struct lnet_peer *lp = NULL;
1517         struct lnet_peer_ni *lpni;
1518         unsigned flags;
1519
1520         /* The prim_nid must always be specified */
1521         if (prim_nid == LNET_NID_ANY)
1522                 return -EINVAL;
1523
1524         flags = LNET_PEER_CONFIGURED;
1525         if (mr)
1526                 flags |= LNET_PEER_MULTI_RAIL;
1527
1528         /*
1529          * If nid isn't specified, we must create a new peer with
1530          * prim_nid as its primary nid.
1531          */
1532         if (nid == LNET_NID_ANY)
1533                 return lnet_peer_add(prim_nid, flags);
1534
1535         /* Look up the prim_nid, which must exist. */
1536         lpni = lnet_find_peer_ni_locked(prim_nid);
1537         if (!lpni)
1538                 return -ENOENT;
1539         lnet_peer_ni_decref_locked(lpni);
1540         lp = lpni->lpni_peer_net->lpn_peer;
1541
1542         /* Peer must have been configured. */
1543         if (!(lp->lp_state & LNET_PEER_CONFIGURED)) {
1544                 CDEBUG(D_NET, "peer %s was not configured\n",
1545                        libcfs_nid2str(prim_nid));
1546                 return -ENOENT;
1547         }
1548
1549         /* Primary NID must match */
1550         if (lp->lp_primary_nid != prim_nid) {
1551                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1552                        libcfs_nid2str(prim_nid),
1553                        libcfs_nid2str(lp->lp_primary_nid));
1554                 return -ENODEV;
1555         }
1556
1557         /* Multi-Rail flag must match. */
1558         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
1559                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
1560                        libcfs_nid2str(prim_nid));
1561                 return -EPERM;
1562         }
1563
1564         return lnet_peer_add_nid(lp, nid, flags);
1565 }
1566
1567 /*
1568  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
1569  *
1570  * This API handles the following combinations:
1571  *   Delete a NI from a peer if both prim_nid and nid are provided.
1572  *   Delete a peer if only prim_nid is provided.
1573  *   Delete a peer if the nid provided is its primary NID.
1574  *
1575  * The caller must hold ln_api_mutex. This prevents the peer from
1576  * being modified/deleted by a different thread.
1577  */
1578 int
1579 lnet_del_peer_ni(lnet_nid_t prim_nid, lnet_nid_t nid)
1580 {
1581         struct lnet_peer *lp;
1582         struct lnet_peer_ni *lpni;
1583         unsigned flags;
1584
1585         if (prim_nid == LNET_NID_ANY)
1586                 return -EINVAL;
1587
1588         lpni = lnet_find_peer_ni_locked(prim_nid);
1589         if (!lpni)
1590                 return -ENOENT;
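             /* As in lnet_add_peer_ni(), ln_api_mutex keeps lpni valid
              * after the lookup ref is dropped. */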
1591         lnet_peer_ni_decref_locked(lpni);
1592         lp = lpni->lpni_peer_net->lpn_peer;
1593
1594         if (prim_nid != lp->lp_primary_nid) {
1595                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
1596                        libcfs_nid2str(prim_nid),
1597                        libcfs_nid2str(lp->lp_primary_nid));
1598                 return -ENODEV;
1599         }
1600
1601         if (nid == LNET_NID_ANY || nid == lp->lp_primary_nid)
1602                 return lnet_peer_del(lp);
1603
1604         flags = LNET_PEER_CONFIGURED;
1605         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
1606                 flags |= LNET_PEER_MULTI_RAIL;
1607
1608         return lnet_peer_del_nid(lp, nid, flags);
1609 }
1610
1611 void
1612 lnet_destroy_peer_ni_locked(struct lnet_peer_ni *lpni)
1613 {
1614         struct lnet_peer_table *ptable;
1615         struct lnet_peer_net *lpn;
1616
1617         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nid2str(lpni->lpni_nid));
1618
1619         LASSERT(atomic_read(&lpni->lpni_refcount) == 0);
1620         LASSERT(lpni->lpni_rtr_refcount == 0);
1621         LASSERT(list_empty(&lpni->lpni_txq));
1622         LASSERT(lpni->lpni_txqnob == 0);
1623         LASSERT(list_empty(&lpni->lpni_peer_nis));
1624         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
1625
1626         lpn = lpni->lpni_peer_net;
1627         lpni->lpni_peer_net = NULL;
1628         lpni->lpni_net = NULL;
1629
1630         /* remove the peer ni from the zombie list */
1631         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1632         spin_lock(&ptable->pt_zombie_lock);
1633         list_del_init(&lpni->lpni_hashlist);
1634         ptable->pt_zombies--;
1635         spin_unlock(&ptable->pt_zombie_lock);
1636
1637         if (lpni->lpni_pref_nnids > 1) {
1638                 LIBCFS_FREE(lpni->lpni_pref.nids,
1639                         sizeof(*lpni->lpni_pref.nids) * lpni->lpni_pref_nnids);
1640         }
1641         LIBCFS_FREE(lpni, sizeof(*lpni));
1642
1643         lnet_peer_net_decref_locked(lpn);
1644 }
1645
1646 struct lnet_peer_ni *
1647 lnet_nid2peerni_ex(lnet_nid_t nid, int cpt)
1648 {
1649         struct lnet_peer_ni *lpni = NULL;
1650         int rc;
1651
1652         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1653                 return ERR_PTR(-ESHUTDOWN);
1654
1655         /*
1656          * find if a peer_ni already exists.
1657          * If so then just return that.
1658          */
1659         lpni = lnet_find_peer_ni_locked(nid);
1660         if (lpni)
1661                 return lpni;
1662
1663         lnet_net_unlock(cpt);
1664
1665         rc = lnet_peer_ni_traffic_add(nid, LNET_NID_ANY);
1666         if (rc) {
1667                 lpni = ERR_PTR(rc);
1668                 goto out_net_relock;
1669         }
1670
1671         lpni = lnet_find_peer_ni_locked(nid);
1672         LASSERT(lpni);
1673
1674 out_net_relock:
1675         lnet_net_lock(cpt);
1676
1677         return lpni;
1678 }
1679
1680 /*
1681  * Get a peer_ni for the given nid, create it if necessary. Takes a
1682  * hold on the peer_ni.
1683  */
1684 struct lnet_peer_ni *
1685 lnet_nid2peerni_locked(lnet_nid_t nid, lnet_nid_t pref, int cpt)
1686 {
1687         struct lnet_peer_ni *lpni = NULL;
1688         int rc;
1689
1690         if (the_lnet.ln_state != LNET_STATE_RUNNING)
1691                 return ERR_PTR(-ESHUTDOWN);
1692
1693         /*
1694          * find if a peer_ni already exists.
1695          * If so then just return that.
1696          */
1697         lpni = lnet_find_peer_ni_locked(nid);
1698         if (lpni)
1699                 return lpni;
1700
1701         /*
1702          * Slow path:
1703          * use the lnet_api_mutex to serialize the creation of the peer_ni
1704          * and the creation/deletion of the local ni/net. When a local ni is
1705          * created, if there exists a set of peer_nis on that network,
1706          * they need to be traversed and updated. When a local NI is
1707          * deleted, which could result in a network being deleted, then
1708          * all peer nis on that network need to be removed as well.
1709          *
1710          * Creation through traffic should also be serialized with
1711          * creation through DLC.
1712          */
1713         lnet_net_unlock(cpt);
1714         mutex_lock(&the_lnet.ln_api_mutex);
1715         /*
1716          * Shutdown is only set while ln_api_mutex is held, so a
1717          * single check here is sufficient.
1718          */
1719         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1720                 lpni = ERR_PTR(-ESHUTDOWN);
1721                 goto out_mutex_unlock;
1722         }
1723
1724         rc = lnet_peer_ni_traffic_add(nid, pref);
1725         if (rc) {
1726                 lpni = ERR_PTR(rc);
1727                 goto out_mutex_unlock;
1728         }
1729
1730         lpni = lnet_find_peer_ni_locked(nid);
1731         LASSERT(lpni);
1732
1733 out_mutex_unlock:
1734         mutex_unlock(&the_lnet.ln_api_mutex);
1735         lnet_net_lock(cpt);
1736
1737         /* Lock has been dropped, check again for shutdown. */
1738         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1739                 if (!IS_ERR(lpni))
1740                         lnet_peer_ni_decref_locked(lpni);
1741                 lpni = ERR_PTR(-ESHUTDOWN);
1742         }
1743
1744         return lpni;
1745 }
1746
1747 /*
1748  * Is a peer uptodate from the point of view of discovery?
1749  *
1750  * If it is currently being processed, obviously not.
1751  * A forced Ping or Push is also handled by the discovery thread.
1752  *
1753  * Otherwise look at whether the peer needs rediscovering.
1754  */
1755 bool
1756 lnet_peer_is_uptodate(struct lnet_peer *lp)
1757 {
1758         bool rc;
1759
1760         spin_lock(&lp->lp_lock);
1761         if (lp->lp_state & (LNET_PEER_DISCOVERING |
1762                             LNET_PEER_FORCE_PING |
1763                             LNET_PEER_FORCE_PUSH)) {
1764                 rc = false;
1765         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
1766                 rc = true;
1767         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
1768                 if (lnet_peer_discovery_disabled)
1769                         rc = true;
1770                 else
1771                         rc = false;
1772         } else if (lnet_peer_needs_push(lp)) {
1773                 rc = false;
1774         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
1775                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
1776                         rc = true;
1777                 else
1778                         rc = false;
1779         } else {
1780                 rc = false;
1781         }
1782         spin_unlock(&lp->lp_lock);
1783
1784         return rc;
1785 }
1786
1787 /*
1788  * Queue a peer for the attention of the discovery thread.  Call with
1789  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
1790  * -EALREADY if the peer was already queued.
1791  */
1792 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
1793 {
1794         int rc;
1795
1796         spin_lock(&lp->lp_lock);
1797         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
1798                 lp->lp_state |= LNET_PEER_DISCOVERING;
1799         spin_unlock(&lp->lp_lock);
1800         if (list_empty(&lp->lp_dc_list)) {
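                     /* Take a ref so the peer cannot be freed while it
                      * sits on the discovery queue; the ref is dropped
                      * when the peer is dequeued in
                      * lnet_peer_discovery_complete(). */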
1801                 lnet_peer_addref_locked(lp);
1802                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
1803                 wake_up(&the_lnet.ln_dc_waitq);
1804                 rc = 0;
1805         } else {
1806                 rc = -EALREADY;
1807         }
1808
1809         CDEBUG(D_NET, "Queue peer %s: %d\n",
1810                libcfs_nid2str(lp->lp_primary_nid), rc);
1811
1812         return rc;
1813 }
1814
1815 /*
1816  * Discovery of a peer is complete. Wake all waiters on the peer.
1817  * Call with lnet_net_lock/EX held.
1818  */
1819 static void lnet_peer_discovery_complete(struct lnet_peer *lp)
1820 {
1821         struct lnet_msg *msg, *tmp;
1822         int rc = 0;
1823         struct list_head pending_msgs;
1824
1825         INIT_LIST_HEAD(&pending_msgs);
1826
1827         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
1828                libcfs_nid2str(lp->lp_primary_nid));
1829
1830         list_del_init(&lp->lp_dc_list);
1831         spin_lock(&lp->lp_lock);
1832         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
1833         spin_unlock(&lp->lp_lock);
1834         wake_up_all(&lp->lp_dc_waitq);
1835
1836         lnet_net_unlock(LNET_LOCK_EX);
1837
1838         /* iterate through all pending messages and send them again */
1839         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
1840                 list_del_init(&msg->msg_list);
1841                 if (lp->lp_dc_error) {
1842                         lnet_finalize(msg, lp->lp_dc_error);
1843                         continue;
1844                 }
1845
1846                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
1847                        lnet_msgtyp2str(msg->msg_type),
1848                        libcfs_id2str(msg->msg_target));
1849                 rc = lnet_send(msg->msg_src_nid_param, msg,
1850                                msg->msg_rtr_nid_param);
1851                 if (rc < 0) {
1852                         CNETERR("Error sending %s to %s: %d\n",
1853                                lnet_msgtyp2str(msg->msg_type),
1854                                libcfs_id2str(msg->msg_target), rc);
1855                         lnet_finalize(msg, rc);
1856                 }
1857         }
1858         lnet_net_lock(LNET_LOCK_EX);
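             /* Drop the ref taken when the peer was queued for discovery. */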
1859         lnet_peer_decref_locked(lp);
1860 }
1861
1862 /*
1863  * Handle inbound push.
1864  * Like any event handler, called with lnet_res_lock/CPT held.
1865  */
1866 void lnet_peer_push_event(struct lnet_event *ev)
1867 {
1868         struct lnet_ping_buffer *pbuf = ev->md.user_ptr;
1869         struct lnet_peer *lp;
1870
1871         /* lnet_find_peer() adds a refcount */
1872         lp = lnet_find_peer(ev->source.nid);
1873         if (!lp) {
1874                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
1875                        libcfs_nid2str(ev->initiator.nid),
1876                        libcfs_nid2str(ev->source.nid));
1877                 return;
1878         }
1879
1880         /* Ensure peer state remains consistent while we modify it. */
1881         spin_lock(&lp->lp_lock);
1882
1883         /*
1884          * If some kind of error happened, the contents of the message
1885          * cannot be used. Clear the NIDS_UPTODATE flag and set
1886          * FORCE_PING to trigger a ping.
1887          */
1888         if (ev->status) {
1889                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1890                 lp->lp_state |= LNET_PEER_FORCE_PING;
1891                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
1892                        ev->status,
1893                        libcfs_nid2str(lp->lp_primary_nid),
1894                        libcfs_nid2str(ev->source.nid));
1895                 goto out;
1896         }
1897
1898         /*
1899          * A push with invalid or corrupted info. Clear the UPTODATE
1900          * flag to trigger a ping.
1901          */
1902         if (lnet_ping_info_validate(&pbuf->pb_info)) {
1903                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1904                 lp->lp_state |= LNET_PEER_FORCE_PING;
1905                 CDEBUG(D_NET, "Corrupted Push from %s\n",
1906                        libcfs_nid2str(lp->lp_primary_nid));
1907                 goto out;
1908         }
1909
1910         /*
1911          * Make sure we'll allocate the correct size ping buffer when
1912          * pinging the peer.
1913          */
1914         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
1915                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
1916
1917         /*
1918          * A non-Multi-Rail peer is not supposed to be capable of
1919          * sending a push.
1920          */
1921         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
1922                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
1923                        libcfs_nid2str(lp->lp_primary_nid));
1924                 goto out;
1925         }
1926
1927         /*
1928          * The peer may have discovery disabled at its end. Set
1929          * NO_DISCOVERY as appropriate.
1930          */
1931         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
1932                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
1933                        libcfs_nid2str(lp->lp_primary_nid));
1934                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
1935         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
1936                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
1937                        libcfs_nid2str(lp->lp_primary_nid));
1938                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
1939         }
1940
1941         /*
1942          * Update the MULTI_RAIL flag based on the push. If the peer
1943          * was configured with DLC then the setting should match what
1944          * DLC put in.
1945          * NB: We verified above that the MR feature bit is set in pi_features
1946          */
1947         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1948                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
1949                        libcfs_nid2str(lp->lp_primary_nid), lp);
1950         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
1951                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
1952                       libcfs_nid2str(lp->lp_primary_nid));
1953         } else if (lnet_peer_discovery_disabled) {
1954                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
1955                        libcfs_nid2str(lp->lp_primary_nid), lp);
1956         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
1957                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
1958                        libcfs_nid2str(lp->lp_primary_nid), lp);
1959         } else {
1960                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
1961                        libcfs_nid2str(lp->lp_primary_nid), lp);
1962                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
1963                 lnet_peer_clr_non_mr_pref_nids(lp);
1964         }
1965
1966         /*
1967          * Check for truncation of the Put message. Clear the
1968          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
1969          * and tell discovery to allocate a bigger buffer.
1970          */
1971         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
1972                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
1973                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
1974                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1975                 lp->lp_state |= LNET_PEER_FORCE_PING;
1976                 CDEBUG(D_NET, "Truncated Push from %s (%d nids)\n",
1977                        libcfs_nid2str(lp->lp_primary_nid),
1978                        pbuf->pb_info.pi_nnis);
1979                 goto out;
1980         }
1981
1982         /* always assume new data */
1983         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
1984         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
1985
1986         /*
1987          * If there is data present that hasn't been processed yet,
1988          * we'll replace it if the Put contained newer data and it
1989          * fits. We're racing with a Ping or earlier Push in this
1990          * case.
1991          */
1992         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
1993                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
1994                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
1995                     pbuf->pb_info.pi_nnis <= lp->lp_data->pb_nnis) {
1996                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
1997                                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
1998                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
1999                               libcfs_nid2str(lp->lp_primary_nid),
2000                               LNET_PING_BUFFER_SEQNO(pbuf),
2001                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2002                 }
2003                 goto out;
2004         }
2005
2006         /*
2007          * Allocate a buffer to copy the data. On a failure we drop
2008          * the Push and set FORCE_PING to force the discovery
2009          * thread to fix the problem by pinging the peer.
2010          */
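             /* GFP_ATOMIC: lp_lock is held and we are in event-handler
              * context, so this allocation must not sleep. */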
2011         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_nnis, GFP_ATOMIC);
2012         if (!lp->lp_data) {
2013                 lp->lp_state |= LNET_PEER_FORCE_PING;
2014                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2015                        libcfs_nid2str(lp->lp_primary_nid),
2016                        LNET_PING_BUFFER_SEQNO(pbuf));
2017                 goto out;
2018         }
2019
2020         /* Success */
2021         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2022                LNET_PING_INFO_SIZE(pbuf->pb_info.pi_nnis));
2023         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2024         CDEBUG(D_NET, "Received Push %s %u\n",
2025                libcfs_nid2str(lp->lp_primary_nid),
2026                LNET_PING_BUFFER_SEQNO(pbuf));
2027
2028 out:
2029         /*
2030          * Queue the peer for discovery if that hasn't happened yet.
2031          * If it was already queued, its status has changed, so move it
2032          * back to the request queue and wake the discovery thread.
2033          */
2034         spin_unlock(&lp->lp_lock);
2035         lnet_net_lock(LNET_LOCK_EX);
2036         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2037                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2038                 wake_up(&the_lnet.ln_dc_waitq);
2039         }
2040         /* Drop refcount from lookup */
2041         lnet_peer_decref_locked(lp);
2042         lnet_net_unlock(LNET_LOCK_EX);
2043 }
2044
2045 /*
2046  * Clear the discovery error state, unless we're already discovering
2047  * this peer, in which case the error is current.
2048  */
2049 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2050 {
2051         spin_lock(&lp->lp_lock);
2052         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2053                 lp->lp_dc_error = 0;
2054         spin_unlock(&lp->lp_lock);
2055 }
2056
2057 /*
2058  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2059  * dropped/retaken within this function. An lnet_peer_ni is passed in
2060  * because discovery could tear down an lnet_peer.
2061  */
2062 int
2063 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2064 {
2065         DEFINE_WAIT(wait);
2066         struct lnet_peer *lp;
2067         int rc = 0;
2068         int count = 0;
2069
2070 again:
2071         lnet_net_unlock(cpt);
2072         lnet_net_lock(LNET_LOCK_EX);
2073         lp = lpni->lpni_peer_net->lpn_peer;
2074         lnet_peer_clear_discovery_error(lp);
2075
2076         /*
2077          * We're willing to be interrupted. The lpni can become a
2078          * zombie if we race with DLC, so we must check for that.
2079          */
2080         for (;;) {
2081                 /* Keep lp alive when the lnet_net_lock is unlocked */
2082                 lnet_peer_addref_locked(lp);
2083                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2084                 if (signal_pending(current))
2085                         break;
2086                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2087                         break;
2088                 /*
2089                  * Don't repeat discovery if discovery is disabled. This
2090                  * ensures discovery can double as a standard ping, for
2091                  * backwards compatibility with routers that do not
2092                  * support discovery or have it disabled.
2093                  */
2094                 if (lnet_is_discovery_disabled(lp) && count > 0)
2095                         break;
2096                 if (lp->lp_dc_error)
2097                         break;
2098                 if (lnet_peer_is_uptodate(lp))
2099                         break;
2100                 lnet_peer_queue_for_discovery(lp);
2101                 count++;
2102                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2103
2104                 /*
2105                  * If caller requested a non-blocking operation then
2106                  * return immediately. Once discovery is complete any
2107                  * pending messages that were stopped due to discovery
2108                  * will be transmitted.
2109                  */
2110                 if (!block)
2111                         break;
2112
2113                 lnet_net_unlock(LNET_LOCK_EX);
2114                 schedule();
2115                 finish_wait(&lp->lp_dc_waitq, &wait);
2116                 lnet_net_lock(LNET_LOCK_EX);
2117                 lnet_peer_decref_locked(lp);
2118                 /* Peer may have changed */
2119                 lp = lpni->lpni_peer_net->lpn_peer;
2120         }
2121         finish_wait(&lp->lp_dc_waitq, &wait);
2122
2123         lnet_net_unlock(LNET_LOCK_EX);
2124         lnet_net_lock(cpt);
2125         lnet_peer_decref_locked(lp);
2126         /*
2127          * The peer may have changed, so re-check and rediscover if that turns
2128          * out to have been the case. The reference count on lp ensured that
2129          * even if it was unlinked from lpni the memory could not be recycled.
2130          * Thus the check below is sufficient to determine whether the peer
2131          * changed. If the peer changed, then lp must not be dereferenced.
2132          */
2133         if (lp != lpni->lpni_peer_net->lpn_peer)
2134                 goto again;
2135
2136         if (signal_pending(current))
2137                 rc = -EINTR;
2138         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2139                 rc = -ESHUTDOWN;
2140         else if (lp->lp_dc_error)
2141                 rc = lp->lp_dc_error;
2142         else if (!block)
2143                 CDEBUG(D_NET, "non-blocking discovery\n");
2144         else if (!lnet_peer_is_uptodate(lp))
2145                 goto again;
2146
2147         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2148                (lp ? libcfs_nid2str(lp->lp_primary_nid) : "(none)"),
2149                libcfs_nid2str(lpni->lpni_nid), rc,
2150                (!block) ? "pending discovery" : "discovery complete");
2151
2152         return rc;
2153 }
2154
2155 /* Handle an incoming ack for a push. */
2156 static void
2157 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2158 {
2159         struct lnet_ping_buffer *pbuf;
2160
2161         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
2162         spin_lock(&lp->lp_lock);
2163         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2164         lp->lp_push_error = ev->status;
2165         if (ev->status)
2166                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2167         else
2168                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2169         spin_unlock(&lp->lp_lock);
2170
2171         CDEBUG(D_NET, "peer %s ev->status %d\n",
2172                libcfs_nid2str(lp->lp_primary_nid), ev->status);
2173 }
2174
2175 /* Handle a Reply message. This is the reply to a Ping message. */
2176 static void
2177 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2178 {
2179         struct lnet_ping_buffer *pbuf;
2180         int rc;
2181
2182         spin_lock(&lp->lp_lock);
2183
2184         lp->lp_disc_src_nid = ev->target.nid;
2185
2186         /*
2187          * If some kind of error happened, the contents of the message
2188          * cannot be used. Set PING_FAILED to trigger a retry.
2189          */
2190         if (ev->status) {
2191                 lp->lp_state |= LNET_PEER_PING_FAILED;
2192                 lp->lp_ping_error = ev->status;
2193                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2194                        ev->status,
2195                        libcfs_nid2str(lp->lp_primary_nid),
2196                        libcfs_nid2str(ev->source.nid));
2197                 goto out;
2198         }
2199
2200         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md.start);
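             /* The Reply may come from a peer of opposite endianness; a
              * byte-swapped magic means the whole buffer needs swabbing. */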
2201         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2202                 lnet_swap_pinginfo(pbuf);
2203
2204         /*
2205          * A reply with invalid or corrupted info. Set PING_FAILED to
2206          * trigger a retry.
2207          */
2208         rc = lnet_ping_info_validate(&pbuf->pb_info);
2209         if (rc) {
2210                 lp->lp_state |= LNET_PEER_PING_FAILED;
2211                 lp->lp_ping_error = 0;
2212                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2213                        libcfs_nid2str(lp->lp_primary_nid), rc);
2214                 goto out;
2215         }
2216
2217         /*
2218          * Update the MULTI_RAIL flag based on the reply. If the peer
2219          * was configured with DLC then the setting should match what
2220          * DLC put in.
2221          */
2222         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2223                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2224                         /* Everything's fine */
2225                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2226                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2227                               libcfs_nid2str(lp->lp_primary_nid));
2228                 } else {
2229                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2230                         lnet_peer_clr_non_mr_pref_nids(lp);
2231                 }
2232         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2233                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2234                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2235                               libcfs_nid2str(lp->lp_primary_nid));
2236                 } else {
2237                         CERROR("Multi-Rail state vanished from %s\n",
2238                                libcfs_nid2str(lp->lp_primary_nid));
2239                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2240                 }
2241         }
2242
2243         /*
2244          * Make sure we'll allocate the correct size ping buffer when
2245          * pinging the peer.
2246          */
2247         if (lp->lp_data_nnis < pbuf->pb_info.pi_nnis)
2248                 lp->lp_data_nnis = pbuf->pb_info.pi_nnis;
2249
2250         /*
2251          * The peer may have discovery disabled at its end. Set
2252          * NO_DISCOVERY as appropriate.
2253          */
2254         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2255                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2256                        libcfs_nid2str(lp->lp_primary_nid));
2257                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2258         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2259                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2260                        libcfs_nid2str(lp->lp_primary_nid));
2261                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2262         }
2263
2264         /*
2265          * Check for truncation of the Reply. Clear PING_SENT and set
2266          * PING_FAILED to trigger a retry.
2267          */
2268         if (pbuf->pb_nnis < pbuf->pb_info.pi_nnis) {
2269                 if (the_lnet.ln_push_target_nnis < pbuf->pb_info.pi_nnis)
2270                         the_lnet.ln_push_target_nnis = pbuf->pb_info.pi_nnis;
2271                 lp->lp_state |= LNET_PEER_PING_FAILED;
2272                 lp->lp_ping_error = 0;
2273                 CDEBUG(D_NET, "Truncated Reply from %s (%d nids)\n",
2274                        libcfs_nid2str(lp->lp_primary_nid),
2275                        pbuf->pb_info.pi_nnis);
2276                 goto out;
2277         }
2278
2279         /*
2280          * Check the sequence numbers in the reply. These are only
2281          * available if the reply came from a Multi-Rail peer.
2282          */
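             /* pi_ni[0] is the loopback NID, so when the buffer identifies
              * this peer its primary NID is at index 1 (see the NID layout
              * described above lnet_peer_merge_data()). */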
2283         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2284             pbuf->pb_info.pi_nnis > 1 &&
2285             lp->lp_primary_nid == pbuf->pb_info.pi_ni[1].ns_nid) {
2286                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2287                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2288                                 libcfs_nid2str(lp->lp_primary_nid),
2289                                 LNET_PING_BUFFER_SEQNO(pbuf),
2290                                 lp->lp_peer_seqno);
2291
2292                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2293         }
2294
2295         /* We're happy with the state of the data in the buffer. */
2296         CDEBUG(D_NET, "peer %s data present %u\n",
2297                libcfs_nid2str(lp->lp_primary_nid), lp->lp_peer_seqno);
2298         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2299                 lnet_ping_buffer_decref(lp->lp_data);
2300         else
2301                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2302         lnet_ping_buffer_addref(pbuf);
2303         lp->lp_data = pbuf;
2304 out:
2305         lp->lp_state &= ~LNET_PEER_PING_SENT;
2306         spin_unlock(&lp->lp_lock);
2307 }
2308
2309 /*
2310  * Send event handling. Only matters for error cases, where we clean
2311  * up state on the peer and peer_ni that would otherwise be updated in
2312  * the REPLY event handler for a successful Ping, and the ACK event
2313  * handler for a successful Push.
2314  */
2315 static int
2316 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2317 {
2318         int rc = 0;
2319
2320         if (!ev->status)
2321                 goto out;
2322
2323         spin_lock(&lp->lp_lock);
2324         if (ev->msg_type == LNET_MSG_GET) {
2325                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2326                 lp->lp_state |= LNET_PEER_PING_FAILED;
2327                 lp->lp_ping_error = ev->status;
2328         } else { /* ev->msg_type == LNET_MSG_PUT */
2329                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2330                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2331                 lp->lp_push_error = ev->status;
2332         }
2333         spin_unlock(&lp->lp_lock);
2334         rc = LNET_REDISCOVER_PEER;
2335 out:
2336         CDEBUG(D_NET, "%s Send to %s: %d\n",
2337                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
2338                 libcfs_nid2str(ev->target.nid), rc);
2339         return rc;
2340 }
2341
2342 /*
2343  * Unlink event handling. This event is only seen if a call to
2344  * LNetMDUnlink() caused the event to be unlinked. If this call was
2345  * made after the event was set up in LNetGet() or LNetPut() then we
2346  * assume the Ping or Push timed out.
2347  */
2348 static void
2349 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
2350 {
2351         spin_lock(&lp->lp_lock);
2352         /* We've passed through LNetGet() */
2353         if (lp->lp_state & LNET_PEER_PING_SENT) {
2354                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2355                 lp->lp_state |= LNET_PEER_PING_FAILED;
2356                 lp->lp_ping_error = -ETIMEDOUT;
2357                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
2358                         libcfs_nid2str(lp->lp_primary_nid));
2359         }
2360         /* We've passed through LNetPut() */
2361         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
2362                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2363                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2364                 lp->lp_push_error = -ETIMEDOUT;
2365                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
2366                         libcfs_nid2str(lp->lp_primary_nid));
2367         }
2368         spin_unlock(&lp->lp_lock);
2369 }
2370
2371 /*
2372  * Event handler for the discovery EQ.
2373  *
2374  * Called with lnet_res_lock(cpt) held. The cpt is the
2375  * lnet_cpt_of_cookie() of the md handle cookie.
2376  */
2377 static void lnet_discovery_event_handler(struct lnet_event *event)
2378 {
2379         struct lnet_peer *lp = event->md.user_ptr;
2380         struct lnet_ping_buffer *pbuf;
2381         int rc;
2382
2383         /* discovery needs to take another look */
2384         rc = LNET_REDISCOVER_PEER;
2385
2386         CDEBUG(D_NET, "Received event: %d\n", event->type);
2387
2388         switch (event->type) {
2389         case LNET_EVENT_ACK:
2390                 lnet_discovery_event_ack(lp, event);
2391                 break;
2392         case LNET_EVENT_REPLY:
2393                 lnet_discovery_event_reply(lp, event);
2394                 break;
2395         case LNET_EVENT_SEND:
2396                 /* Only send failure triggers a retry. */
2397                 rc = lnet_discovery_event_send(lp, event);
2398                 break;
2399         case LNET_EVENT_UNLINK:
2400                 /* LNetMDUnlink() was called */
2401                 lnet_discovery_event_unlink(lp, event);
2402                 break;
2403         default:
2404                 /* Invalid events. */
2405                 LBUG();
2406         }
2407         lnet_net_lock(LNET_LOCK_EX);
2408         if (event->unlinked) {
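                     /* The MD is gone: release its refs on the ping
                      * buffer and on the peer, taken when the Ping or
                      * Push was sent. */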
2409                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md.start);
2410                 lnet_ping_buffer_decref(pbuf);
2411                 lnet_peer_decref_locked(lp);
2412         }
2413
2414         /* Put the peer back at the end of the request queue if
2415          * discovery has not completed yet. */
2416         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
2417             lnet_peer_queue_for_discovery(lp)) {
2418                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2419                 wake_up(&the_lnet.ln_dc_waitq);
2420         }
2421         lnet_net_unlock(LNET_LOCK_EX);
2422 }
2423
2424 /*
2425  * Build a peer from incoming data.
2426  *
2427  * The NIDs in the incoming data are supposed to be structured as follows:
2428  *  - loopback
2429  *  - primary NID
2430  *  - other NIDs in same net
2431  *  - NIDs in second net
2432  *  - NIDs in third net
2433  *  - ...
2434  * This is due to the way the list of NIDs in the data is created.
2435  *
2436  * Note that this function will mark the peer uptodate unless an
2437  * ENOMEM is encountered. All other errors are due to a conflict
2438  * between the DLC configuration and what discovery sees. We treat DLC
2439  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
2440  * peer from becoming stuck in discovery.
2441  */
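     /*
      * A hypothetical example of the merge below: if the peer currently
      * has NIDs { 10.0.0.1@tcp, 10.0.0.2@tcp } and the ping buffer
      * carries { 0@lo, 10.0.0.1@tcp, 10.0.0.3@tcp }, then addnis[] ends
      * up holding { 10.0.0.3@tcp } and delnis[] { 10.0.0.2@tcp }.
      */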
2442 static int lnet_peer_merge_data(struct lnet_peer *lp,
2443                                 struct lnet_ping_buffer *pbuf)
2444 {
2445         struct lnet_peer_ni *lpni;
2446         lnet_nid_t *curnis = NULL;
2447         lnet_nid_t *addnis = NULL;
2448         lnet_nid_t *delnis = NULL;
2449         unsigned flags;
2450         int ncurnis;
2451         int naddnis;
2452         int ndelnis;
2453         int nnis = 0;
2454         int i;
2455         int j;
2456         int rc;
2457
2458         flags = LNET_PEER_DISCOVERED;
2459         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2460                 flags |= LNET_PEER_MULTI_RAIL;
2461
2462         nnis = MAX(lp->lp_nnis, pbuf->pb_info.pi_nnis);
2463         LIBCFS_ALLOC(curnis, nnis * sizeof(lnet_nid_t));
2464         LIBCFS_ALLOC(addnis, nnis * sizeof(lnet_nid_t));
2465         LIBCFS_ALLOC(delnis, nnis * sizeof(lnet_nid_t));
2466         if (!curnis || !addnis || !delnis) {
2467                 rc = -ENOMEM;
2468                 goto out;
2469         }
2470         ncurnis = 0;
2471         naddnis = 0;
2472         ndelnis = 0;
2473
2474         /* Construct the list of NIDs present in peer. */
2475         lpni = NULL;
2476         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
2477                 curnis[ncurnis++] = lpni->lpni_nid;
2478
2479         /*
2480          * Check for NIDs in pbuf not present in curnis[].
2481          * The loop starts at 1 to skip the loopback NID.
2482          */
2483         for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
2484                 for (j = 0; j < ncurnis; j++)
2485                         if (pbuf->pb_info.pi_ni[i].ns_nid == curnis[j])
2486                                 break;
2487                 if (j == ncurnis)
2488                         addnis[naddnis++] = pbuf->pb_info.pi_ni[i].ns_nid;
2489         }
2490         /*
2491          * Check for NIDs in curnis[] not present in pbuf.
2492          * The nested loop starts at 1 to skip the loopback NID.
2493          *
2494          * But never add the loopback NID to delnis[]: if it is
2495          * present in curnis[] then this peer is for this node.
2496          */
2497         for (i = 0; i < ncurnis; i++) {
2498                 if (curnis[i] == LNET_NID_LO_0)
2499                         continue;
2500                 for (j = 1; j < pbuf->pb_info.pi_nnis; j++)
2501                         if (curnis[i] == pbuf->pb_info.pi_ni[j].ns_nid)
2502                                 break;
2503                 if (j == pbuf->pb_info.pi_nnis)
2504                         delnis[ndelnis++] = curnis[i];
2505         }
2506
2507         rc = 0;
2508         if (lnet_is_discovery_disabled(lp))
2509                 goto out;
2510
2511         for (i = 0; i < naddnis; i++) {
2512                 rc = lnet_peer_add_nid(lp, addnis[i], flags);
2513                 if (rc) {
2514                         CERROR("Error adding NID %s to peer %s: %d\n",
2515                                libcfs_nid2str(addnis[i]),
2516                                libcfs_nid2str(lp->lp_primary_nid), rc);
2517                         if (rc == -ENOMEM)
2518                                 goto out;
2519                 }
2520         }
2521         for (i = 0; i < ndelnis; i++) {
2522                 rc = lnet_peer_del_nid(lp, delnis[i], flags);
2523                 if (rc) {
2524                         CERROR("Error deleting NID %s from peer %s: %d\n",
2525                                libcfs_nid2str(delnis[i]),
2526                                libcfs_nid2str(lp->lp_primary_nid), rc);
2527                         if (rc == -ENOMEM)
2528                                 goto out;
2529                 }
2530         }
2531         /*
2532          * Errors other than -ENOMEM are due to peers having been
2533          * configured with DLC. Ignore these because DLC overrides
2534          * Discovery.
2535          */
2536         rc = 0;
2537 out:
2538         LIBCFS_FREE(curnis, nnis * sizeof(lnet_nid_t));
2539         LIBCFS_FREE(addnis, nnis * sizeof(lnet_nid_t));
2540         LIBCFS_FREE(delnis, nnis * sizeof(lnet_nid_t));
2541         lnet_ping_buffer_decref(pbuf);
2542         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2543
2544         if (rc) {
2545                 spin_lock(&lp->lp_lock);
2546                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2547                 lp->lp_state |= LNET_PEER_FORCE_PING;
2548                 spin_unlock(&lp->lp_lock);
2549         }
2550         return rc;
2551 }
2552
2553 /*
2554  * The data in pbuf says lp is its primary peer, but the data was
2555  * received by a different peer. Try to update lp with the data.
2556  */
2557 static int
2558 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
2559 {
2560         struct lnet_handle_md mdh;
2561
2562         /* Queue lp for discovery, and force it on the request queue. */
2563         lnet_net_lock(LNET_LOCK_EX);
2564         if (lnet_peer_queue_for_discovery(lp))
2565                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2566         lnet_net_unlock(LNET_LOCK_EX);
2567
2568         LNetInvalidateMDHandle(&mdh);
2569
2570         /*
2571          * Decide whether we can move the peer to the DATA_PRESENT state.
2572          *
2573          * We replace stale data for a multi-rail peer, repair PING_FAILED
2574          * status, and preempt FORCE_PING.
2575          *
2576          * If after that we have DATA_PRESENT, we merge it into this peer.
2577          */
2578         spin_lock(&lp->lp_lock);
2579         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2580                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
2581                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2582                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2583                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2584                         lnet_ping_buffer_decref(pbuf);
2585                         pbuf = lp->lp_data;
2586                         lp->lp_data = NULL;
2587                 }
2588         }
2589         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2590                 lnet_ping_buffer_decref(lp->lp_data);
2591                 lp->lp_data = NULL;
2592                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2593         }
2594         if (lp->lp_state & LNET_PEER_PING_FAILED) {
2595                 mdh = lp->lp_ping_mdh;
2596                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2597                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
2598                 lp->lp_ping_error = 0;
2599         }
2600         if (lp->lp_state & LNET_PEER_FORCE_PING)
2601                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
2602         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2603         spin_unlock(&lp->lp_lock);
2604
2605         if (!LNetMDHandleIsInvalid(mdh))
2606                 LNetMDUnlink(mdh);
2607
2608         if (pbuf)
2609                 return lnet_peer_merge_data(lp, pbuf);
2610
2611         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2612         return 0;
2613 }
2614
2615 static bool lnet_is_nid_in_ping_info(lnet_nid_t nid, struct lnet_ping_info *pinfo)
2616 {
2617         int i;
2618
2619         for (i = 0; i < pinfo->pi_nnis; i++) {
2620                 if (pinfo->pi_ni[i].ns_nid == nid)
2621                         return true;
2622         }
2623
2624         return false;
2625 }
2626
2627 /*
2628  * Update a peer using the data received.
2629  */
2630 static int lnet_peer_data_present(struct lnet_peer *lp)
2631 __must_hold(&lp->lp_lock)
2632 {
2633         struct lnet_ping_buffer *pbuf;
2634         struct lnet_peer_ni *lpni;
2635         lnet_nid_t nid = LNET_NID_ANY;
2636         unsigned flags;
2637         int rc = 0;
2638
2639         pbuf = lp->lp_data;
2640         lp->lp_data = NULL;
2641         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
2642         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
2643         spin_unlock(&lp->lp_lock);
2644
2645         /*
2646          * Modifications of peer structures are done while holding the
2647          * ln_api_mutex. A global lock is required because we may be
2648          * modifying multiple peer structures, and a mutex greatly
2649          * simplifies memory management.
2650          *
2651          * The actual changes to the data structures must also protect
2652          * against concurrent lookups, for which the lnet_net_lock in
2653          * LNET_LOCK_EX mode is used.
2654          */
2655         mutex_lock(&the_lnet.ln_api_mutex);
2656         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2657                 rc = -ESHUTDOWN;
2658                 goto out;
2659         }
2660
2661         /*
2662          * If this peer is not on the peer list then it is being torn
2663          * down, and our reference count may be all that is keeping it
2664          * alive. Don't do any work on it.
2665          */
2666         if (list_empty(&lp->lp_peer_list))
2667                 goto out;
2668
2669         flags = LNET_PEER_DISCOVERED;
2670         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
2671                 flags |= LNET_PEER_MULTI_RAIL;
2672
2673         /*
2674          * Check whether the primary NID in the message matches the
2675          * primary NID of the peer. If it does, update the peer; if
2676          * it does not, check whether there is already a peer with
2677          * that primary NID. If no such peer exists, try to update
2678          * the primary NID of the current peer (allowed if it was
2679          * created due to message traffic) and complete the update.
2680          * If the peer did exist, hand off the data to it.
2681          *
2682          * The peer for the loopback interface is a special case: this
2683          * is the peer for the local node, and we want to set its
2684          * primary NID to the correct value here. Moreover, this peer
2685          * can show up with only the loopback NID in the ping buffer.
2686          */
2687         if (pbuf->pb_info.pi_nnis <= 1)
2688                 goto out;
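             /* pi_ni[0] is the loopback NID, so the peer's primary NID
              * is at index 1. */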
2689         nid = pbuf->pb_info.pi_ni[1].ns_nid;
2690         if (lp->lp_primary_nid == LNET_NID_LO_0) {
2691                 rc = lnet_peer_set_primary_nid(lp, nid, flags);
2692                 if (!rc)
2693                         rc = lnet_peer_merge_data(lp, pbuf);
2694         } else if (lp->lp_primary_nid == nid ||
2695                    (lnet_is_nid_in_ping_info(lp->lp_primary_nid, &pbuf->pb_info) &&
2696                     lnet_is_discovery_disabled(lp))) {
2697                 rc = lnet_peer_merge_data(lp, pbuf);
2698         } else {
2699                 lpni = lnet_find_peer_ni_locked(nid);
2700                 if (!lpni) {
2701                         rc = lnet_peer_set_primary_nid(lp, nid, flags);
2702                         if (rc) {
2703                                 CERROR("Primary NID error %s versus %s: %d\n",
2704                                        libcfs_nid2str(lp->lp_primary_nid),
2705                                        libcfs_nid2str(nid), rc);
2706                         } else {
2707                                 rc = lnet_peer_merge_data(lp, pbuf);
2708                         }
2709                 } else {
2710                         rc = lnet_peer_set_primary_data(
2711                                 lpni->lpni_peer_net->lpn_peer, pbuf);
2712                         lnet_peer_ni_decref_locked(lpni);
2713                 }
2714         }
2715 out:
2716         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2717         mutex_unlock(&the_lnet.ln_api_mutex);
2718
2719         spin_lock(&lp->lp_lock);
2720         /* Tell discovery to re-check the peer immediately. */
2721         if (!rc)
2722                 rc = LNET_REDISCOVER_PEER;
2723         return rc;
2724 }
2725
2726 /*
2727  * A ping failed. Clear the PING_FAILED state and set the
2728  * FORCE_PING state, to ensure a retry even if discovery is
2729  * disabled. This avoids being left with incorrect state.
2730  */
2731 static int lnet_peer_ping_failed(struct lnet_peer *lp)
2732 __must_hold(&lp->lp_lock)
2733 {
2734         struct lnet_handle_md mdh;
2735         int rc;
2736
2737         mdh = lp->lp_ping_mdh;
2738         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
2739         lp->lp_state &= ~LNET_PEER_PING_FAILED;
2740         lp->lp_state |= LNET_PEER_FORCE_PING;
2741         rc = lp->lp_ping_error;
2742         lp->lp_ping_error = 0;
2743         spin_unlock(&lp->lp_lock);
2744
2745         if (!LNetMDHandleIsInvalid(mdh))
2746                 LNetMDUnlink(mdh);
2747
2748         CDEBUG(D_NET, "peer %s:%d\n",
2749                libcfs_nid2str(lp->lp_primary_nid), rc);
2750
2751         spin_lock(&lp->lp_lock);
2752         return rc ? rc : LNET_REDISCOVER_PEER;
2753 }
2754
2755 /*
2756  * Select NID to send a Ping or Push to.
2757  */
2758 static lnet_nid_t lnet_peer_select_nid(struct lnet_peer *lp)
2759 {
2760         struct lnet_peer_ni *lpni;
2761
2762         /* Look for a direct-connected NID for this peer. */
2763         lpni = NULL;
2764         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2765                 if (!lnet_get_net_locked(lpni->lpni_peer_net->lpn_net_id))
2766                         continue;
2767                 break;
2768         }
2769         if (lpni)
2770                 return lpni->lpni_nid;
2771
2772         /* Look for a routed-connected NID for this peer. */
2773         lpni = NULL;
2774         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
2775                 if (!lnet_find_rnet_locked(lpni->lpni_peer_net->lpn_net_id))
2776                         continue;
2777                 break;
2778         }
2779         if (lpni)
2780                 return lpni->lpni_nid;
2781
2782         return LNET_NID_ANY;
2783 }
2784
2785 /* Active side of ping. */
2786 static int lnet_peer_send_ping(struct lnet_peer *lp)
2787 __must_hold(&lp->lp_lock)
2788 {
2789         lnet_nid_t pnid;
2790         int nnis;
2791         int rc;
2792         int cpt;
2793
2794         lp->lp_state |= LNET_PEER_PING_SENT;
2795         lp->lp_state &= ~LNET_PEER_FORCE_PING;
2796         spin_unlock(&lp->lp_lock);
2797
2798         cpt = lnet_net_lock_current();
2799         /* Refcount for MD. */
2800         lnet_peer_addref_locked(lp);
2801         pnid = lnet_peer_select_nid(lp);
2802         lnet_net_unlock(cpt);
2803
2804         nnis = MAX(lp->lp_data_nnis, LNET_INTERFACES_MIN);
2805
2806         rc = lnet_send_ping(pnid, &lp->lp_ping_mdh, nnis, lp,
2807                             the_lnet.ln_dc_eqh, false);
2808
2809         /*
2810          * If LNetMDBind() in lnet_send_ping() fails we must drop the
2811          * refcount on the peer ourselves; otherwise LNetMDUnlink() will
2812          * be called, and it eventually drops the refcount for us.
2813          */
2814         if (rc > 0) {
2815                 lnet_net_lock(cpt);
2816                 lnet_peer_decref_locked(lp);
2817                 lnet_net_unlock(cpt);
2818                 rc = -rc; /* change rc back to a negative errno */
2819                 goto fail_error;
2820         } else if (rc < 0) {
2821                 goto fail_error;
2822         }
2823
2824         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2825
2826         spin_lock(&lp->lp_lock);
2827         return 0;
2828
2829 fail_error:
2830         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2831         /*
2832          * The errors that get us here are considered hard errors and
2833          * cause Discovery to terminate. So we clear PING_SENT, but do
2834          * not set either PING_FAILED or FORCE_PING. In fact we need
2835          * to clear PING_FAILED, because the unlink event handler will
2836          * have set it if we called LNetMDUnlink() above.
2837          */
2838         spin_lock(&lp->lp_lock);
2839         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
2840         return rc;
2841 }
2842
2843 /*
2844  * This function exists because you cannot call LNetMDUnlink() from an
2845  * event handler.
2846  */
2847 static int lnet_peer_push_failed(struct lnet_peer *lp)
2848 __must_hold(&lp->lp_lock)
2849 {
2850         struct lnet_handle_md mdh;
2851         int rc;
2852
2853         mdh = lp->lp_push_mdh;
2854         LNetInvalidateMDHandle(&lp->lp_push_mdh);
2855         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
2856         rc = lp->lp_push_error;
2857         lp->lp_push_error = 0;
2858         spin_unlock(&lp->lp_lock);
2859
2860         if (!LNetMDHandleIsInvalid(mdh))
2861                 LNetMDUnlink(mdh);
2862
2863         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2864         spin_lock(&lp->lp_lock);
2865         return rc ? rc : LNET_REDISCOVER_PEER;
2866 }
2867
2868 /*
2869  * Mark the peer as discovered.
2870  */
2871 static int lnet_peer_discovered(struct lnet_peer *lp)
2872 __must_hold(&lp->lp_lock)
2873 {
2874         lp->lp_state |= LNET_PEER_DISCOVERED;
2875         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
2876                           LNET_PEER_REDISCOVER);
2877
2878         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2879
2880         return 0;
2881 }
2882
2883 /* Active side of push. */
2884 static int lnet_peer_send_push(struct lnet_peer *lp)
2885 __must_hold(&lp->lp_lock)
2886 {
2887         struct lnet_ping_buffer *pbuf;
2888         struct lnet_process_id id;
2889         struct lnet_md md;
2890         int cpt;
2891         int rc;
2892
2893         /* Don't push to a non-multi-rail peer. */
2894         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
2895                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
2896                 /* if peer's NIDs are uptodate then peer is discovered */
2897                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
2898                         rc = lnet_peer_discovered(lp);
2899                         return rc;
2900                 }
2901
2902                 return 0;
2903         }
2904
2905         lp->lp_state |= LNET_PEER_PUSH_SENT;
2906         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
2907         spin_unlock(&lp->lp_lock);
2908
2909         cpt = lnet_net_lock_current();
2910         pbuf = the_lnet.ln_ping_target;
2911         lnet_ping_buffer_addref(pbuf);
2912         lnet_net_unlock(cpt);
2913
2914         /* Push source MD */
2915         md.start     = &pbuf->pb_info;
2916         md.length    = LNET_PING_INFO_SIZE(pbuf->pb_nnis);
2917         md.threshold = 2; /* Put/Ack */
2918         md.max_size  = 0;
2919         md.options   = 0;
2920         md.eq_handle = the_lnet.ln_dc_eqh;
2921         md.user_ptr  = lp;
2922
2923         rc = LNetMDBind(md, LNET_UNLINK, &lp->lp_push_mdh);
2924         if (rc) {
2925                 lnet_ping_buffer_decref(pbuf);
2926                 CERROR("Can't bind push source MD: %d\n", rc);
2927                 goto fail_error;
2928         }
2929         cpt = lnet_net_lock_current();
2930         /* Refcount for MD. */
2931         lnet_peer_addref_locked(lp);
2932         id.pid = LNET_PID_LUSTRE;
2933         id.nid = lnet_peer_select_nid(lp);
2934         lnet_net_unlock(cpt);
2935
2936         if (id.nid == LNET_NID_ANY) {
2937                 rc = -EHOSTUNREACH;
2938                 goto fail_unlink;
2939         }
2940
2941         rc = LNetPut(lp->lp_disc_src_nid, lp->lp_push_mdh,
2942                      LNET_ACK_REQ, id, LNET_RESERVED_PORTAL,
2943                      LNET_PROTO_PING_MATCHBITS, 0, 0);
2944
2945         /*
2946          * Reset the discovery source NID. There is no need to restrict
2947          * the source when we get here via lnet_push_update_to_peers();
2948          * it will be set to a specific NID again if discovery is
2949          * initiated from scratch.
2950          */
2951         lp->lp_disc_src_nid = LNET_NID_ANY;
2952
2953         if (rc)
2954                 goto fail_unlink;
2955
2956         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
2957
2958         spin_lock(&lp->lp_lock);
2959         return 0;
2960
2961 fail_unlink:
2962         LNetMDUnlink(lp->lp_push_mdh);
2963         LNetInvalidateMDHandle(&lp->lp_push_mdh);
2964 fail_error:
2965         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nid2str(lp->lp_primary_nid), rc);
2966         /*
2967          * The errors that get us here are considered hard errors and
2968          * cause Discovery to terminate. So we clear PUSH_SENT, but do
2969          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
2970          * because the unlink event handler will have set it if we
2971          * called LNetMDUnlink() above.
2972          */
2973         spin_lock(&lp->lp_lock);
2974         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
2975         return rc;
2976 }
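
/*
 * Hedged sketch of the event flow behind lnet_peer_send_push() (the
 * handler itself lives elsewhere in this file): with md.threshold set
 * to 2 and LNET_ACK_REQ passed to LNetPut(), the MD bound to
 * the_lnet.ln_dc_eqh should see two events before auto-unlinking,
 * roughly:
 *
 *	LNET_EVENT_SEND	- the PUT has left this node (or failed locally)
 *	LNET_EVENT_ACK	- the peer acknowledged receipt of the PUT
 *
 * lnet_discovery_event_handler() is then expected to clear
 * LNET_PEER_PUSH_SENT (or set LNET_PEER_PUSH_FAILED) and drop the
 * references on the ping buffer and peer taken above.
 */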
2977
2978 /*
2979  * An unrecoverable error was encountered during discovery.
2980  * Set error status in peer and abort discovery.
2981  */
2982 static void lnet_peer_discovery_error(struct lnet_peer *lp, int error)
2983 {
2984         CDEBUG(D_NET, "Discovery error %s: %d\n",
2985                libcfs_nid2str(lp->lp_primary_nid), error);
2986
2987         spin_lock(&lp->lp_lock);
2988         lp->lp_dc_error = error;
2989         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2990         lp->lp_state |= LNET_PEER_REDISCOVER;
2991         spin_unlock(&lp->lp_lock);
2992 }
2993
2994 /*
2995  * Mark the peer as to be rediscovered.
2996  */
2997 static int lnet_peer_rediscover(struct lnet_peer *lp)
2998 __must_hold(&lp->lp_lock)
2999 {
3000         lp->lp_state |= LNET_PEER_REDISCOVER;
3001         lp->lp_state &= ~LNET_PEER_DISCOVERING;
3002
3003         CDEBUG(D_NET, "peer %s\n", libcfs_nid2str(lp->lp_primary_nid));
3004
3005         return 0;
3006 }
3007
3008 /*
3009  * Discovering this peer is taking too long. Cancel any Ping or Push
3010  * that discovery is waiting on by unlinking the relevant MDs. The
3011  * lnet_discovery_event_handler() will proceed from here and complete
3012  * the cleanup.
3013  */
3014 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
3015 {
3016         struct lnet_handle_md ping_mdh;
3017         struct lnet_handle_md push_mdh;
3018
3019         LNetInvalidateMDHandle(&ping_mdh);
3020         LNetInvalidateMDHandle(&push_mdh);
3021
3022         spin_lock(&lp->lp_lock);
3023         if (lp->lp_state & LNET_PEER_PING_SENT) {
3024                 ping_mdh = lp->lp_ping_mdh;
3025                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3026         }
3027         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3028                 push_mdh = lp->lp_push_mdh;
3029                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
3030         }
3031         spin_unlock(&lp->lp_lock);
3032
3033         if (!LNetMDHandleIsInvalid(ping_mdh))
3034                 LNetMDUnlink(ping_mdh);
3035         if (!LNetMDHandleIsInvalid(push_mdh))
3036                 LNetMDUnlink(push_mdh);
3037 }
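
/*
 * Note on the pattern above: the MD handles are snapshotted under
 * lp_lock and unlinked only after the lock is dropped, presumably so
 * that LNetMDUnlink() is never called with a spinlock held. The
 * PING_SENT/PUSH_SENT state bits are deliberately left set: as the
 * header comment says, lnet_discovery_event_handler() completes the
 * cleanup when the unlink events arrive.
 */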
3038
3039 /*
3040  * Wait for work to be queued or some other change that must be
3041  * attended to. Returns non-zero if the discovery thread should shut
3042  * down.
3043  */
3044 static int lnet_peer_discovery_wait_for_work(void)
3045 {
3046         int cpt;
3047         int rc = 0;
3048
3049         DEFINE_WAIT(wait);
3050
3051         cpt = lnet_net_lock_current();
3052         for (;;) {
3053                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3054                                 TASK_INTERRUPTIBLE);
3055                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3056                         break;
3057                 if (lnet_push_target_resize_needed())
3058                         break;
3059                 if (!list_empty(&the_lnet.ln_dc_request))
3060                         break;
3061                 if (!list_empty(&the_lnet.ln_msg_resend))
3062                         break;
3063                 lnet_net_unlock(cpt);
3064
3065                 /*
3066                  * Wake up at most once per second to check for peers
3067                  * that have been stuck on the working queue for longer
3068                  * than the peer timeout.
3069                  */
3070                 schedule_timeout(cfs_time_seconds(1));
3071                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3072                 cpt = lnet_net_lock_current();
3073         }
3074         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3075
3076         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3077                 rc = -ESHUTDOWN;
3078
3079         lnet_net_unlock(cpt);
3080
3081         CDEBUG(D_NET, "woken: %d\n", rc);
3082
3083         return rc;
3084 }
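
/*
 * Hedged sketch of the producer side of the wait above: a path that
 * queues a peer for discovery (via lnet_peer_queue_for_discovery())
 * ends up doing, roughly:
 *
 *	lnet_net_lock(LNET_LOCK_EX);
 *	list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
 *	lnet_net_unlock(LNET_LOCK_EX);
 *	wake_up(&the_lnet.ln_dc_waitq);
 *
 * so the thread re-checks its wake conditions with the net lock held.
 */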
3085
3086 /*
3087  * Messages that were pending on a destroyed peer will be put on a global
3088  * resend list. The resend list will be checked by the discovery thread
3089  * when it wakes up, and the messages on it will be resent. These messages
3090  * can still be sendable if the lpni that originally caused the message to
3091  * be re-queued was transferred to another peer.
3092  *
3093  * It is possible that LNet could be shut down while we're iterating
3094  * through the list. lnet_shutdown_lndnets() will attempt to access the
3095  * resend list, but will have to wait until the spinlock is released, by
3096  * which time there shouldn't be any more messages on the resend list.
3097  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3098  * for the messages so they can be released. The other case is that
3099  * lnet_shutdown_lndnets() can finalize all the messages before this
3100  * function can visit the resend list, in which case this function will be
3101  * a no-op.
3102  */
3103 static void lnet_resend_msgs(void)
3104 {
3105         struct lnet_msg *msg, *tmp;
3106         struct list_head resend;
3107         int rc;
3108
3109         INIT_LIST_HEAD(&resend);
3110
3111         spin_lock(&the_lnet.ln_msg_resend_lock);
3112         list_splice(&the_lnet.ln_msg_resend, &resend);
3113         spin_unlock(&the_lnet.ln_msg_resend_lock);
3114
3115         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3116                 list_del_init(&msg->msg_list);
3117                 rc = lnet_send(msg->msg_src_nid_param, msg,
3118                                msg->msg_rtr_nid_param);
3119                 if (rc < 0) {
3120                         CNETERR("Error sending %s to %s: %d\n",
3121                                lnet_msgtyp2str(msg->msg_type),
3122                                libcfs_id2str(msg->msg_target), rc);
3123                         lnet_finalize(msg, rc);
3124                 }
3125         }
3126 }
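
/*
 * Hedged sketch of the enqueueing side, which lives outside this
 * function: a message is expected to reach ln_msg_resend under
 * ln_msg_resend_lock, with the discovery thread then woken, e.g.:
 *
 *	spin_lock(&the_lnet.ln_msg_resend_lock);
 *	list_add_tail(&msg->msg_list, &the_lnet.ln_msg_resend);
 *	spin_unlock(&the_lnet.ln_msg_resend_lock);
 *	wake_up(&the_lnet.ln_dc_waitq);
 */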
3127
3128 /* The discovery thread. */
3129 static int lnet_peer_discovery(void *arg)
3130 {
3131         struct lnet_peer *lp;
3132         int rc;
3133
3134         CDEBUG(D_NET, "started\n");
3135         cfs_block_allsigs();
3136
3137         for (;;) {
3138                 if (lnet_peer_discovery_wait_for_work())
3139                         break;
3140
3141                 lnet_resend_msgs();
3142
3143                 if (lnet_push_target_resize_needed())
3144                         lnet_push_target_resize();
3145
3146                 lnet_net_lock(LNET_LOCK_EX);
3147                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
3148                         lnet_net_unlock(LNET_LOCK_EX);
3149                         break;
3150                 }
3151                 /*
3152                  * Process all incoming discovery work requests. When
3153                  * discovery must wait on a peer to change state, the peer
3154                  * is added to the tail of the ln_dc_working queue. A
3155                  * timestamp records when the peer was added, so that
3156                  * discovery requests that take too long can be timed out.
3157                  */
3158                 while (!list_empty(&the_lnet.ln_dc_request)) {
3159                         lp = list_first_entry(&the_lnet.ln_dc_request,
3160                                               struct lnet_peer, lp_dc_list);
3161                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
3162                         /*
3163                          * Record the time the peer was put on the
3164                          * dc_working queue. The peer must not remain
3165                          * on the queue forever, in case the GET message
3166                          * (for a ping) never gets a REPLY or the PUT
3167                          * message (for a push) never gets an ACK.
3168                          */
3169                         lp->lp_last_queued = ktime_get_real_seconds();
3170                         lnet_net_unlock(LNET_LOCK_EX);
3171
3172                         /*
3173                          * Select an action depending on the state of
3174                          * the peer and whether discovery is disabled.
3175                          * The check for whether discovery is disabled
3176                          * comes after the cases that handle arrived
3177                          * data, clean up after failures, and force a
3178                          * Ping or Push.
3179                          */
3180                         spin_lock(&lp->lp_lock);
3181                         CDEBUG(D_NET, "peer %s state %#x\n",
3182                                 libcfs_nid2str(lp->lp_primary_nid),
3183                                 lp->lp_state);
3184                         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
3185                                 rc = lnet_peer_data_present(lp);
3186                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
3187                                 rc = lnet_peer_ping_failed(lp);
3188                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
3189                                 rc = lnet_peer_push_failed(lp);
3190                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
3191                                 rc = lnet_peer_send_ping(lp);
3192                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
3193                                 rc = lnet_peer_send_push(lp);
3194                         else if (lnet_peer_discovery_disabled)
3195                                 rc = lnet_peer_rediscover(lp);
3196                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
3197                                 rc = lnet_peer_send_ping(lp);
3198                         else if (lnet_peer_needs_push(lp))
3199                                 rc = lnet_peer_send_push(lp);
3200                         else
3201                                 rc = lnet_peer_discovered(lp);
3202                         CDEBUG(D_NET, "peer %s state %#x rc %d\n",
3203                                 libcfs_nid2str(lp->lp_primary_nid),
3204                                 lp->lp_state, rc);
3205                         spin_unlock(&lp->lp_lock);
3206
3207                         lnet_net_lock(LNET_LOCK_EX);
3208                         if (rc == LNET_REDISCOVER_PEER) {
3209                                 list_move(&lp->lp_dc_list,
3210                                           &the_lnet.ln_dc_request);
3211                         } else if (rc) {
3212                                 lnet_peer_discovery_error(lp, rc);
3213                         }
3214                         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
3215                                 lnet_peer_discovery_complete(lp);
3216                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3217                                 break;
3218                 }
3219
3220                 lnet_net_unlock(LNET_LOCK_EX);
3221         }
3222
3223         CDEBUG(D_NET, "stopping\n");
3224         /*
3225          * Clean up before telling lnet_peer_discovery_stop() that
3226          * we're done. Use wake_up() below to somewhat reduce the
3227          * size of the thundering herd if there are multiple threads
3228          * waiting on discovery of a single peer.
3229          */
3230
3231         /* Queue cleanup 1: stop all pending pings and pushes. */
3232         lnet_net_lock(LNET_LOCK_EX);
3233         while (!list_empty(&the_lnet.ln_dc_working)) {
3234                 lp = list_first_entry(&the_lnet.ln_dc_working,
3235                                       struct lnet_peer, lp_dc_list);
3236                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
3237                 lnet_net_unlock(LNET_LOCK_EX);
3238                 lnet_peer_cancel_discovery(lp);
3239                 lnet_net_lock(LNET_LOCK_EX);
3240         }
3241         lnet_net_unlock(LNET_LOCK_EX);
3242
3243         /* Queue cleanup 2: wait for the expired queue to clear. */
3244         while (!list_empty(&the_lnet.ln_dc_expired))
3245                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
3246
3247         /* Queue cleanup 3: clear the request queue. */
3248         lnet_net_lock(LNET_LOCK_EX);
3249         while (!list_empty(&the_lnet.ln_dc_request)) {
3250                 lp = list_first_entry(&the_lnet.ln_dc_request,
3251                                       struct lnet_peer, lp_dc_list);
3252                 lnet_peer_discovery_error(lp, -ESHUTDOWN);
3253                 lnet_peer_discovery_complete(lp);
3254         }
3255         lnet_net_unlock(LNET_LOCK_EX);
3256
3257         LNetEQFree(the_lnet.ln_dc_eqh);
3258         LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3259
3260         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3261         wake_up(&the_lnet.ln_dc_waitq);
3262
3263         CDEBUG(D_NET, "stopped\n");
3264
3265         return 0;
3266 }
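
/*
 * Worked example of the dispatch chain in the thread above: a peer
 * whose state has both LNET_PEER_DATA_PRESENT and LNET_PEER_FORCE_PING
 * set is handled for the arrived data first; the forced ping is only
 * sent on a later pass, once the peer is back on ln_dc_request.
 * Failure handling (PING_FAILED/PUSH_FAILED) likewise takes precedence
 * over sending anything new, and lnet_peer_discovery_disabled is only
 * consulted after all of those cases.
 */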
3267
3268 /* ln_api_mutex is held on entry. */
3269 int lnet_peer_discovery_start(void)
3270 {
3271         struct task_struct *task;
3272         int rc;
3273
3274         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
3275                 return -EALREADY;
3276
3277         rc = LNetEQAlloc(0, lnet_discovery_event_handler, &the_lnet.ln_dc_eqh);
3278         if (rc != 0) {
3279                 CERROR("Can't allocate discovery EQ: %d\n", rc);
3280                 return rc;
3281         }
3282
3283         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
3284         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
3285         if (IS_ERR(task)) {
3286                 rc = PTR_ERR(task);
3287                 CERROR("Can't start peer discovery thread: %d\n", rc);
3288
3289                 LNetEQFree(the_lnet.ln_dc_eqh);
3290                 LNetInvalidateEQHandle(&the_lnet.ln_dc_eqh);
3291
3292                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
3293         }
3294
3295         CDEBUG(D_NET, "discovery start: %d\n", rc);
3296
3297         return rc;
3298 }
3299
3300 /* ln_api_mutex is held on entry. */
3301 void lnet_peer_discovery_stop(void)
3302 {
3303         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
3304                 return;
3305
3306         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
3307         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
3308         wake_up(&the_lnet.ln_dc_waitq);
3309
3310         wait_event(the_lnet.ln_dc_waitq,
3311                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
3312
3313         LASSERT(list_empty(&the_lnet.ln_dc_request));
3314         LASSERT(list_empty(&the_lnet.ln_dc_working));
3315         LASSERT(list_empty(&the_lnet.ln_dc_expired));
3316
3317         CDEBUG(D_NET, "discovery stopped\n");
3318 }
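
/*
 * The stop handshake in brief: this function flips ln_dc_state from
 * RUNNING to STOPPING and wakes the thread; the thread notices
 * STOPPING in lnet_peer_discovery_wait_for_work() or its main loop,
 * drains the request, working and expired queues, sets ln_dc_state to
 * SHUTDOWN, and issues the final wake_up() that releases the
 * wait_event() above.
 */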
3319
3320 /* Debugging */
3321
3322 void
3323 lnet_debug_peer(lnet_nid_t nid)
3324 {
3325         char                    *aliveness = "NA";
3326         struct lnet_peer_ni     *lp;
3327         int                     cpt;
3328
3329         cpt = lnet_cpt_of_nid(nid, NULL);
3330         lnet_net_lock(cpt);
3331
3332         lp = lnet_nid2peerni_locked(nid, LNET_NID_ANY, cpt);
3333         if (IS_ERR(lp)) {
3334                 lnet_net_unlock(cpt);
3335                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nid2str(nid));
3336                 return;
3337         }
3338
3339         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
3340                 aliveness = lp->lpni_alive ? "up" : "down";
3341
3342         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
3343                libcfs_nid2str(lp->lpni_nid), atomic_read(&lp->lpni_refcount),
3344                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
3345                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
3346                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
3347
3348         lnet_peer_ni_decref_locked(lp);
3349
3350         lnet_net_unlock(cpt);
3351 }
3352
3353 /* Gathering information for userspace. */
3354
3355 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
3356                           char aliveness[LNET_MAX_STR_LEN],
3357                           __u32 *cpt_iter, __u32 *refcount,
3358                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
3359                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
3360                           __u32 *peer_tx_qnob)
3361 {
3362         struct lnet_peer_table          *peer_table;
3363         struct lnet_peer_ni             *lp;
3364         int                             j;
3365         int                             lncpt;
3366         bool                            found = false;
3367
3368         /* get the number of CPTs */
3369         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3370
3371         /* if the cpt number to be examined is >= the number of cpts in
3372          * the system then indicate that there are no more cpts to examine
3373          */
3374         if (*cpt_iter >= lncpt)
3375                 return -ENOENT;
3376
3377         /* get the current table */
3378         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
3379         /* if the ptable is NULL then there are no more cpts to examine */
3380         if (peer_table == NULL)
3381                 return -ENOENT;
3382
3383         lnet_net_lock(*cpt_iter);
3384
3385         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
3386                 struct list_head *peers = &peer_table->pt_hash[j];
3387
3388                 list_for_each_entry(lp, peers, lpni_hashlist) {
3389                         if (peer_index-- > 0)
3390                                 continue;
3391
3392                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
3393                         if (lnet_isrouter(lp) ||
3394                                 lnet_peer_aliveness_enabled(lp))
3395                                 snprintf(aliveness, LNET_MAX_STR_LEN, "%s",
3396                                          lp->lpni_alive ? "up" : "down");
3397
3398                         *nid = lp->lpni_nid;
3399                         *refcount = atomic_read(&lp->lpni_refcount);
3400                         *ni_peer_tx_credits =
3401                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
3402                         *peer_tx_credits = lp->lpni_txcredits;
3403                         *peer_rtr_credits = lp->lpni_rtrcredits;
3404                         *peer_min_rtr_credits = lp->lpni_minrtrcredits;
3405                         *peer_tx_qnob = lp->lpni_txqnob;
3406
3407                         found = true;
3408                 }
3409
3410         }
3411         lnet_net_unlock(*cpt_iter);
3412
3413         *cpt_iter = lncpt;
3414
3415         return found ? 0 : -ENOENT;
3416 }
3417
3418 /* ln_api_mutex is held, which keeps the peer list stable */
3419 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
3420 {
3421         struct lnet_ioctl_element_stats *lpni_stats;
3422         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
3423         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
3424         struct lnet_peer_ni_credit_info *lpni_info;
3425         struct lnet_peer_ni *lpni;
3426         struct lnet_peer *lp;
3427         lnet_nid_t nid;
3428         __u32 size;
3429         int rc;
3430
3431         lp = lnet_find_peer(cfg->prcfg_prim_nid);
3432
3433         if (!lp) {
3434                 rc = -ENOENT;
3435                 goto out;
3436         }
3437
3438         size = sizeof(nid) + sizeof(*lpni_info) + sizeof(*lpni_stats)
3439                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
3440         size *= lp->lp_nnis;
3441         if (size > cfg->prcfg_size) {
3442                 cfg->prcfg_size = size;
3443                 rc = -E2BIG;
3444                 goto out_lp_decref;
3445         }
3446
3447         cfg->prcfg_prim_nid = lp->lp_primary_nid;
3448         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
3449         cfg->prcfg_cfg_nid = lp->lp_primary_nid;
3450         cfg->prcfg_count = lp->lp_nnis;
3451         cfg->prcfg_size = size;
3452         cfg->prcfg_state = lp->lp_state;
3453
3454         /* Allocate helper buffers. */
3455         rc = -ENOMEM;
3456         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
3457         if (!lpni_info)
3458                 goto out_lp_decref;
3459         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
3460         if (!lpni_stats)
3461                 goto out_free_info;
3462         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
3463         if (!lpni_msg_stats)
3464                 goto out_free_stats;
3465         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
3466         if (!lpni_hstats)
3467                 goto out_free_msg_stats;
3468
3469
3470         lpni = NULL;
3471         rc = -EFAULT;
3472         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
3473                 nid = lpni->lpni_nid;
3474                 if (copy_to_user(bulk, &nid, sizeof(nid)))
3475                         goto out_free_hstats;
3476                 bulk += sizeof(nid);
3477
3478                 memset(lpni_info, 0, sizeof(*lpni_info));
3479                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
3480                 if (lnet_isrouter(lpni) ||
3481                         lnet_peer_aliveness_enabled(lpni))
3482                         snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN,
3483                                 "%s", lpni->lpni_alive ? "up" : "down");
3484
3485                 lpni_info->cr_refcount = atomic_read(&lpni->lpni_refcount);
3486                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
3487                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
3488                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
3489                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
3490                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
3491                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
3492                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
3493                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
3494                         goto out_free_hstats;
3495                 bulk += sizeof(*lpni_info);
3496
3497                 memset(lpni_stats, 0, sizeof(*lpni_stats));
3498                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
3499                                                             LNET_STATS_TYPE_SEND);
3500                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
3501                                                             LNET_STATS_TYPE_RECV);
3502                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
3503                                                             LNET_STATS_TYPE_DROP);
3504                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
3505                         goto out_free_hstats;
3506                 bulk += sizeof(*lpni_stats);
3507                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
3508                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
3509                         goto out_free_hstats;
3510                 bulk += sizeof(*lpni_msg_stats);
3511                 lpni_hstats->hlpni_network_timeout =
3512                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
3513                 lpni_hstats->hlpni_remote_dropped =
3514                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
3515                 lpni_hstats->hlpni_remote_timeout =
3516                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
3517                 lpni_hstats->hlpni_remote_error =
3518                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
3519                 lpni_hstats->hlpni_health_value =
3520                   atomic_read(&lpni->lpni_healthv);
3521                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
3522                         goto out_free_hstats;
3523                 bulk += sizeof(*lpni_hstats);
3524         }
3525         rc = 0;
3526
3527 out_free_hstats:
3528         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
3529 out_free_msg_stats:
3530         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
3531 out_free_stats:
3532         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
3533 out_free_info:
3534         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
3535 out_lp_decref:
3536         lnet_peer_decref_locked(lp);
3537 out:
3538         return rc;
3539 }
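
/*
 * Layout of the userspace bulk buffer filled above: for each of the
 * lp_nnis peer NIs, a packed sequence of
 *
 *	lnet_nid_t				nid
 *	struct lnet_peer_ni_credit_info		lpni_info
 *	struct lnet_ioctl_element_stats		lpni_stats
 *	struct lnet_ioctl_element_msg_stats	lpni_msg_stats
 *	struct lnet_ioctl_peer_ni_hstats	lpni_hstats
 *
 * matching the size checked against cfg->prcfg_size at the top; a
 * userspace tool would be expected to parse the records in this order.
 */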
3540
3541 void
3542 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
3543 {
3544         /* the monitor thread could have shut down and cleaned up the queues */
3545         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
3546                 return;
3547
3548         if (list_empty(&lpni->lpni_recovery) &&
3549             atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
3550                 CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
3551                         libcfs_nid2str(lpni->lpni_nid),
3552                         atomic_read(&lpni->lpni_healthv));
3553                 list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
3554                 lnet_peer_ni_addref_locked(lpni);
3555         }
3556 }
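
/*
 * A reference is taken on every lpni placed on ln_mt_peerNIRecovq, so
 * whichever thread removes an entry (presumably the monitor thread
 * that owns the queue) must pair the removal with
 * lnet_peer_ni_decref_locked().
 */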
3557
3558 /* Call with the ln_api_mutex held */
3559 void
3560 lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
3561 {
3562         struct lnet_peer_table *ptable;
3563         struct lnet_peer *lp;
3564         struct lnet_peer_net *lpn;
3565         struct lnet_peer_ni *lpni;
3566         int lncpt;
3567         int cpt;
3568
3569         if (the_lnet.ln_state != LNET_STATE_RUNNING)
3570                 return;
3571
3572         if (!all) {
3573                 lnet_net_lock(LNET_LOCK_EX);
3574                 lpni = lnet_find_peer_ni_locked(nid);
3575                 if (!lpni) {
3576                         lnet_net_unlock(LNET_LOCK_EX);
3577                         return;
3578                 }
3579                 atomic_set(&lpni->lpni_healthv, value);
3580                 lnet_peer_ni_add_to_recoveryq_locked(lpni);
3581                 lnet_peer_ni_decref_locked(lpni);
3582                 lnet_net_unlock(LNET_LOCK_EX);
3583                 return;
3584         }
3585
3586         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
3587
3588         /*
3589          * Walk all the peers and set the health value of each of
3590          * their peer NIs to the requested value.
3591          */
3592         lnet_net_lock(LNET_LOCK_EX);
3593         for (cpt = 0; cpt < lncpt; cpt++) {
3594                 ptable = the_lnet.ln_peer_tables[cpt];
3595                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
3596                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
3597                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
3598                                                     lpni_peer_nis) {
3599                                         atomic_set(&lpni->lpni_healthv, value);
3600                                         lnet_peer_ni_add_to_recoveryq_locked(lpni);
3601                                 }
3602                         }
3603                 }
3604         }
3605         lnet_net_unlock(LNET_LOCK_EX);
3606 }
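
/*
 * Hedged usage sketch: to restore every peer NI to full health (the
 * "all" case walked above), a caller holding ln_api_mutex could do:
 *
 *	lnet_peer_ni_set_healthv(LNET_NID_ANY, LNET_MAX_HEALTH_VALUE,
 *				 true);
 *
 * The nid argument is ignored when all == true.
 */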
3607