LU-17062 lnet: Update lnet_peer_*_decref_locked usage
[fs/lustre-release.git] / lnet / lnet / peer.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lnet/lnet/peer.c
32  */
33
34 #define DEBUG_SUBSYSTEM S_LNET
35
36 #include <linux/sched.h>
37 #ifdef HAVE_SCHED_HEADERS
38 #include <linux/sched/signal.h>
39 #endif
40 #include <linux/uaccess.h>
41
42 #include <lnet/udsp.h>
43 #include <lnet/lib-lnet.h>
44 #include <uapi/linux/lnet/lnet-dlc.h>
45
46 /* Value indicating that recovery needs to re-check a peer immediately. */
47 #define LNET_REDISCOVER_PEER    (1)
48
49 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp);
50 static int lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
51                             unsigned int flags);
52
53 static void
54 lnet_peer_remove_from_remote_list(struct lnet_peer_ni *lpni)
55 {
56         if (!list_empty(&lpni->lpni_on_remote_peer_ni_list)) {
57                 list_del_init(&lpni->lpni_on_remote_peer_ni_list);
58                 lnet_peer_ni_decref_locked(lpni);
59         }
60 }
61
62 void
63 lnet_peer_net_added(struct lnet_net *net)
64 {
65         struct lnet_peer_ni *lpni, *tmp;
66
67         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
68                                  lpni_on_remote_peer_ni_list) {
69
70                 if (LNET_NID_NET(&lpni->lpni_nid) == net->net_id) {
71                         lpni->lpni_net = net;
72
73                         spin_lock(&lpni->lpni_lock);
74                         lpni->lpni_txcredits =
75                                 lpni->lpni_net->net_tunables.lct_peer_tx_credits;
76                         lpni->lpni_mintxcredits = lpni->lpni_txcredits;
77                         lpni->lpni_rtrcredits =
78                                 lnet_peer_buffer_credits(lpni->lpni_net);
79                         lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
80                         spin_unlock(&lpni->lpni_lock);
81
82                         lnet_peer_remove_from_remote_list(lpni);
83                 }
84         }
85 }
86
87 static void
88 lnet_peer_tables_destroy(void)
89 {
90         struct lnet_peer_table  *ptable;
91         struct list_head        *hash;
92         int                     i;
93         int                     j;
94
95         if (!the_lnet.ln_peer_tables)
96                 return;
97
98         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
99                 hash = ptable->pt_hash;
100                 if (!hash) /* not initialized */
101                         break;
102
103                 LASSERT(list_empty(&ptable->pt_zombie_list));
104
105                 ptable->pt_hash = NULL;
106                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
107                         LASSERT(list_empty(&hash[j]));
108
109                 CFS_FREE_PTR_ARRAY(hash, LNET_PEER_HASH_SIZE);
110         }
111
112         cfs_percpt_free(the_lnet.ln_peer_tables);
113         the_lnet.ln_peer_tables = NULL;
114 }
115
116 int
117 lnet_peer_tables_create(void)
118 {
119         struct lnet_peer_table  *ptable;
120         struct list_head        *hash;
121         int                     i;
122         int                     j;
123
124         the_lnet.ln_peer_tables = cfs_percpt_alloc(lnet_cpt_table(),
125                                                    sizeof(*ptable));
126         if (the_lnet.ln_peer_tables == NULL) {
127                 CERROR("Failed to allocate cpu-partition peer tables\n");
128                 return -ENOMEM;
129         }
130
131         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
132                 LIBCFS_CPT_ALLOC(hash, lnet_cpt_table(), i,
133                                  LNET_PEER_HASH_SIZE * sizeof(*hash));
134                 if (hash == NULL) {
135                         CERROR("Failed to create peer hash table\n");
136                         lnet_peer_tables_destroy();
137                         return -ENOMEM;
138                 }
139
140                 spin_lock_init(&ptable->pt_zombie_lock);
141                 INIT_LIST_HEAD(&ptable->pt_zombie_list);
142
143                 INIT_LIST_HEAD(&ptable->pt_peer_list);
144
145                 for (j = 0; j < LNET_PEER_HASH_SIZE; j++)
146                         INIT_LIST_HEAD(&hash[j]);
147                 ptable->pt_hash = hash; /* sign of initialization */
148         }
149
150         return 0;
151 }
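
/*
 * Note: each CPT gets its own lnet_peer_table, holding a pt_hash array of
 * LNET_PEER_HASH_SIZE chains for peer_ni lookup, a pt_zombie_list for
 * peer_nis whose last reference has not yet been dropped, and a
 * pt_peer_list of the peers hashed to that CPT. A non-NULL pt_hash is what
 * marks a table as initialized, as checked in lnet_peer_tables_destroy().
 */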
152
153 static struct lnet_peer_ni *
154 lnet_peer_ni_alloc(struct lnet_nid *nid)
155 {
156         struct lnet_peer_ni *lpni;
157         struct lnet_net *net;
158         int cpt;
159
160         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
161
162         LIBCFS_CPT_ALLOC(lpni, lnet_cpt_table(), cpt, sizeof(*lpni));
163         if (!lpni)
164                 return NULL;
165
166         INIT_LIST_HEAD(&lpni->lpni_txq);
167         INIT_LIST_HEAD(&lpni->lpni_hashlist);
168         INIT_LIST_HEAD(&lpni->lpni_peer_nis);
169         INIT_LIST_HEAD(&lpni->lpni_recovery);
170         INIT_LIST_HEAD(&lpni->lpni_on_remote_peer_ni_list);
171         INIT_LIST_HEAD(&lpni->lpni_rtr_pref_nids);
172         LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
173         kref_init(&lpni->lpni_kref);
174         lpni->lpni_sel_priority = LNET_MAX_SELECTION_PRIORITY;
175
176         spin_lock_init(&lpni->lpni_lock);
177
178         if (lnet_peers_start_down())
179                 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
180         else
181                 lpni->lpni_ns_status = LNET_NI_STATUS_UP;
182         lpni->lpni_ping_feats = LNET_PING_FEAT_INVAL;
183         lpni->lpni_nid = *nid;
184         lpni->lpni_cpt = cpt;
185         atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
186
187         net = lnet_get_net_locked(LNET_NID_NET(nid));
188         lpni->lpni_net = net;
189         if (net) {
190                 lpni->lpni_txcredits = net->net_tunables.lct_peer_tx_credits;
191                 lpni->lpni_mintxcredits = lpni->lpni_txcredits;
192                 lpni->lpni_rtrcredits = lnet_peer_buffer_credits(net);
193                 lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
194         } else {
195                 /*
196                  * This peer_ni is not on a local network, so we
197                  * cannot add the credits here. In case the net is
198                  * added later, add the peer_ni to the remote peer ni
199                  * list so it can be easily found and revisited.
200                  */
201                 /* FIXME: per-net implementation instead? */
202                 lnet_peer_ni_addref_locked(lpni);
203                 list_add_tail(&lpni->lpni_on_remote_peer_ni_list,
204                               &the_lnet.ln_remote_peer_ni_list);
205         }
206
207         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
208
209         return lpni;
210 }
211
212 static struct lnet_peer_net *
213 lnet_peer_net_alloc(__u32 net_id)
214 {
215         struct lnet_peer_net *lpn;
216
217         LIBCFS_CPT_ALLOC(lpn, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lpn));
218         if (!lpn)
219                 return NULL;
220
221         INIT_LIST_HEAD(&lpn->lpn_peer_nets);
222         INIT_LIST_HEAD(&lpn->lpn_peer_nis);
223         lpn->lpn_net_id = net_id;
224         lpn->lpn_sel_priority = LNET_MAX_SELECTION_PRIORITY;
225
226         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
227
228         return lpn;
229 }
230
231 void
232 lnet_destroy_peer_net_locked(struct lnet_peer_net *lpn)
233 {
234         struct lnet_peer *lp;
235
236         CDEBUG(D_NET, "%p net %s\n", lpn, libcfs_net2str(lpn->lpn_net_id));
237
238         LASSERT(atomic_read(&lpn->lpn_refcount) == 0);
239         LASSERT(list_empty(&lpn->lpn_peer_nis));
240         LASSERT(list_empty(&lpn->lpn_peer_nets));
241         lp = lpn->lpn_peer;
242         lpn->lpn_peer = NULL;
243         LIBCFS_FREE(lpn, sizeof(*lpn));
244
245         lnet_peer_decref_locked(lp);
246 }
247
248 static struct lnet_peer *
249 lnet_peer_alloc(struct lnet_nid *nid)
250 {
251         struct lnet_peer *lp;
252
253         LIBCFS_CPT_ALLOC(lp, lnet_cpt_table(), CFS_CPT_ANY, sizeof(*lp));
254         if (!lp)
255                 return NULL;
256
257         INIT_LIST_HEAD(&lp->lp_rtrq);
258         INIT_LIST_HEAD(&lp->lp_routes);
259         INIT_LIST_HEAD(&lp->lp_peer_list);
260         INIT_LIST_HEAD(&lp->lp_peer_nets);
261         INIT_LIST_HEAD(&lp->lp_dc_list);
262         INIT_LIST_HEAD(&lp->lp_dc_pendq);
263         INIT_LIST_HEAD(&lp->lp_rtr_list);
264         init_waitqueue_head(&lp->lp_dc_waitq);
265         spin_lock_init(&lp->lp_lock);
266         lp->lp_primary_nid = *nid;
267         lp->lp_disc_src_nid = LNET_ANY_NID;
268         lp->lp_disc_dst_nid = LNET_ANY_NID;
269         if (lnet_peers_start_down())
270                 lp->lp_alive = false;
271         else
272                 lp->lp_alive = true;
273
274         /*
275          * all peers created on a router should have health on
276          * if it's not already on.
277          */
278         if (the_lnet.ln_routing && !lnet_health_sensitivity)
279                 lp->lp_health_sensitivity = 1;
280
281         /*
282          * Turn off discovery for loopback peer. If we're creating a peer
283          * for the loopback interface then that was initiated when we
284          * attempted to send a message over the loopback. There is no need
285          * to ever use a different interface when sending messages to
286          * ourselves.
287          */
288         if (nid_is_lo0(nid))
289                 lp->lp_state = LNET_PEER_NO_DISCOVERY;
290         lp->lp_cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
291
292         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
293
294         return lp;
295 }
296
297 void
298 lnet_destroy_peer_locked(struct lnet_peer *lp)
299 {
300         CDEBUG(D_NET, "%p nid %s\n", lp, libcfs_nidstr(&lp->lp_primary_nid));
301
302         LASSERT(atomic_read(&lp->lp_refcount) == 0);
303         LASSERT(lp->lp_rtr_refcount == 0);
304         LASSERT(list_empty(&lp->lp_peer_nets));
305         LASSERT(list_empty(&lp->lp_peer_list));
306         LASSERT(list_empty(&lp->lp_dc_list));
307
308         if (lp->lp_data)
309                 lnet_ping_buffer_decref(lp->lp_data);
310
311         /*
312          * if there are messages still on the pending queue, then make
313          * sure to queue them on the ln_msg_resend list so they can be
314          * resent at a later point if the discovery thread is still
315          * running.
316          * If the discovery thread has stopped, then the wakeup will be a
317          * no-op, and it is expected the lnet_shutdown_lndnets() will
318          * eventually be called, which will traverse this list and
319          * finalize the messages on the list.
320          * We cannot resend them now because we're holding the cpt lock.
321          * Releasing the lock can cause an inconsistent state.
322          */
323         spin_lock(&the_lnet.ln_msg_resend_lock);
324         spin_lock(&lp->lp_lock);
325         list_splice(&lp->lp_dc_pendq, &the_lnet.ln_msg_resend);
326         spin_unlock(&lp->lp_lock);
327         spin_unlock(&the_lnet.ln_msg_resend_lock);
328         wake_up(&the_lnet.ln_dc_waitq);
329
330         LIBCFS_FREE(lp, sizeof(*lp));
331 }
332
333 /*
334  * Detach a peer_ni from its peer_net. If this was the last peer_ni on
335  * that peer_net, detach the peer_net from the peer.
336  *
337  * Call with lnet_net_lock/EX held
338  */
339 static void
340 lnet_peer_detach_peer_ni_locked(struct lnet_peer_ni *lpni)
341 {
342         struct lnet_peer_table *ptable;
343         struct lnet_peer_net *lpn;
344         struct lnet_peer *lp;
345
346         /*
347          * Belts and suspenders: gracefully handle teardown of a
348          * partially connected peer_ni.
349          */
350         lpn = lpni->lpni_peer_net;
351
352         list_del_init(&lpni->lpni_peer_nis);
353         /*
354          * If there are no lpni's left, we detach lpn from
355          * lp_peer_nets, so it cannot be found anymore.
356          */
357         if (list_empty(&lpn->lpn_peer_nis))
358                 list_del_init(&lpn->lpn_peer_nets);
359
360         /* Update peer NID count. */
361         lp = lpn->lpn_peer;
362         lp->lp_nnis--;
363
364         /*
365          * If there are no more peer nets, make the peer unfindable
366          * via the peer_tables.
367          *
368          * Otherwise, if the peer is DISCOVERED, tell discovery to
369          * take another look at it. This is a no-op if discovery for
370          * this peer did the detaching.
371          */
372         if (list_empty(&lp->lp_peer_nets)) {
373                 list_del_init(&lp->lp_peer_list);
374                 ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
375                 ptable->pt_peers--;
376         } else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
377                 /* Discovery isn't running, nothing to do here. */
378         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
379                 lnet_peer_queue_for_discovery(lp);
380                 wake_up(&the_lnet.ln_dc_waitq);
381         }
382         CDEBUG(D_NET, "peer %s NID %s\n",
383                 libcfs_nidstr(&lp->lp_primary_nid),
384                 libcfs_nidstr(&lpni->lpni_nid));
385 }
386
387 /* called with lnet_net_lock LNET_LOCK_EX held */
388 static int
389 lnet_peer_ni_del_locked(struct lnet_peer_ni *lpni, bool force)
390 {
391         struct lnet_peer_table *ptable = NULL;
392
393         /* don't remove a peer_ni if it's also a gateway */
394         if (lnet_isrouter(lpni) && !force) {
395                 CERROR("Peer NI %s is a gateway. Cannot delete it\n",
396                        libcfs_nidstr(&lpni->lpni_nid));
397                 return -EBUSY;
398         }
399
400         lnet_peer_remove_from_remote_list(lpni);
401
402         /* remove peer ni from the hash list. */
403         list_del_init(&lpni->lpni_hashlist);
404
405         /*
406          * indicate the peer is being deleted so the monitor thread can
407          * remove it from the recovery queue.
408          */
409         spin_lock(&lpni->lpni_lock);
410         lpni->lpni_state |= LNET_PEER_NI_DELETING;
411         spin_unlock(&lpni->lpni_lock);
412
413         /* decrement the ref count on the peer table */
414         ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
415
416         /*
417          * The peer_ni can no longer be found with a lookup. But there
418          * can be current users, so keep track of it on the zombie
419          * list until the reference count has gone to zero.
420          *
421          * The last reference may be lost in a place where the
422          * lnet_net_lock locks only a single cpt, and that cpt may not
423          * be lpni->lpni_cpt. So the zombie list of lnet_peer_table
424          * has its own lock.
425          */
426         spin_lock(&ptable->pt_zombie_lock);
427         list_add(&lpni->lpni_hashlist, &ptable->pt_zombie_list);
428         ptable->pt_zombies++;
429         spin_unlock(&ptable->pt_zombie_lock);
430
431         /* no need to keep this peer_ni in the hierarchy anymore */
432         lnet_peer_detach_peer_ni_locked(lpni);
433
434         /* remove hashlist reference on peer_ni */
435         lnet_peer_ni_decref_locked(lpni);
436
437         return 0;
438 }
439
440 void lnet_peer_uninit(void)
441 {
442         struct lnet_peer_ni *lpni, *tmp;
443
444         lnet_net_lock(LNET_LOCK_EX);
445
446         /* remove all peer_nis from the remote peer and the hash list */
447         list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_remote_peer_ni_list,
448                                  lpni_on_remote_peer_ni_list)
449                 lnet_peer_ni_del_locked(lpni, false);
450
451         lnet_peer_tables_destroy();
452
453         lnet_net_unlock(LNET_LOCK_EX);
454 }
455
456 static int
457 lnet_peer_del_locked(struct lnet_peer *peer)
458 {
459         struct lnet_peer_ni *lpni = NULL, *lpni2;
460         int rc = 0, rc2 = 0;
461
462         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&peer->lp_primary_nid));
463
464         spin_lock(&peer->lp_lock);
465         peer->lp_state |= LNET_PEER_MARK_DELETED;
466         spin_unlock(&peer->lp_lock);
467
468         lpni = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
469         while (lpni != NULL) {
470                 lpni2 = lnet_get_next_peer_ni_locked(peer, NULL, lpni);
471                 rc = lnet_peer_ni_del_locked(lpni, false);
472                 if (rc != 0)
473                         rc2 = rc;
474                 lpni = lpni2;
475         }
476
477         return rc2;
478 }
479
480 /*
481  * Discovering this peer is taking too long. Cancel any Ping or Push
482  * that discovery is waiting on by unlinking the relevant MDs. The
483  * lnet_discovery_event_handler() will proceed from here and complete
484  * the cleanup.
485  */
486 static void lnet_peer_cancel_discovery(struct lnet_peer *lp)
487 {
488         struct lnet_handle_md ping_mdh;
489         struct lnet_handle_md push_mdh;
490
491         LNetInvalidateMDHandle(&ping_mdh);
492         LNetInvalidateMDHandle(&push_mdh);
493
494         spin_lock(&lp->lp_lock);
495         if (lp->lp_state & LNET_PEER_PING_SENT) {
496                 ping_mdh = lp->lp_ping_mdh;
497                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
498         }
499         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
500                 push_mdh = lp->lp_push_mdh;
501                 LNetInvalidateMDHandle(&lp->lp_push_mdh);
502         }
503         spin_unlock(&lp->lp_lock);
504
505         if (!LNetMDHandleIsInvalid(ping_mdh))
506                 LNetMDUnlink(ping_mdh);
507         if (!LNetMDHandleIsInvalid(push_mdh))
508                 LNetMDUnlink(push_mdh);
509 }
510
511 static int
512 lnet_peer_del(struct lnet_peer *peer)
513 {
514         int rc;
515
516         lnet_peer_cancel_discovery(peer);
517         lnet_net_lock(LNET_LOCK_EX);
518         rc = lnet_peer_del_locked(peer);
519         lnet_net_unlock(LNET_LOCK_EX);
520
521         return rc;
522 }
523
524 /*
525  * Delete a NID from a peer. Call with ln_api_mutex held.
526  *
527  * Error codes:
528  *  -EPERM:  Non-DLC deletion from DLC-configured peer.
529  *  -ENOENT: No lnet_peer_ni corresponding to the nid.
530  *  -ECHILD: The lnet_peer_ni isn't connected to the peer.
531  *  -EBUSY:  The lnet_peer_ni is the primary, and not the only peer_ni.
532  */
533 static int
534 lnet_peer_del_nid(struct lnet_peer *lp, struct lnet_nid *nid,
535                   unsigned int flags)
536 {
537         struct lnet_peer_ni *lpni;
538         struct lnet_nid primary_nid = lp->lp_primary_nid;
539         int rc = 0;
540         bool force = (flags & LNET_PEER_RTR_NI_FORCE_DEL) ? true : false;
541
542         if (!(flags & LNET_PEER_CONFIGURED)) {
543                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
544                         rc = -EPERM;
545                         goto out;
546                 }
547         }
548
549         /* If we're asked to lock down the primary NID, we shouldn't be
550          * deleting it.
551          */
552         if (lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
553             nid_same(&primary_nid, nid)) {
554                 rc = -EPERM;
555                 goto out;
556         }
557
558         lpni = lnet_peer_ni_find_locked(nid);
559         if (!lpni) {
560                 rc = -ENOENT;
561                 goto out;
562         }
563         if (lp != lpni->lpni_peer_net->lpn_peer) {
564                 rc = -ECHILD;
565                 lnet_peer_ni_decref_locked(lpni);
566                 goto out;
567         }
568
569         /*
570          * This function only allows deletion of the primary NID if it
571          * is the only NID.
572          */
573         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && !force) {
574                 rc = -EBUSY;
575                 lnet_peer_ni_decref_locked(lpni);
576                 goto out;
577         }
578
579         lnet_net_lock(LNET_LOCK_EX);
580
581         if (nid_same(nid, &lp->lp_primary_nid) && lp->lp_nnis != 1 && force) {
582                 struct lnet_peer_ni *lpni2;
583                 /* assign the next peer_ni to be the primary */
584                 lpni2 = lnet_get_next_peer_ni_locked(lp, NULL, lpni);
585                 LASSERT(lpni2);
586                 lp->lp_primary_nid = lpni2->lpni_nid;
587         }
588         rc = lnet_peer_ni_del_locked(lpni, force);
589         lnet_peer_ni_decref_locked(lpni);
590
591         lnet_net_unlock(LNET_LOCK_EX);
592
593 out:
594         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
595                libcfs_nidstr(&primary_nid), libcfs_nidstr(nid),
596                flags, rc);
597
598         return rc;
599 }
600
601 static void
602 lnet_peer_table_cleanup_locked(struct lnet_net *net,
603                                struct lnet_peer_table *ptable)
604 {
605         int                      i;
606         struct lnet_peer_ni     *next;
607         struct lnet_peer_ni     *lpni;
608         struct lnet_peer        *peer;
609
610         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
611                 list_for_each_entry_safe(lpni, next, &ptable->pt_hash[i],
612                                          lpni_hashlist) {
613                         if (net != NULL && net != lpni->lpni_net)
614                                 continue;
615
616                         peer = lpni->lpni_peer_net->lpn_peer;
617                         if (!nid_same(&peer->lp_primary_nid,
618                                        &lpni->lpni_nid)) {
619                                 lnet_peer_ni_del_locked(lpni, false);
620                                 continue;
621                         }
622                         /*
623                          * Removing the primary NID implies removing
624                          * the entire peer. Advance next beyond any
625                          * peer_ni that belongs to the same peer.
626                          */
627                         list_for_each_entry_from(next, &ptable->pt_hash[i],
628                                                  lpni_hashlist) {
629                                 if (next->lpni_peer_net->lpn_peer != peer)
630                                         break;
631                         }
632                         lnet_peer_del_locked(peer);
633                 }
634         }
635 }
636
637 static void
638 lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
639 {
640         wait_var_event_warning(&ptable->pt_zombies,
641                                ptable->pt_zombies == 0,
642                                "Waiting for %d zombies on peer table\n",
643                                ptable->pt_zombies);
644 }
645
646 static void
647 lnet_peer_table_del_rtrs_locked(struct lnet_net *net,
648                                 struct lnet_peer_table *ptable)
649 {
650         struct lnet_peer_ni     *lp;
651         struct lnet_peer_ni     *tmp;
652         struct lnet_nid         gw_nid;
653         int                     i;
654
655         for (i = 0; i < LNET_PEER_HASH_SIZE; i++) {
656                 list_for_each_entry_safe(lp, tmp, &ptable->pt_hash[i],
657                                          lpni_hashlist) {
658                         if (net != lp->lpni_net)
659                                 continue;
660
661                         if (!lnet_isrouter(lp))
662                                 continue;
663
664                         gw_nid = lp->lpni_peer_net->lpn_peer->lp_primary_nid;
665
666                         lnet_net_unlock(LNET_LOCK_EX);
667                         lnet_del_route(LNET_NET_ANY, &gw_nid);
668                         lnet_net_lock(LNET_LOCK_EX);
669                 }
670         }
671 }
672
673 void
674 lnet_peer_tables_cleanup(struct lnet_net *net)
675 {
676         int i;
677         struct lnet_peer_table *ptable;
678
679         LASSERT(the_lnet.ln_state != LNET_STATE_SHUTDOWN || net != NULL);
680         /* If just deleting the peers for a NI, get rid of any routes these
681          * peers are gateways for. */
682         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
683                 lnet_net_lock(LNET_LOCK_EX);
684                 lnet_peer_table_del_rtrs_locked(net, ptable);
685                 lnet_net_unlock(LNET_LOCK_EX);
686         }
687
688         /* Start the cleanup process */
689         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables) {
690                 lnet_net_lock(LNET_LOCK_EX);
691                 lnet_peer_table_cleanup_locked(net, ptable);
692                 lnet_net_unlock(LNET_LOCK_EX);
693         }
694
695         cfs_percpt_for_each(ptable, i, the_lnet.ln_peer_tables)
696                 lnet_peer_ni_finalize_wait(ptable);
697 }
698
699 static struct lnet_peer_ni *
700 lnet_get_peer_ni_locked(struct lnet_peer_table *ptable, struct lnet_nid *nid)
701 {
702         struct list_head        *peers;
703         struct lnet_peer_ni     *lp;
704
705         if (the_lnet.ln_state != LNET_STATE_RUNNING)
706                 return NULL;
707
708         peers = &ptable->pt_hash[lnet_nid2peerhash(nid)];
709         list_for_each_entry(lp, peers, lpni_hashlist) {
710                 if (nid_same(&lp->lpni_nid, nid)) {
711                         lnet_peer_ni_addref_locked(lp);
712                         return lp;
713                 }
714         }
715
716         return NULL;
717 }
718
719 struct lnet_peer_ni *
720 lnet_peer_ni_find_locked(struct lnet_nid *nid)
721 {
722         struct lnet_peer_ni *lpni;
723         struct lnet_peer_table *ptable;
724         int cpt;
725
726         cpt = lnet_nid_cpt_hash(nid, LNET_CPT_NUMBER);
727
728         ptable = the_lnet.ln_peer_tables[cpt];
729         lpni = lnet_get_peer_ni_locked(ptable, nid);
730
731         return lpni;
732 }
733
734 struct lnet_peer_ni *
735 lnet_peer_ni_get_locked(struct lnet_peer *lp, struct lnet_nid *nid)
736 {
737         struct lnet_peer_net *lpn;
738         struct lnet_peer_ni *lpni;
739
740         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
741         if (!lpn)
742                 return NULL;
743
744         list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
745                 if (nid_same(&lpni->lpni_nid, nid))
746                         return lpni;
747         }
748
749         return NULL;
750 }
751
752 struct lnet_peer *
753 lnet_find_peer(struct lnet_nid *nid)
754 {
755         struct lnet_peer_ni *lpni;
756         struct lnet_peer *lp = NULL;
757         int cpt;
758
759         cpt = lnet_net_lock_current();
760         lpni = lnet_peer_ni_find_locked(nid);
761         if (lpni) {
762                 lp = lpni->lpni_peer_net->lpn_peer;
763                 lnet_peer_addref_locked(lp);
764                 lnet_peer_ni_decref_locked(lpni);
765         }
766         lnet_net_unlock(cpt);
767
768         return lp;
769 }
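
/*
 * lnet_find_peer() returns its peer with a reference held, so every caller
 * must drop that reference with lnet_peer_decref_locked() when done.
 * Illustrative sketch of the pattern (see LNetAddPeer() below for a real
 * caller):
 *
 *	lp = lnet_find_peer(&nid);
 *	if (lp) {
 *		prim_nid = lp->lp_primary_nid;
 *		lnet_peer_decref_locked(lp);
 *	}
 */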
770
771 struct lnet_peer_net *
772 lnet_get_next_peer_net_locked(struct lnet_peer *lp, __u32 prev_lpn_id)
773 {
774         struct lnet_peer_net *net;
775
776         if (!prev_lpn_id) {
777                 /* no net id provided; return the first net */
778                 net = list_first_entry_or_null(&lp->lp_peer_nets,
779                                                struct lnet_peer_net,
780                                                lpn_peer_nets);
781
782                 return net;
783         }
784
785         /* find the net after the one provided */
786         list_for_each_entry(net, &lp->lp_peer_nets, lpn_peer_nets) {
787                 if (net->lpn_net_id == prev_lpn_id) {
788                         /*
789                          * if we reached the end of the list, loop to the
790                          * beginning.
791                          */
792                         if (net->lpn_peer_nets.next == &lp->lp_peer_nets)
793                                 return list_first_entry_or_null(&lp->lp_peer_nets,
794                                                                 struct lnet_peer_net,
795                                                                 lpn_peer_nets);
796                         else
797                                 return list_next_entry(net, lpn_peer_nets);
798                 }
799         }
800
801         return NULL;
802 }
803
804 struct lnet_peer_ni *
805 lnet_get_next_peer_ni_locked(struct lnet_peer *peer,
806                              struct lnet_peer_net *peer_net,
807                              struct lnet_peer_ni *prev)
808 {
809         struct lnet_peer_ni *lpni;
810         struct lnet_peer_net *net = peer_net;
811
812         if (!prev) {
813                 if (!net) {
814                         if (list_empty(&peer->lp_peer_nets))
815                                 return NULL;
816
817                         net = list_first_entry(&peer->lp_peer_nets,
818                                                struct lnet_peer_net,
819                                                lpn_peer_nets);
820                 }
821                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
822                                         lpni_peer_nis);
823
824                 return lpni;
825         }
826
827         if (prev->lpni_peer_nis.next == &prev->lpni_peer_net->lpn_peer_nis) {
828                 /*
829                  * if we reached the end of the peer ni list and the peer
830                  * net is specified, then there are no more peer nis in that
831                  * net.
832                  */
833                 if (net)
834                         return NULL;
835
836                 /*
837                  * we reached the end of this net ni list. move to the
838                  * next net
839                  */
840                 if (prev->lpni_peer_net->lpn_peer_nets.next ==
841                     &peer->lp_peer_nets)
842                         /* no more nets and no more NIs. */
843                         return NULL;
844
845                 /* get the next net */
846                 net = list_first_entry(&prev->lpni_peer_net->lpn_peer_nets,
847                                        struct lnet_peer_net,
848                                        lpn_peer_nets);
849                 /* get the ni on it */
850                 lpni = list_first_entry(&net->lpn_peer_nis, struct lnet_peer_ni,
851                                         lpni_peer_nis);
852
853                 return lpni;
854         }
855
856         /* there are more nis left */
857         lpni = list_first_entry(&prev->lpni_peer_nis,
858                                 struct lnet_peer_ni, lpni_peer_nis);
859
860         return lpni;
861 }
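
/*
 * Passing a NULL peer_net and a NULL prev walks every peer_ni of the peer
 * across all of its nets. A full traversal is typically written as the
 * sketch below, where handle() stands in for the caller's per-NI work
 * (this is the idiom used by lnet_peer_del_locked() and
 * lnet_peer_clr_non_mr_pref_nids()):
 *
 *	struct lnet_peer_ni *lpni = NULL;
 *
 *	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
 *		handle(lpni);
 */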
862
863 /* Call with the ln_api_mutex held */
864 int lnet_get_peer_list(u32 *countp, u32 *sizep, struct lnet_process_id __user *ids)
865 {
866         struct lnet_process_id id;
867         struct lnet_peer_table *ptable;
868         struct lnet_peer *lp;
869         __u32 count = 0;
870         __u32 size = 0;
871         int lncpt;
872         int cpt;
873         __u32 i;
874         int rc;
875
876         rc = -ESHUTDOWN;
877         if (the_lnet.ln_state != LNET_STATE_RUNNING)
878                 goto done;
879
880         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
881
882         /*
883          * Count the number of peers, and return E2BIG if the buffer
884          * is too small. We'll also return the desired size.
885          */
886         rc = -E2BIG;
887         for (cpt = 0; cpt < lncpt; cpt++) {
888                 ptable = the_lnet.ln_peer_tables[cpt];
889                 count += ptable->pt_peers;
890         }
891         size = count * sizeof(*ids);
892         if (size > *sizep)
893                 goto done;
894
895         /*
896          * Walk the peer lists and copy out the primary nids.
897          * This is safe because the peer lists are only modified
898          * while the ln_api_mutex is held. So we don't need to
899          * hold the lnet_net_lock as well, and can therefore
900          * directly call copy_to_user().
901          */
902         rc = -EFAULT;
903         memset(&id, 0, sizeof(id));
904         id.pid = LNET_PID_LUSTRE;
905         i = 0;
906         for (cpt = 0; cpt < lncpt; cpt++) {
907                 ptable = the_lnet.ln_peer_tables[cpt];
908                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
909                         if (!nid_is_nid4(&lp->lp_primary_nid))
910                                 continue;
911                         if (i >= count)
912                                 goto done;
913                         id.nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
914                         if (copy_to_user(&ids[i], &id, sizeof(id)))
915                                 goto done;
916                         i++;
917                 }
918         }
919         rc = 0;
920 done:
921         *countp = count;
922         *sizep = size;
923         return rc;
924 }
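
/*
 * Illustrative calling pattern (sketch only, not a caller in this file):
 * since *sizep is updated to the required size on every exit, a caller can
 * size its user-space buffer by letting a first call fail with -E2BIG
 * (when any peers exist) and then retrying:
 *
 *	size = 0;
 *	rc = lnet_get_peer_list(&count, &size, NULL);
 *	(rc is -E2BIG and size now holds the required byte count;
 *	 allocate a user buffer of that size, then retry)
 *	rc = lnet_get_peer_list(&count, &size, ids);
 */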
925
926 /*
927  * Start pushes to peers that need to be updated for a configuration
928  * change on this node.
929  */
930 void
931 lnet_push_update_to_peers(int force)
932 {
933         struct lnet_peer_table *ptable;
934         struct lnet_peer *lp;
935         int lncpt;
936         int cpt;
937
938         lnet_net_lock(LNET_LOCK_EX);
939         if (lnet_peer_discovery_disabled)
940                 force = 0;
941         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
942         for (cpt = 0; cpt < lncpt; cpt++) {
943                 ptable = the_lnet.ln_peer_tables[cpt];
944                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
945                         if (force) {
946                                 spin_lock(&lp->lp_lock);
947                                 if (lp->lp_state & LNET_PEER_MULTI_RAIL)
948                                         lp->lp_state |= LNET_PEER_FORCE_PUSH;
949                                 spin_unlock(&lp->lp_lock);
950                         }
951                         if (lnet_peer_needs_push(lp))
952                                 lnet_peer_queue_for_discovery(lp);
953                 }
954         }
955         lnet_net_unlock(LNET_LOCK_EX);
956         wake_up(&the_lnet.ln_dc_waitq);
957 }
958
959 /* find the NID in the preferred gateways for the remote peer
960  * return:
961  *      false: list is not empty and NID is not preferred
962  *      false: list is empty
963  *      true: nid is found in the list
964  */
965 bool
966 lnet_peer_is_pref_rtr_locked(struct lnet_peer_ni *lpni,
967                              struct lnet_nid *gw_nid)
968 {
969         struct lnet_nid_list *ne;
970
971         CDEBUG(D_NET, "%s: rtr pref empty: %d\n",
972                libcfs_nidstr(&lpni->lpni_nid),
973                list_empty(&lpni->lpni_rtr_pref_nids));
974
975         if (list_empty(&lpni->lpni_rtr_pref_nids))
976                 return false;
977
978         /* iterate through all the preferred NIDs and see if any of them
979          * matches the provided gw_nid
980          */
981         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
982                 CDEBUG(D_NET, "Comparing pref %s with gw %s\n",
983                        libcfs_nidstr(&ne->nl_nid),
984                        libcfs_nidstr(gw_nid));
985                 if (nid_same(&ne->nl_nid, gw_nid))
986                         return true;
987         }
988
989         return false;
990 }
991
992 void
993 lnet_peer_clr_pref_rtrs(struct lnet_peer_ni *lpni)
994 {
995         struct list_head zombies;
996         struct lnet_nid_list *ne;
997         struct lnet_nid_list *tmp;
998         int cpt = lpni->lpni_cpt;
999
1000         INIT_LIST_HEAD(&zombies);
1001
1002         lnet_net_lock(cpt);
1003         list_splice_init(&lpni->lpni_rtr_pref_nids, &zombies);
1004         lnet_net_unlock(cpt);
1005
1006         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1007                 list_del(&ne->nl_list);
1008                 LIBCFS_FREE(ne, sizeof(*ne));
1009         }
1010 }
1011
1012 int
1013 lnet_peer_add_pref_rtr(struct lnet_peer_ni *lpni,
1014                        struct lnet_nid *gw_nid)
1015 {
1016         int cpt = lpni->lpni_cpt;
1017         struct lnet_nid_list *ne = NULL;
1018
1019         /* This function is called with api_mutex held. When the api_mutex
1020          * is held the list cannot be modified, as it is only modified as
1021          * a result of applying a UDSP and that happens under api_mutex
1022          * lock.
1023          */
1024         __must_hold(&the_lnet.ln_api_mutex);
1025
1026         list_for_each_entry(ne, &lpni->lpni_rtr_pref_nids, nl_list) {
1027                 if (nid_same(&ne->nl_nid, gw_nid))
1028                         return -EEXIST;
1029         }
1030
1031         LIBCFS_CPT_ALLOC(ne, lnet_cpt_table(), cpt, sizeof(*ne));
1032         if (!ne)
1033                 return -ENOMEM;
1034
1035         ne->nl_nid = *gw_nid;
1036
1037         /* Lock the cpt to protect against addition and checks in the
1038          * selection algorithm
1039          */
1040         lnet_net_lock(cpt);
1041         list_add(&ne->nl_list, &lpni->lpni_rtr_pref_nids);
1042         lnet_net_unlock(cpt);
1043
1044         return 0;
1045 }
1046
1047 /*
1048  * Test whether a ni is a preferred ni for this peer_ni, e.g, whether
1049  * Test whether a ni is a preferred ni for this peer_ni, e.g., whether
1050  * this is a preferred point-to-point path. Call with lnet_net_lock in
1051  * shared mode.
1052 bool
1053 lnet_peer_is_pref_nid_locked(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1054 {
1055         struct lnet_nid_list *ne;
1056
1057         if (lpni->lpni_pref_nnids == 0)
1058                 return false;
1059         if (lpni->lpni_pref_nnids == 1)
1060                 return nid_same(&lpni->lpni_pref.nid, nid);
1061         list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1062                 if (nid_same(&ne->nl_nid, nid))
1063                         return true;
1064         }
1065         return false;
1066 }
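
/*
 * Note on the lpni_pref union: a single preferred NID is stored inline in
 * lpni_pref.nid while lpni_pref_nnids <= 1, and the lpni_pref.nids list is
 * used once a second NID is added. lnet_peer_add_pref_nid() and
 * lnet_peer_del_pref_nid() below handle the transitions between the two
 * representations.
 */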
1067
1068 /*
1069  * Set a single ni as preferred, provided no preferred ni is already
1070  * defined. Only to be used for non-multi-rail peer_ni.
1071  */
1072 int
1073 lnet_peer_ni_set_non_mr_pref_nid(struct lnet_peer_ni *lpni,
1074                                   struct lnet_nid *nid)
1075 {
1076         int rc = 0;
1077
1078         if (!nid)
1079                 return -EINVAL;
1080         spin_lock(&lpni->lpni_lock);
1081         if (LNET_NID_IS_ANY(nid)) {
1082                 rc = -EINVAL;
1083         } else if (lpni->lpni_pref_nnids > 0) {
1084                 rc = -EPERM;
1085         } else if (lpni->lpni_pref_nnids == 0) {
1086                 lpni->lpni_pref.nid = *nid;
1087                 lpni->lpni_pref_nnids = 1;
1088                 lpni->lpni_state |= LNET_PEER_NI_NON_MR_PREF;
1089         }
1090         spin_unlock(&lpni->lpni_lock);
1091
1092         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1093                libcfs_nidstr(&lpni->lpni_nid), libcfs_nidstr(nid), rc);
1094         return rc;
1095 }
1096
1097 /*
1098  * Clear the preferred NID from a non-multi-rail peer_ni, provided
1099  * this preference was set by lnet_peer_ni_set_non_mr_pref_nid().
1100  */
1101 static int
1102 lnet_peer_ni_clr_non_mr_pref_nid(struct lnet_peer_ni *lpni)
1103 {
1104         int rc = 0;
1105
1106         spin_lock(&lpni->lpni_lock);
1107         if (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF) {
1108                 lpni->lpni_pref_nnids = 0;
1109                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1110         } else if (lpni->lpni_pref_nnids == 0) {
1111                 rc = -ENOENT;
1112         } else {
1113                 rc = -EPERM;
1114         }
1115         spin_unlock(&lpni->lpni_lock);
1116
1117         CDEBUG(D_NET, "peer %s: %d\n",
1118                libcfs_nidstr(&lpni->lpni_nid), rc);
1119         return rc;
1120 }
1121
1122 void
1123 lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni, __u32 priority)
1124 {
1125         lpni->lpni_sel_priority = priority;
1126 }
1127
1128 /*
1129  * Clear the preferred NIDs from a non-multi-rail peer.
1130  */
1131 static void
1132 lnet_peer_clr_non_mr_pref_nids(struct lnet_peer *lp)
1133 {
1134         struct lnet_peer_ni *lpni = NULL;
1135
1136         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
1137                 lnet_peer_ni_clr_non_mr_pref_nid(lpni);
1138 }
1139
1140 int
1141 lnet_peer_add_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1142 {
1143         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1144         struct lnet_nid_list *ne1 = NULL;
1145         struct lnet_nid_list *ne2 = NULL;
1146         struct lnet_nid *tmp_nid = NULL;
1147         int rc = 0;
1148
1149         if (LNET_NID_IS_ANY(nid)) {
1150                 rc = -EINVAL;
1151                 goto out;
1152         }
1153
1154         if (lpni->lpni_pref_nnids == 1 &&
1155             nid_same(&lpni->lpni_pref.nid, nid)) {
1156                 rc = -EEXIST;
1157                 goto out;
1158         }
1159
1160         /* A non-MR node may have only one preferred NI per peer_ni */
1161         if (lpni->lpni_pref_nnids > 0 &&
1162             !(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1163                 rc = -EPERM;
1164                 goto out;
1165         }
1166
1167         /* add the new preferred nid to the list of preferred nids */
1168         if (lpni->lpni_pref_nnids != 0) {
1169                 size_t alloc_size = sizeof(*ne1);
1170
1171                 if (lpni->lpni_pref_nnids == 1) {
1172                         tmp_nid = &lpni->lpni_pref.nid;
1173                         INIT_LIST_HEAD(&lpni->lpni_pref.nids);
1174                 }
1175
1176                 list_for_each_entry(ne1, &lpni->lpni_pref.nids, nl_list) {
1177                         if (nid_same(&ne1->nl_nid, nid)) {
1178                                 rc = -EEXIST;
1179                                 goto out;
1180                         }
1181                 }
1182
1183                 LIBCFS_CPT_ALLOC(ne1, lnet_cpt_table(), lpni->lpni_cpt,
1184                                  alloc_size);
1185                 if (!ne1) {
1186                         rc = -ENOMEM;
1187                         goto out;
1188                 }
1189
1190                 /* move the originally stored nid to the list */
1191                 if (lpni->lpni_pref_nnids == 1) {
1192                         LIBCFS_CPT_ALLOC(ne2, lnet_cpt_table(),
1193                                 lpni->lpni_cpt, alloc_size);
1194                         if (!ne2) {
1195                                 rc = -ENOMEM;
1196                                 goto out;
1197                         }
1198                         INIT_LIST_HEAD(&ne2->nl_list);
1199                         ne2->nl_nid = *tmp_nid;
1200                 }
1201                 ne1->nl_nid = *nid;
1202         }
1203
1204         lnet_net_lock(LNET_LOCK_EX);
1205         spin_lock(&lpni->lpni_lock);
1206         if (lpni->lpni_pref_nnids == 0) {
1207                 lpni->lpni_pref.nid = *nid;
1208         } else {
1209                 if (ne2)
1210                         list_add_tail(&ne2->nl_list, &lpni->lpni_pref.nids);
1211                 list_add_tail(&ne1->nl_list, &lpni->lpni_pref.nids);
1212         }
1213         lpni->lpni_pref_nnids++;
1214         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1215         spin_unlock(&lpni->lpni_lock);
1216         lnet_net_unlock(LNET_LOCK_EX);
1217
1218 out:
1219         if (rc == -EEXIST && (lpni->lpni_state & LNET_PEER_NI_NON_MR_PREF)) {
1220                 spin_lock(&lpni->lpni_lock);
1221                 lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1222                 spin_unlock(&lpni->lpni_lock);
1223         }
1224         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1225                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1226         return rc;
1227 }
1228
1229 int
1230 lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, struct lnet_nid *nid)
1231 {
1232         struct lnet_peer *lp = lpni->lpni_peer_net->lpn_peer;
1233         struct lnet_nid_list *ne = NULL;
1234         int rc = 0;
1235
1236         if (lpni->lpni_pref_nnids == 0) {
1237                 rc = -ENOENT;
1238                 goto out;
1239         }
1240
1241         if (lpni->lpni_pref_nnids == 1) {
1242                 if (!nid_same(&lpni->lpni_pref.nid, nid)) {
1243                         rc = -ENOENT;
1244                         goto out;
1245                 }
1246         } else {
1247                 list_for_each_entry(ne, &lpni->lpni_pref.nids, nl_list) {
1248                         if (nid_same(&ne->nl_nid, nid))
1249                                 goto remove_nid_entry;
1250                 }
1251                 rc = -ENOENT;
1252                 ne = NULL;
1253                 goto out;
1254         }
1255
1256 remove_nid_entry:
1257         lnet_net_lock(LNET_LOCK_EX);
1258         spin_lock(&lpni->lpni_lock);
1259         if (lpni->lpni_pref_nnids == 1)
1260                 lpni->lpni_pref.nid = LNET_ANY_NID;
1261         else {
1262                 list_del_init(&ne->nl_list);
1263                 if (lpni->lpni_pref_nnids == 2) {
1264                         struct lnet_nid_list *ne, *tmp;
1265
1266                         list_for_each_entry_safe(ne, tmp,
1267                                                  &lpni->lpni_pref.nids,
1268                                                  nl_list) {
1269                                 lpni->lpni_pref.nid = ne->nl_nid;
1270                                 list_del_init(&ne->nl_list);
1271                                 LIBCFS_FREE(ne, sizeof(*ne));
1272                         }
1273                 }
1274         }
1275         lpni->lpni_pref_nnids--;
1276         lpni->lpni_state &= ~LNET_PEER_NI_NON_MR_PREF;
1277         spin_unlock(&lpni->lpni_lock);
1278         lnet_net_unlock(LNET_LOCK_EX);
1279
1280         if (ne)
1281                 LIBCFS_FREE(ne, sizeof(*ne));
1282 out:
1283         CDEBUG(D_NET, "peer %s nid %s: %d\n",
1284                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid), rc);
1285         return rc;
1286 }
1287
1288 void
1289 lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni)
1290 {
1291         struct list_head zombies;
1292         struct lnet_nid_list *ne;
1293         struct lnet_nid_list *tmp;
1294
1295         INIT_LIST_HEAD(&zombies);
1296
1297         lnet_net_lock(LNET_LOCK_EX);
1298         if (lpni->lpni_pref_nnids == 1)
1299                 lpni->lpni_pref.nid = LNET_ANY_NID;
1300         else if (lpni->lpni_pref_nnids > 1)
1301                 list_splice_init(&lpni->lpni_pref.nids, &zombies);
1302         lpni->lpni_pref_nnids = 0;
1303         lnet_net_unlock(LNET_LOCK_EX);
1304
1305         list_for_each_entry_safe(ne, tmp, &zombies, nl_list) {
1306                 list_del_init(&ne->nl_list);
1307                 LIBCFS_FREE(ne, sizeof(*ne));
1308         }
1309 }
1310
1311 void
1312 lnet_peer_primary_nid_locked(struct lnet_nid *nid, struct lnet_nid *result)
1313 {
1314         struct lnet_peer_ni *lpni;
1315
1316         *result = *nid;
1317         lpni = lnet_peer_ni_find_locked(nid);
1318         if (lpni) {
1319                 *result = lpni->lpni_peer_net->lpn_peer->lp_primary_nid;
1320                 lnet_peer_ni_decref_locked(lpni);
1321         }
1322 }
1323
1324 bool
1325 lnet_is_discovery_disabled_locked(struct lnet_peer *lp)
1326 __must_hold(&lp->lp_lock)
1327 {
1328         if (lnet_peer_discovery_disabled)
1329                 return true;
1330
1331         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL) ||
1332             (lp->lp_state & LNET_PEER_NO_DISCOVERY)) {
1333                 return true;
1334         }
1335
1336         return false;
1337 }
1338
1339 /*
1340  * Peer Discovery
1341  */
1342 bool
1343 lnet_is_discovery_disabled(struct lnet_peer *lp)
1344 {
1345         bool rc = false;
1346
1347         spin_lock(&lp->lp_lock);
1348         rc = lnet_is_discovery_disabled_locked(lp);
1349         spin_unlock(&lp->lp_lock);
1350
1351         return rc;
1352 }
1353
1354 int
1355 LNetAddPeer(struct lnet_nid *nids, u32 num_nids)
1356 {
1357         struct lnet_nid pnid = LNET_ANY_NID;
1358         bool mr;
1359         int i, rc;
1360         int flags = lock_prim_nid ? LNET_PEER_LOCK_PRIMARY : 0;
1361
1362         if (!nids || num_nids < 1)
1363                 return -EINVAL;
1364
1365         rc = LNetNIInit(LNET_PID_ANY);
1366         if (rc < 0)
1367                 return rc;
1368
1369         mutex_lock(&the_lnet.ln_api_mutex);
1370
1371         mr = lnet_peer_discovery_disabled == 0;
1372
1373         rc = 0;
1374         for (i = 0; i < num_nids; i++) {
1375                 if (nid_is_lo0(&nids[i]))
1376                         continue;
1377
1378                 if (LNET_NID_IS_ANY(&pnid)) {
1379                         pnid = nids[i];
1380                         rc = lnet_add_peer_ni(&pnid, &LNET_ANY_NID, mr, flags);
1381                         if (rc == -EALREADY) {
1382                                 struct lnet_peer *lp;
1383
1384                                 CDEBUG(D_NET, "A peer exists for NID %s\n",
1385                                        libcfs_nidstr(&pnid));
1386                                 rc = 0;
1387                                 /* Adds a refcount */
1388                                 lp = lnet_find_peer(&pnid);
1389                                 LASSERT(lp);
1390                                 pnid = lp->lp_primary_nid;
1391                                 /* Drop refcount from lookup */
1392                                 lnet_peer_decref_locked(lp);
1393                         }
1394                 } else if (lnet_peer_discovery_disabled) {
1395                         rc = lnet_add_peer_ni(&nids[i], &LNET_ANY_NID, mr,
1396                                               flags);
1397                 } else {
1398                         rc = lnet_add_peer_ni(&pnid, &nids[i], mr,
1399                                               flags);
1400                 }
1401
1402                 if (rc && rc != -EEXIST)
1403                         goto unlock;
1404         }
1405
1406 unlock:
1407         mutex_unlock(&the_lnet.ln_api_mutex);
1408
1409         LNetNIFini();
1410
1411         return rc == -EEXIST ? 0 : rc;
1412 }
1413 EXPORT_SYMBOL(LNetAddPeer);
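
/*
 * Illustrative LNetAddPeer() usage (sketch only, not a caller in this
 * file). Given an array of struct lnet_nid that has already been filled
 * in, the first non-loopback NID becomes (or resolves to) the peer's
 * primary NID and the remaining NIDs are added to that peer:
 *
 *	rc = LNetAddPeer(nids, num_nids);
 *	if (rc < 0)
 *		CERROR("Failed to add peer: %d\n", rc);
 */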
1414
1415 void LNetPrimaryNID(struct lnet_nid *nid)
1416 {
1417         struct lnet_peer *lp;
1418         struct lnet_peer_ni *lpni;
1419         struct lnet_nid orig;
1420         int rc = 0;
1421         int cpt;
1422
1423         if (!nid || nid_is_lo0(nid))
1424                 return;
1425         orig = *nid;
1426
1427         cpt = lnet_net_lock_current();
1428         lpni = lnet_peerni_by_nid_locked(nid, NULL, cpt);
1429         if (IS_ERR(lpni)) {
1430                 rc = PTR_ERR(lpni);
1431                 goto out_unlock;
1432         }
1433         lp = lpni->lpni_peer_net->lpn_peer;
1434
1435         /* If discovery is disabled locally then we needn't bother running
1436          * discovery here because discovery will not modify whatever
1437          * primary NID is currently set for this peer. If the specified peer is
1438          * down then this discovery can introduce long delays into the mount
1439          * process, so skip it if it isn't necessary.
1440          */
1441 again:
1442         spin_lock(&lp->lp_lock);
1443         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY) && lock_prim_nid) {
1444                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1445                 lp->lp_prim_lock_ts = ktime_get_ns();
1446         }
1447
1448         /* DD disabled, nothing to do */
1449         if (lnet_peer_discovery_disabled) {
1450                 *nid = lp->lp_primary_nid;
1451                 spin_unlock(&lp->lp_lock);
1452                 goto out_decref;
1453         }
1454
1455         /* Peer already up to date, nothing to do */
1456         if (lnet_peer_is_uptodate_locked(lp)) {
1457                 *nid = lp->lp_primary_nid;
1458                 spin_unlock(&lp->lp_lock);
1459                 goto out_decref;
1460         }
1461         spin_unlock(&lp->lp_lock);
1462
1463         /* If primary nid locking is enabled, discovery is performed
1464          * in the background.
1465          * If primary nid locking is disabled, discovery blocks here.
1466          * Messages to the peer will not go through until the discovery is
1467          * complete.
1468          */
1469         if (lock_prim_nid)
1470                 rc = lnet_discover_peer_locked(lpni, cpt, false);
1471         else
1472                 rc = lnet_discover_peer_locked(lpni, cpt, true);
1473         if (rc)
1474                 goto out_decref;
1475
1476         /* The lpni (or lp) for this NID may have changed and our ref is
1477          * the only thing keeping the old one around. Release the ref
1478          * and lookup the lpni again
1479          */
1480         lnet_peer_ni_decref_locked(lpni);
1481         lpni = lnet_peer_ni_find_locked(nid);
1482         if (!lpni) {
1483                 rc = -ENOENT;
1484                 goto out_unlock;
1485         }
1486         lp = lpni->lpni_peer_net->lpn_peer;
1487
1488         if (!lock_prim_nid && !lnet_is_discovery_disabled(lp))
1489                 goto again;
1490         *nid = lp->lp_primary_nid;
1491 out_decref:
1492         lnet_peer_ni_decref_locked(lpni);
1493 out_unlock:
1494         lnet_net_unlock(cpt);
1495
1496         CDEBUG(D_NET, "NID %s primary NID %s rc %d\n", libcfs_nidstr(&orig),
1497                libcfs_nidstr(nid), rc);
1498 }
1499 EXPORT_SYMBOL(LNetPrimaryNID);
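
/*
 * Illustrative LNetPrimaryNID() usage (sketch only): the NID is updated in
 * place, so a caller passes the NID of any interface it knows on the peer
 * and reads back that peer's primary NID; the value is left unchanged if
 * the peer cannot be found or discovered:
 *
 *	LNetPrimaryNID(&nid);
 *	CDEBUG(D_NET, "primary NID is %s\n", libcfs_nidstr(&nid));
 */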
1500
1501 struct lnet_peer_net *
1502 lnet_peer_get_net_locked(struct lnet_peer *peer, __u32 net_id)
1503 {
1504         struct lnet_peer_net *peer_net;
1505         list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
1506                 if (peer_net->lpn_net_id == net_id)
1507                         return peer_net;
1508         }
1509         return NULL;
1510 }
1511
1512 /*
1513  * Attach a peer_ni to a peer_net and peer. This function assumes
1514  * peer_ni is not already attached to the peer_net/peer. The peer_ni
1515  * may be attached to a different peer, in which case it will be
1516  * properly detached first. The whole operation is done atomically.
1517  *
1518  * This function consumes the reference on lpni and always returns 0.
1519  * This is the last function called from functions that do return an
1520  * int, so returning 0 here allows the compiler to do a tail call.
1521  */
1522 static int
1523 lnet_peer_attach_peer_ni(struct lnet_peer *lp,
1524                          struct lnet_peer_net *lpn,
1525                          struct lnet_peer_ni *lpni,
1526                          unsigned flags)
1527 {
1528         struct lnet_peer_table *ptable;
1529         bool new_lpn = false;
1530         int rc;
1531
1532         /* Install the new peer_ni */
1533         lnet_net_lock(LNET_LOCK_EX);
1534         /* Add peer_ni to global peer table hash, if necessary. */
1535         if (list_empty(&lpni->lpni_hashlist)) {
1536                 int hash = lnet_nid2peerhash(&lpni->lpni_nid);
1537
1538                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
1539                 list_add_tail(&lpni->lpni_hashlist, &ptable->pt_hash[hash]);
1540                 ptable->pt_version++;
1541                 lnet_peer_ni_addref_locked(lpni);
1542         }
1543
1544         /* Detach the peer_ni from an existing peer, if necessary. */
1545         if (lpni->lpni_peer_net) {
1546                 LASSERT(lpni->lpni_peer_net != lpn);
1547                 LASSERT(lpni->lpni_peer_net->lpn_peer != lp);
1548                 lnet_peer_detach_peer_ni_locked(lpni);
1549                 lnet_peer_net_decref_locked(lpni->lpni_peer_net);
1550                 lpni->lpni_peer_net = NULL;
1551         }
1552
1553         /* Add peer_ni to peer_net */
1554         lpni->lpni_peer_net = lpn;
1555         if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1556                 list_add(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1557         else
1558                 list_add_tail(&lpni->lpni_peer_nis, &lpn->lpn_peer_nis);
1559         lnet_update_peer_net_healthv(lpni);
1560         lnet_peer_net_addref_locked(lpn);
1561
1562         /* Add peer_net to peer */
1563         if (!lpn->lpn_peer) {
1564                 new_lpn = true;
1565                 lpn->lpn_peer = lp;
1566                 if (nid_same(&lp->lp_primary_nid, &lpni->lpni_nid))
1567                         list_add(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1568                 else
1569                         list_add_tail(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
1570                 lnet_peer_addref_locked(lp);
1571         }
1572
1573         /* Add peer to global peer list, if necessary */
1574         ptable = the_lnet.ln_peer_tables[lp->lp_cpt];
1575         if (list_empty(&lp->lp_peer_list)) {
1576                 list_add_tail(&lp->lp_peer_list, &ptable->pt_peer_list);
1577                 ptable->pt_peers++;
1578         }
1579
1580         /* Update peer state */
1581         spin_lock(&lp->lp_lock);
1582         if (flags & LNET_PEER_CONFIGURED) {
1583                 if (!(lp->lp_state & LNET_PEER_CONFIGURED))
1584                         lp->lp_state |= LNET_PEER_CONFIGURED;
1585         }
1586         if (flags & LNET_PEER_MULTI_RAIL) {
1587                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1588                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1589                         lnet_peer_clr_non_mr_pref_nids(lp);
1590                 }
1591         }
1592         if (flags & LNET_PEER_LOCK_PRIMARY) {
1593                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1594                 lp->lp_prim_lock_ts = ktime_get_ns();
1595         }
1596         spin_unlock(&lp->lp_lock);
1597
1598         lp->lp_nnis++;
1599
1600         /* apply UDSPs */
1601         if (new_lpn) {
1602                 rc = lnet_udsp_apply_policies_on_lpn(lpn);
1603                 if (rc)
1604                         CERROR("Failed to apply UDSPs on lpn %s\n",
1605                                libcfs_net2str(lpn->lpn_net_id));
1606         }
1607         rc = lnet_udsp_apply_policies_on_lpni(lpni);
1608         if (rc)
1609                 CERROR("Failed to apply UDSPs on lpni %s\n",
1610                        libcfs_nidstr(&lpni->lpni_nid));
1611
1612         CDEBUG(D_NET, "peer %s NID %s flags %#x\n",
1613                libcfs_nidstr(&lp->lp_primary_nid),
1614                libcfs_nidstr(&lpni->lpni_nid), flags);
1615         lnet_peer_ni_decref_locked(lpni);
1616         lnet_net_unlock(LNET_LOCK_EX);
1617
1618         return 0;
1619 }
1620
1621 /*
1622  * Create a new peer, with nid as its primary nid.
1623  *
1624  * Call with the lnet_api_mutex held.
1625  */
1626 static int
1627 lnet_peer_add(struct lnet_nid *nid, unsigned int flags)
1628 {
1629         struct lnet_peer *lp;
1630         struct lnet_peer_net *lpn;
1631         struct lnet_peer_ni *lpni;
1632         int rc = 0;
1633
1634         LASSERT(nid);
1635
1636         /*
1637          * No need for the lnet_net_lock here, because the
1638          * lnet_api_mutex is held.
1639          */
1640         lpni = lnet_peer_ni_find_locked(nid);
1641         if (lpni) {
1642                 /* A peer with this NID already exists. */
1643                 lp = lpni->lpni_peer_net->lpn_peer;
1644                 lnet_peer_ni_decref_locked(lpni);
1645                 /*
1646                  * This is an error if the peer was configured and the
1647                  * primary NID differs or an attempt is made to change
1648                  * the Multi-Rail flag. Otherwise the assumption is
1649                  * that an existing peer is being modified.
1650                  */
1651                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1652                         if (!nid_same(&lp->lp_primary_nid, nid))
1653                                 rc = -EEXIST;
1654                         else if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL)
1655                                 rc = -EPERM;
1656                         goto out;
1657                 } else if (lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
1658                         if (nid_same(&lp->lp_primary_nid, nid))
1659                                 rc = -EEXIST;
1660                         /* We're trying to recreate an existing peer whose
1661                          * primary NID has already been locked. This is
1662                          * likely due to two servers existing on the same
1663                          * node. So we'll just refer to that node with
1664                          * the primary NID which was first added by
1665                          * Lustre.
1666                          */
1667                         else
1668                                 rc = -EALREADY;
1669                         goto out;
1670                 } else if (!(flags & (LNET_PEER_LOCK_PRIMARY | LNET_PEER_CONFIGURED))) {
1671                         /* If we are not recreating the peer as configured
1672                          * and not locking the primary NID, there is
1673                          * nothing to do when the primary NID is unchanged.
1674                          */
1675                         if (nid_same(&lp->lp_primary_nid, nid)) {
1676                                 rc = -EEXIST;
1677                                 goto out;
1678                         }
1679                 }
1680                 /* Delete and recreate the peer.
1681                  * We can get here:
1682                  * 1. if the peer is being recreated as a configured NID
1683                  * 2. if there already exists a peer which
1684                  *    was discovered manually, but is recreated via Lustre
1685                  *    with the primary NID locked (LNET_PEER_LOCK_PRIMARY)
1686                  */
1687                 rc = lnet_peer_del(lp);
1688                 if (rc)
1689                         goto out;
1690         }
1691
1692         /* Create peer, peer_net, and peer_ni. */
1693         rc = -ENOMEM;
1694         lp = lnet_peer_alloc(nid);
1695         if (!lp)
1696                 goto out;
1697         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1698         if (!lpn)
1699                 goto out_free_lp;
1700         lpni = lnet_peer_ni_alloc(nid);
1701         if (!lpni)
1702                 goto out_free_lpn;
1703
1704         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1705
1706 out_free_lpn:
1707         LIBCFS_FREE(lpn, sizeof(*lpn));
1708 out_free_lp:
1709         LIBCFS_FREE(lp, sizeof(*lp));
1710 out:
1711         CDEBUG(D_NET, "peer %s NID flags %#x: %d\n",
1712                libcfs_nidstr(nid), flags, rc);
1713         return rc;
1714 }
1715
1716 /*
1717  * Add a NID to a peer. Call with ln_api_mutex held.
1718  *
1719  * Error codes:
1720  *  -EPERM:    Non-DLC addition to a DLC-configured peer.
1721  *  -EEXIST:   The NID was configured by DLC for a different peer.
1722  *  -ENOMEM:   Out of memory.
1723  *  -ENOTUNIQ: Adding a second peer NID on a single network on a
1724  *             non-multi-rail peer.
1725  */
1726 static int
1727 lnet_peer_add_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1728                   unsigned int flags)
1729 {
1730         struct lnet_peer_net *lpn;
1731         struct lnet_peer_ni *lpni;
1732         int rc = 0;
1733
1734         LASSERT(lp);
1735         LASSERT(nid);
1736
1737         /* A configured peer can only be updated through configuration. */
1738         if (!(flags & LNET_PEER_CONFIGURED)) {
1739                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
1740                         rc = -EPERM;
1741                         goto out;
1742                 }
1743         }
1744
1745         /*
1746          * The MULTI_RAIL flag can be set but not cleared, because
1747          * that would leave the peer struct in an invalid state.
1748          */
1749         if (flags & LNET_PEER_MULTI_RAIL) {
1750                 spin_lock(&lp->lp_lock);
1751                 if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1752                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
1753                         lnet_peer_clr_non_mr_pref_nids(lp);
1754                 }
1755                 spin_unlock(&lp->lp_lock);
1756         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
1757                 rc = -EPERM;
1758                 goto out;
1759         }
1760
1761         lpni = lnet_peer_ni_find_locked(nid);
1762         if (lpni) {
1763                 /*
1764                  * A peer_ni already exists. This is only a problem if
1765                  * it is not connected to this peer and was configured
1766                  * by DLC.
1767                  */
1768                 if (lpni->lpni_peer_net->lpn_peer == lp)
1769                         goto out_free_lpni;
1770                 if (lnet_peer_ni_is_configured(lpni)) {
1771                         rc = -EEXIST;
1772                         goto out_free_lpni;
1773                 }
1774                 /* If this is the primary NID, destroy the peer. */
1775                 if (lnet_peer_ni_is_primary(lpni)) {
1776                         struct lnet_peer *lp2 =
1777                                 lpni->lpni_peer_net->lpn_peer;
1778                         int rtr_refcount = lp2->lp_rtr_refcount;
1779                         unsigned int peer2_state;
1780                         __u64 peer2_prim_lock_ts;
1781
1782                         /* If there's another peer that this NID belongs to
1783                          * and the primary NID for that peer is locked,
1784                          * then, unless it is the only NID, we don't want
1785                          * to mess with it.
1786                          * But the configuration is wrong at this point,
1787                          * so we should flag both of these peers as being
1788                          * in a bad state.
1789                          */
1790                         spin_lock(&lp2->lp_lock);
1791                         if (lp2->lp_state & LNET_PEER_LOCK_PRIMARY &&
1792                             lp2->lp_nnis > 1) {
1793                                 lp2->lp_state |= LNET_PEER_BAD_CONFIG;
1794                                 spin_unlock(&lp2->lp_lock);
1795                                 spin_lock(&lp->lp_lock);
1796                                 lp->lp_state |= LNET_PEER_BAD_CONFIG;
1797                                 spin_unlock(&lp->lp_lock);
1798                                 CERROR("Peer %s NID %s is already locked with peer %s\n",
1799                                         libcfs_nidstr(&lp->lp_primary_nid),
1800                                         libcfs_nidstr(nid),
1801                                         libcfs_nidstr(&lp2->lp_primary_nid));
1802                                 goto out_free_lpni;
1803                         }
1804                         peer2_state = lp2->lp_state;
1805                         peer2_prim_lock_ts = lp2->lp_prim_lock_ts;
1806                         spin_unlock(&lp2->lp_lock);
1807
1808                         /* The NID which got locked earliest should be
1809                          * kept as primary. If the peers were
1810                          * created by Lustre, this allows the
1811                          * first listed NID to stay primary as intended
1812                          * for the purpose of communicating with Lustre,
1813                          * even if peer discovery succeeded using
1814                          * a different NID of the MR peer.
1815                          */
1816                         spin_lock(&lp->lp_lock);
1817                         if (peer2_state & LNET_PEER_LOCK_PRIMARY &&
1818                             ((lp->lp_state & LNET_PEER_LOCK_PRIMARY &&
1819                             peer2_prim_lock_ts < lp->lp_prim_lock_ts) ||
1820                              !(lp->lp_state & LNET_PEER_LOCK_PRIMARY))) {
1821                                 lp->lp_prim_lock_ts = peer2_prim_lock_ts;
1822                                 lp->lp_primary_nid = *nid;
1823                                 lp->lp_state |= LNET_PEER_LOCK_PRIMARY;
1824                         }
1825                         spin_unlock(&lp->lp_lock);
1826                         /*
1827                          * If we're trying to delete a router it means
1828                          * we're moving this peer NI to a new peer, so we
1829                          * must transfer the router properties to the new peer.
1830                          */
1831                         if (rtr_refcount > 0) {
1832                                 flags |= LNET_PEER_RTR_NI_FORCE_DEL;
1833                                 lnet_rtr_transfer_to_peer(lp2, lp);
1834                         }
1835                         lnet_peer_del(lp2);
1836                         lnet_peer_ni_decref_locked(lpni);
1837                         lpni = lnet_peer_ni_alloc(nid);
1838                         if (!lpni) {
1839                                 rc = -ENOMEM;
1840                                 goto out_free_lpni;
1841                         }
1842                 }
1843         } else {
1844                 lpni = lnet_peer_ni_alloc(nid);
1845                 if (!lpni) {
1846                         rc = -ENOMEM;
1847                         goto out_free_lpni;
1848                 }
1849         }
1850
1851         /*
1852          * Get the peer_net. Check that we're not adding a second
1853          * peer_ni on a peer_net of a non-multi-rail peer.
1854          */
1855         lpn = lnet_peer_get_net_locked(lp, LNET_NID_NET(nid));
1856         if (!lpn) {
1857                 lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1858                 if (!lpn) {
1859                         rc = -ENOMEM;
1860                         goto out_free_lpni;
1861                 }
1862         } else if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
1863                 rc = -ENOTUNIQ;
1864                 goto out_free_lpni;
1865         }
1866
1867         return lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1868
1869 out_free_lpni:
1870         lnet_peer_ni_decref_locked(lpni);
1871 out:
1872         CDEBUG(D_NET, "peer %s NID %s flags %#x: %d\n",
1873                libcfs_nidstr(&lp->lp_primary_nid), libcfs_nidstr(nid),
1874                flags, rc);
1875         return rc;
1876 }
1877
1878 /*
1879  * Update the primary NID of a peer, if possible.
1880  *
1881  * Call with the lnet_api_mutex held.
1882  */
1883 static int
1884 lnet_peer_set_primary_nid(struct lnet_peer *lp, struct lnet_nid *nid,
1885                           unsigned int flags)
1886 {
1887         struct lnet_nid old = lp->lp_primary_nid;
1888         int rc = 0;
1889
1890         if (nid_same(&lp->lp_primary_nid, nid))
1891                 goto out;
1892
1893         if (!(lp->lp_state & LNET_PEER_LOCK_PRIMARY))
1894                 lp->lp_primary_nid = *nid;
1895
1896         rc = lnet_peer_add_nid(lp, nid, flags);
1897         if (rc) {
1898                 lp->lp_primary_nid = old;
1899                 goto out;
1900         }
1901 out:
1902         /* If this is a configured peer or the peer's primary NID has
1903          * been locked, then we don't want to flag this scenario as
1904          * a failure.
1905          */
1906         if (lp->lp_state & LNET_PEER_CONFIGURED ||
1907             lp->lp_state & LNET_PEER_LOCK_PRIMARY)
1908                 return 0;
1909
1910         CDEBUG(D_NET, "peer %s NID %s: %d\n",
1911                libcfs_nidstr(&old), libcfs_nidstr(nid), rc);
1912
1913         return rc;
1914 }
1915
1916 /*
1917  * lpni creation initiated due to traffic, either sending or receiving.
1918  * Callers must hold ln_api_mutex.
1919  * A ref is taken on the lnet_peer_ni returned by this function.
1920  */
1921 static struct lnet_peer_ni *
1922 lnet_peer_ni_traffic_add(struct lnet_nid *nid, struct lnet_nid *pref)
1923 __must_hold(&the_lnet.ln_api_mutex)
1924 {
1925         struct lnet_peer *lp = NULL;
1926         struct lnet_peer_net *lpn = NULL;
1927         struct lnet_peer_ni *lpni;
1928         unsigned flags = 0;
1929         int rc = 0;
1930
1931         if (LNET_NID_IS_ANY(nid)) {
1932                 rc = -EINVAL;
1933                 goto out_err;
1934         }
1935
1936         /* lnet_net_lock is not needed here because ln_api_mutex is held */
1937         lpni = lnet_peer_ni_find_locked(nid);
1938         if (lpni) {
1939                 /*
1940                  * We must have raced with another thread. Since we
1941                  * know next to nothing about a peer_ni created by
1942                  * traffic, we just assume everything is ok and
1943                  * return.
1944                  */
1945                 goto out;
1946         }
1947
1948         /* Create peer, peer_net, and peer_ni. */
1949         rc = -ENOMEM;
1950         lp = lnet_peer_alloc(nid);
1951         if (!lp)
1952                 goto out_err;
1953         lpn = lnet_peer_net_alloc(LNET_NID_NET(nid));
1954         if (!lpn)
1955                 goto out_err;
1956         lpni = lnet_peer_ni_alloc(nid);
1957         if (!lpni)
1958                 goto out_err;
1959         lnet_peer_ni_set_non_mr_pref_nid(lpni, pref);
1960
1961         /* lnet_peer_attach_peer_ni() always returns 0 */
1962         rc = lnet_peer_attach_peer_ni(lp, lpn, lpni, flags);
1963
1964         lnet_peer_ni_addref_locked(lpni);
1965
1966 out_err:
1967         if (rc) {
1968                 if (lpn)
1969                         LIBCFS_FREE(lpn, sizeof(*lpn));
1970                 if (lp)
1971                         LIBCFS_FREE(lp, sizeof(*lp));
1972                 lpni = ERR_PTR(rc);
1973         }
1974 out:
1975         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(nid), rc);
1976         return lpni;
1977 }
1978
1979 /*
1980  * Implementation of IOC_LIBCFS_ADD_PEER_NI.
1981  *
1982  * This API handles the following combinations:
1983  *   Create a peer with its primary NI if only the prim_nid is provided
1984  *   Add a NID to a peer identified by the prim_nid. The peer identified
1985  *   by the prim_nid must already exist.
1986  *   The peer being created may be non-MR.
1987  *
1988  * The caller must hold ln_api_mutex. This prevents the peer from
1989  * being created/modified/deleted by a different thread.
1990  */
1991 static int
1992 lnet_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid, bool mr,
1993                  unsigned int flags)
1994 __must_hold(&the_lnet.ln_api_mutex)
1995 {
1996         struct lnet_peer *lp = NULL;
1997         struct lnet_peer_ni *lpni;
1998
1999         /* The prim_nid must always be specified */
2000         if (LNET_NID_IS_ANY(prim_nid))
2001                 return -EINVAL;
2002
2003         if (mr)
2004                 flags |= LNET_PEER_MULTI_RAIL;
2005
2006         /*
2007          * If nid isn't specified, we must create a new peer with
2008          * prim_nid as its primary nid.
2009          */
2010         if (LNET_NID_IS_ANY(nid))
2011                 return lnet_peer_add(prim_nid, flags);
2012
2013         /* Look up the prim_nid, which must exist. */
2014         lpni = lnet_peer_ni_find_locked(prim_nid);
2015         if (!lpni)
2016                 return -ENOENT;
2017         lp = lpni->lpni_peer_net->lpn_peer;
2018         lnet_peer_ni_decref_locked(lpni);
2019
2020         /* Peer must have been configured. */
2021         if ((flags & LNET_PEER_CONFIGURED) &&
2022             !(lp->lp_state & LNET_PEER_CONFIGURED)) {
2023                 CDEBUG(D_NET, "peer %s was not configured\n",
2024                        libcfs_nidstr(prim_nid));
2025                 return -ENOENT;
2026         }
2027
2028         /* Primary NID must match */
2029         if (!nid_same(&lp->lp_primary_nid, prim_nid)) {
2030                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2031                        libcfs_nidstr(prim_nid),
2032                        libcfs_nidstr(&lp->lp_primary_nid));
2033                 return -ENODEV;
2034         }
2035
2036         /* Multi-Rail flag must match. */
2037         if ((lp->lp_state ^ flags) & LNET_PEER_MULTI_RAIL) {
2038                 CDEBUG(D_NET, "multi-rail state mismatch for peer %s\n",
2039                        libcfs_nidstr(prim_nid));
2040                 return -EPERM;
2041         }
2042
2043         if (lnet_peer_is_uptodate(lp) && !(flags & LNET_PEER_CONFIGURED)) {
2044                 CDEBUG(D_NET,
2045                        "Don't add temporary peer NI for uptodate peer %s\n",
2046                        libcfs_nidstr(&lp->lp_primary_nid));
2047                 return -EINVAL;
2048         }
2049
2050         return lnet_peer_add_nid(lp, nid, flags);
2051 }
2052
2053 int lnet_user_add_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2054                           bool mr, bool lock_prim)
2055 {
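        /* The peer state flags are single bits, so multiplying
         * LNET_PEER_LOCK_PRIMARY by the boolean lock_prim sets that flag
         * only when lock_prim is true; this is equivalent to a
         * conditional OR.
         */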
2056         int fl = LNET_PEER_CONFIGURED | (LNET_PEER_LOCK_PRIMARY * lock_prim);
2057
2058         return lnet_add_peer_ni(prim_nid, nid, mr, fl);
2059 }
2060
2061 static int
2062 lnet_reset_peer(struct lnet_peer *lp)
2063 {
2064         struct lnet_peer_net *lpn, *lpntmp;
2065         struct lnet_peer_ni *lpni, *lpnitmp;
2066         unsigned int flags;
2067         int rc;
2068
2069         lnet_peer_cancel_discovery(lp);
2070
2071         flags = LNET_PEER_CONFIGURED;
2072         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2073                 flags |= LNET_PEER_MULTI_RAIL;
2074
2075         list_for_each_entry_safe(lpn, lpntmp, &lp->lp_peer_nets, lpn_peer_nets) {
2076                 list_for_each_entry_safe(lpni, lpnitmp, &lpn->lpn_peer_nis,
2077                                          lpni_peer_nis) {
2078                         if (nid_same(&lpni->lpni_nid, &lp->lp_primary_nid))
2079                                 continue;
2080
2081                         rc = lnet_peer_del_nid(lp, &lpni->lpni_nid, flags);
2082                         if (rc) {
2083                                 CERROR("Failed to delete %s from peer %s\n",
2084                                        libcfs_nidstr(&lpni->lpni_nid),
2085                                        libcfs_nidstr(&lp->lp_primary_nid));
2086                         }
2087                 }
2088         }
2089
2090         /* mark it for discovery the next time we use it */
2091         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2092         return 0;
2093 }
2094
2095 /*
2096  * Implementation of IOC_LIBCFS_DEL_PEER_NI.
2097  *
2098  * This API handles the following combinations:
2099  *   Delete a NI from a peer if both prim_nid and nid are provided.
2100  *   Delete a peer if only prim_nid is provided.
2101  *   Delete a peer if the nid provided is its primary NID.
2102  *
2103  * The caller must hold ln_api_mutex. This prevents the peer from
2104  * being modified/deleted by a different thread.
2105  */
2106 int
2107 lnet_del_peer_ni(struct lnet_nid *prim_nid, struct lnet_nid *nid,
2108                  int force)
2109 {
2110         struct lnet_peer *lp;
2111         struct lnet_peer_ni *lpni;
2112         unsigned int flags;
2113
2114         if (!prim_nid || LNET_NID_IS_ANY(prim_nid))
2115                 return -EINVAL;
2116
2117         lpni = lnet_peer_ni_find_locked(prim_nid);
2118         if (!lpni)
2119                 return -ENOENT;
2120         lp = lpni->lpni_peer_net->lpn_peer;
2121         lnet_peer_ni_decref_locked(lpni);
2122
2123         if (!nid_same(prim_nid, &lp->lp_primary_nid)) {
2124                 CDEBUG(D_NET, "prim_nid %s is not primary for peer %s\n",
2125                        libcfs_nidstr(prim_nid),
2126                        libcfs_nidstr(&lp->lp_primary_nid));
2127                 return -ENODEV;
2128         }
2129
2130         lnet_net_lock(LNET_LOCK_EX);
2131         if (lp->lp_rtr_refcount > 0) {
2132                 lnet_net_unlock(LNET_LOCK_EX);
2133                 CERROR("%s is a router. Can not be deleted\n",
2134                        libcfs_nidstr(prim_nid));
2135                 return -EBUSY;
2136         }
2137         lnet_net_unlock(LNET_LOCK_EX);
2138
2139         if (LNET_NID_IS_ANY(nid) || nid_same(nid, &lp->lp_primary_nid)) {
2140                 if (!force && lp->lp_state & LNET_PEER_LOCK_PRIMARY) {
2141                         CERROR("peer %s created by Lustre. Must preserve primary NID, but will remove other NIDs\n",
2142                                libcfs_nidstr(&lp->lp_primary_nid));
2143                         return lnet_reset_peer(lp);
2144                 } else {
2145                         return lnet_peer_del(lp);
2146                 }
2147         }
2148
2149         flags = LNET_PEER_CONFIGURED;
2150         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
2151                 flags |= LNET_PEER_MULTI_RAIL;
2152
2153         return lnet_peer_del_nid(lp, nid, flags);
2154 }
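/*
 * Illustrative sketch (not part of this file): removing a single NID from a
 * peer versus removing the whole peer via lnet_del_peer_ni(). "prim" and
 * "nid" are hypothetical, already-initialized struct lnet_nid values.
 *
 *	// Remove one (non-primary) NID from the peer identified by "prim"
 *	rc = lnet_del_peer_ni(&prim, &nid, 0);
 *
 *	// Remove the entire peer by passing its primary NID (or a wildcard
 *	// NID) as "nid". With force == 0, a peer whose primary NID is
 *	// locked is only reset (non-primary NIDs removed), not deleted.
 *	rc = lnet_del_peer_ni(&prim, &prim, 0);
 */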
2155
2156 void
2157 lnet_destroy_peer_ni_locked(struct kref *ref)
2158 {
2159         struct lnet_peer_ni *lpni = container_of(ref, struct lnet_peer_ni,
2160                                                  lpni_kref);
2161         struct lnet_peer_table *ptable;
2162         struct lnet_peer_net *lpn;
2163
2164         CDEBUG(D_NET, "%p nid %s\n", lpni, libcfs_nidstr(&lpni->lpni_nid));
2165
2166         LASSERT(kref_read(&lpni->lpni_kref) == 0);
2167         LASSERT(list_empty(&lpni->lpni_txq));
2168         LASSERT(lpni->lpni_txqnob == 0);
2169         LASSERT(list_empty(&lpni->lpni_peer_nis));
2170         LASSERT(list_empty(&lpni->lpni_on_remote_peer_ni_list));
2171
2172         lpn = lpni->lpni_peer_net;
2173         lpni->lpni_peer_net = NULL;
2174         lpni->lpni_net = NULL;
2175
2176         if (!list_empty(&lpni->lpni_hashlist)) {
2177                 /* remove the peer ni from the zombie list */
2178                 ptable = the_lnet.ln_peer_tables[lpni->lpni_cpt];
2179                 spin_lock(&ptable->pt_zombie_lock);
2180                 list_del_init(&lpni->lpni_hashlist);
2181                 ptable->pt_zombies--;
2182                 spin_unlock(&ptable->pt_zombie_lock);
2183         }
2184
2185         if (lpni->lpni_pref_nnids > 1) {
2186                 struct lnet_nid_list *ne, *tmp;
2187
2188                 list_for_each_entry_safe(ne, tmp, &lpni->lpni_pref.nids,
2189                                          nl_list) {
2190                         list_del_init(&ne->nl_list);
2191                         LIBCFS_FREE(ne, sizeof(*ne));
2192                 }
2193         }
2194         LIBCFS_FREE(lpni, sizeof(*lpni));
2195
2196         if (lpn)
2197                 lnet_peer_net_decref_locked(lpn);
2198 }
2199
2200 struct lnet_peer_ni *
2201 lnet_nid2peerni_ex(struct lnet_nid *nid)
2202 __must_hold(&the_lnet.ln_api_mutex)
2203 {
2204         struct lnet_peer_ni *lpni = NULL;
2205
2206         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2207                 return ERR_PTR(-ESHUTDOWN);
2208
2209         /*
2210          * find if a peer_ni already exists.
2211          * If so then just return that.
2212          */
2213         lpni = lnet_peer_ni_find_locked(nid);
2214         if (lpni)
2215                 return lpni;
2216
2217         lnet_net_unlock(LNET_LOCK_EX);
2218
2219         lpni = lnet_peer_ni_traffic_add(nid, NULL);
2220
2221         lnet_net_lock(LNET_LOCK_EX);
2222
2223         return lpni;
2224 }
2225
2226 /*
2227  * Get a peer_ni for the given nid, create it if necessary. Takes a
2228  * hold on the peer_ni.
2229  */
2230 struct lnet_peer_ni *
2231 lnet_peerni_by_nid_locked(struct lnet_nid *nid,
2232                         struct lnet_nid *pref, int cpt)
2233 {
2234         struct lnet_peer_ni *lpni = NULL;
2235
2236         if (the_lnet.ln_state != LNET_STATE_RUNNING)
2237                 return ERR_PTR(-ESHUTDOWN);
2238
2239         /*
2240          * find if a peer_ni already exists.
2241          * If so then just return that.
2242          */
2243         lpni = lnet_peer_ni_find_locked(nid);
2244         if (lpni)
2245                 return lpni;
2246
2247         /*
2248          * Slow path:
2249          * use the lnet_api_mutex to serialize the creation of the peer_ni
2250          * and the creation/deletion of the local ni/net. When a local ni is
2251          * created, if there exists a set of peer_nis on that network,
2252          * they need to be traversed and updated. When a local NI is
2253          * deleted, which could result in a network being deleted, then
2254          * all peer nis on that network need to be removed as well.
2255          *
2256          * Creation through traffic should also be serialized with
2257          * creation through DLC.
2258          */
2259         lnet_net_unlock(cpt);
2260         mutex_lock(&the_lnet.ln_api_mutex);
2261         /*
2262          * the_lnet.ln_state is only modified under the ln_api_mutex, so a single
2263          * check here is sufficient.
2264          */
2265         if (the_lnet.ln_state == LNET_STATE_RUNNING)
2266                 lpni = lnet_peer_ni_traffic_add(nid, pref);
2267
2268         mutex_unlock(&the_lnet.ln_api_mutex);
2269         lnet_net_lock(cpt);
2270
2271         /* Lock has been dropped, check again for shutdown. */
2272         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
2273                 if (!IS_ERR_OR_NULL(lpni))
2274                         lnet_peer_ni_decref_locked(lpni);
2275                 lpni = ERR_PTR(-ESHUTDOWN);
2276         }
2277
2278         return lpni;
2279 }
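/*
 * Illustrative sketch (not part of this file): typical lookup-or-create use
 * from a send/receive path, with the per-CPT net lock already held. "dst"
 * and "cpt" are hypothetical, already-initialized variables.
 *
 *	lnet_net_lock(cpt);
 *	lpni = lnet_peerni_by_nid_locked(&dst, NULL, cpt);
 *	if (IS_ERR(lpni)) {
 *		lnet_net_unlock(cpt);
 *		return PTR_ERR(lpni);
 *	}
 *	// ... use lpni; the hold taken here must eventually be dropped
 *	lnet_peer_ni_decref_locked(lpni);
 *	lnet_net_unlock(cpt);
 */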
2280
2281 bool
2282 lnet_peer_gw_discovery(struct lnet_peer *lp)
2283 {
2284         bool rc = false;
2285
2286         spin_lock(&lp->lp_lock);
2287         if (lp->lp_state & LNET_PEER_RTR_DISCOVERY)
2288                 rc = true;
2289         spin_unlock(&lp->lp_lock);
2290
2291         return rc;
2292 }
2293
2294 bool
2295 lnet_peer_is_uptodate(struct lnet_peer *lp)
2296 {
2297         bool rc;
2298
2299         spin_lock(&lp->lp_lock);
2300         rc = lnet_peer_is_uptodate_locked(lp);
2301         spin_unlock(&lp->lp_lock);
2302         return rc;
2303 }
2304
2305 /*
2306  * Is a peer uptodate from the point of view of discovery?
2307  *
2308  * If it is currently being processed, obviously not.
2309  * A forced Ping or Push is also handled by the discovery thread.
2310  *
2311  * Otherwise look at whether the peer needs rediscovering.
2312  */
2313 bool
2314 lnet_peer_is_uptodate_locked(struct lnet_peer *lp)
2315 __must_hold(&lp->lp_lock)
2316 {
2317         bool rc;
2318
2319         if (lp->lp_state & (LNET_PEER_DISCOVERING |
2320                             LNET_PEER_FORCE_PING |
2321                             LNET_PEER_FORCE_PUSH)) {
2322                 rc = false;
2323         } else if (lp->lp_state & LNET_PEER_REDISCOVER) {
2324                 rc = false;
2325         } else if (lnet_peer_needs_push(lp)) {
2326                 rc = false;
2327         } else if (lp->lp_state & LNET_PEER_DISCOVERED) {
2328                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE)
2329                         rc = true;
2330                 else
2331                         rc = false;
2332         } else {
2333                 rc = false;
2334         }
2335
2336         return rc;
2337 }
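/*
 * Equivalent restatement of the checks above (illustrative only, not part of
 * this file): a peer is up to date when it has been discovered, its NIDs are
 * up to date, and no discovery-related work is pending.
 *
 *	return !(lp->lp_state & (LNET_PEER_DISCOVERING |
 *				 LNET_PEER_FORCE_PING |
 *				 LNET_PEER_FORCE_PUSH |
 *				 LNET_PEER_REDISCOVER)) &&
 *	       !lnet_peer_needs_push(lp) &&
 *	       (lp->lp_state & LNET_PEER_DISCOVERED) &&
 *	       (lp->lp_state & LNET_PEER_NIDS_UPTODATE);
 */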
2338
2339 /* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
2340 void
2341 lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
2342 {
2343         /* The discovery thread holds net_lock/EX and lp_lock when it splices
2344          * the lp_dc_pendq onto a local list for resending. Thus, we do the same
2345          * when adding to the list and queuing the peer to ensure that we do not
2346          * strand any messages on the lp_dc_pendq. This scheme ensures the
2347          * message will be resent even if the peer is already being discovered.
2348          * Therefore we needn't check the return value of
2349          * lnet_peer_queue_for_discovery(lp).
2350          */
2351         lnet_net_lock(LNET_LOCK_EX);
2352         spin_lock(&lp->lp_lock);
2353         list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
2354         spin_unlock(&lp->lp_lock);
2355         lnet_peer_queue_for_discovery(lp);
2356         lnet_net_unlock(LNET_LOCK_EX);
2357 }
2358
2359 /*
2360  * Queue a peer for the attention of the discovery thread.  Call with
2361  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
2362  * -EALREADY if the peer was already queued.
2363  */
2364 static int lnet_peer_queue_for_discovery(struct lnet_peer *lp)
2365 {
2366         int rc;
2367
2368         spin_lock(&lp->lp_lock);
2369         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2370                 lp->lp_state |= LNET_PEER_DISCOVERING;
2371         spin_unlock(&lp->lp_lock);
2372         if (list_empty(&lp->lp_dc_list)) {
2373                 lnet_peer_addref_locked(lp);
2374                 list_add_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2375                 wake_up(&the_lnet.ln_dc_waitq);
2376                 rc = 0;
2377         } else {
2378                 rc = -EALREADY;
2379         }
2380
2381         CDEBUG(D_NET, "Queue peer %s: %d\n",
2382                libcfs_nidstr(&lp->lp_primary_nid), rc);
2383
2384         return rc;
2385 }
2386
2387 /*
2388  * Discovery of a peer is complete. Wake all waiters on the peer.
2389  * Call with lnet_net_lock/EX held.
2390  */
2391 static void lnet_peer_discovery_complete(struct lnet_peer *lp, int dc_error)
2392 {
2393         struct lnet_msg *msg, *tmp;
2394         int rc = 0;
2395         LIST_HEAD(pending_msgs);
2396
2397         CDEBUG(D_NET, "Discovery complete. Dequeue peer %s\n",
2398                libcfs_nidstr(&lp->lp_primary_nid));
2399
2400         spin_lock(&lp->lp_lock);
2401         /* Our caller dropped lp_lock which may have allowed another thread to
2402          * set LNET_PEER_DISCOVERING, or it may be set if dc_error is non-zero.
2403          * Ensure it is cleared.
2404          */
2405         lp->lp_state &= ~LNET_PEER_DISCOVERING;
2406         if (dc_error) {
2407                 lp->lp_dc_error = dc_error;
2408                 lp->lp_state |= LNET_PEER_REDISCOVER;
2409         }
2410         list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
2411         spin_unlock(&lp->lp_lock);
2412         list_del_init(&lp->lp_dc_list);
2413         wake_up(&lp->lp_dc_waitq);
2414
2415         if (lp->lp_rtr_refcount > 0)
2416                 lnet_router_discovery_complete(lp);
2417
2418         lnet_net_unlock(LNET_LOCK_EX);
2419
2420         /* iterate through all pending messages and send them again */
2421         list_for_each_entry_safe(msg, tmp, &pending_msgs, msg_list) {
2422                 list_del_init(&msg->msg_list);
2423                 if (dc_error) {
2424                         lnet_finalize(msg, dc_error);
2425                         continue;
2426                 }
2427
2428                 CDEBUG(D_NET, "sending pending message %s to target %s\n",
2429                        lnet_msgtyp2str(msg->msg_type),
2430                        libcfs_idstr(&msg->msg_target));
2431                 rc = lnet_send(&msg->msg_src_nid_param, msg,
2432                                &msg->msg_rtr_nid_param);
2433                 if (rc < 0) {
2434                         CNETERR("Error sending %s to %s: %d\n",
2435                                lnet_msgtyp2str(msg->msg_type),
2436                                libcfs_idstr(&msg->msg_target), rc);
2437                         lnet_finalize(msg, rc);
2438                 }
2439         }
2440         lnet_net_lock(LNET_LOCK_EX);
2441         lnet_peer_decref_locked(lp);
2442 }
2443
2444 /*
2445  * Handle inbound push.
2446  * Like any event handler, called with lnet_res_lock/CPT held.
2447  */
2448 void lnet_peer_push_event(struct lnet_event *ev)
2449 {
2450         struct lnet_ping_buffer *pbuf;
2451         struct lnet_peer *lp;
2452         int infobytes;
2453
2454         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start + ev->offset);
2455
2456         /* lnet_find_peer() adds a refcount */
2457         lp = lnet_find_peer(&ev->source.nid);
2458         if (!lp) {
2459                 CDEBUG(D_NET, "Push Put from unknown %s (source %s). Ignoring...\n",
2460                        libcfs_nidstr(&ev->initiator.nid),
2461                        libcfs_nidstr(&ev->source.nid));
2462                 pbuf->pb_needs_post = true;
2463                 return;
2464         }
2465
2466         /* Ensure peer state remains consistent while we modify it. */
2467         spin_lock(&lp->lp_lock);
2468
2469         /*
2470          * If some kind of error happened the contents of the message
2471          * cannot be used. Clear the NIDS_UPTODATE and set the
2472          * FORCE_PING flag to trigger a ping.
2473          */
2474         if (ev->status) {
2475                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2476                 lp->lp_state |= LNET_PEER_FORCE_PING;
2477                 CDEBUG(D_NET, "Push Put error %d from %s (source %s)\n",
2478                        ev->status,
2479                        libcfs_nidstr(&lp->lp_primary_nid),
2480                        libcfs_nidstr(&ev->source.nid));
2481                 goto out;
2482         }
2483
2484         /*
2485          * A push with invalid or corrupted info. Clear the UPTODATE
2486          * flag to trigger a ping.
2487          */
2488         if (lnet_ping_info_validate(&pbuf->pb_info)) {
2489                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2490                 lp->lp_state |= LNET_PEER_FORCE_PING;
2491                 CDEBUG(D_NET, "Corrupted Push from %s\n",
2492                        libcfs_nidstr(&lp->lp_primary_nid));
2493                 goto out;
2494         }
2495
2496         /* Make sure we'll allocate the correct size ping buffer when
2497          * pinging the peer.
2498          */
2499         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2500         if (lp->lp_data_bytes < infobytes)
2501                 lp->lp_data_bytes = infobytes;
2502
2503         /*
2504          * A non-Multi-Rail peer is not supposed to be capable of
2505          * sending a push.
2506          */
2507         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)) {
2508                 CERROR("Push from non-Multi-Rail peer %s dropped\n",
2509                        libcfs_nidstr(&lp->lp_primary_nid));
2510                 goto out;
2511         }
2512
2513         /*
2514          * The peer may have discovery disabled at its end. Set
2515          * NO_DISCOVERY as appropriate.
2516          */
2517         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY)) {
2518                 CDEBUG(D_NET, "Peer %s has discovery disabled\n",
2519                        libcfs_nidstr(&lp->lp_primary_nid));
2520                 /*
2521                  * Mark the peer for deletion if we already know about it
2522                  * and it is transitioning from discovery enabled to disabled.
2523                  */
2524                 if (!(lp->lp_state & (LNET_PEER_NO_DISCOVERY |
2525                                       LNET_PEER_DISCOVERING)) &&
2526                      lp->lp_state & LNET_PEER_DISCOVERED) {
2527                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2528                                libcfs_nidstr(&lp->lp_primary_nid),
2529                                lp->lp_state);
2530                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2531                 }
2532                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2533         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2534                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2535                        libcfs_nidstr(&lp->lp_primary_nid));
2536                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2537         }
2538
2539         /*
2540          * Update the MULTI_RAIL flag based on the push. If the peer
2541          * was configured with DLC then the setting should match what
2542          * DLC put in.
2543          * NB: We verified above that the MR feature bit is set in pi_features
2544          */
2545         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2546                 CDEBUG(D_NET, "peer %s(%p) is MR\n",
2547                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2548         } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2549                 CWARN("Push says %s is Multi-Rail, DLC says not\n",
2550                       libcfs_nidstr(&lp->lp_primary_nid));
2551         } else if (lnet_peer_discovery_disabled) {
2552                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled locally\n",
2553                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2554         } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2555                 CDEBUG(D_NET, "peer %s(%p) not MR: DD disabled remotely\n",
2556                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2557         } else {
2558                 CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2559                        libcfs_nidstr(&lp->lp_primary_nid), lp);
2560                 lp->lp_state |= LNET_PEER_MULTI_RAIL;
2561                 lnet_peer_clr_non_mr_pref_nids(lp);
2562         }
2563
2564         /* Check for truncation of the Put message. Clear the
2565          * NIDS_UPTODATE flag and set FORCE_PING to trigger a ping,
2566          * and tell discovery to allocate a bigger buffer.
2567          */
2568         if (ev->mlength < ev->rlength) {
2569                 if (the_lnet.ln_push_target_nbytes < infobytes)
2570                         the_lnet.ln_push_target_nbytes = infobytes;
2571                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2572                 lp->lp_state |= LNET_PEER_FORCE_PING;
2573                 CDEBUG(D_NET, "Truncated Push from %s (%d bytes)\n",
2574                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2575                 goto out;
2576         }
2577
2578         /* always assume new data */
2579         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2580         lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
2581
2582         /* If there is data present that hasn't been processed yet,
2583          * we'll replace it if the Put contained newer data and it
2584          * fits. We're racing with a Ping or earlier Push in this
2585          * case.
2586          */
2587         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
2588                 if (LNET_PING_BUFFER_SEQNO(pbuf) >
2589                         LNET_PING_BUFFER_SEQNO(lp->lp_data) &&
2590                     infobytes <= lp->lp_data->pb_nbytes) {
2591                         memcpy(&lp->lp_data->pb_info, &pbuf->pb_info,
2592                                infobytes);
2593                         CDEBUG(D_NET, "Ping/Push race from %s: %u vs %u\n",
2594                               libcfs_nidstr(&lp->lp_primary_nid),
2595                               LNET_PING_BUFFER_SEQNO(pbuf),
2596                               LNET_PING_BUFFER_SEQNO(lp->lp_data));
2597                 }
2598                 goto out;
2599         }
2600
2601         /*
2602          * Allocate a buffer to copy the data. On a failure we drop
2603          * the Push and set FORCE_PING to force the discovery
2604          * thread to fix the problem by pinging the peer.
2605          */
2606         lp->lp_data = lnet_ping_buffer_alloc(lp->lp_data_bytes, GFP_ATOMIC);
2607         if (!lp->lp_data) {
2608                 lp->lp_state |= LNET_PEER_FORCE_PING;
2609                 CDEBUG(D_NET, "Cannot allocate Push buffer for %s %u\n",
2610                        libcfs_nidstr(&lp->lp_primary_nid),
2611                        LNET_PING_BUFFER_SEQNO(pbuf));
2612                 goto out;
2613         }
2614
2615         /* Success */
2616         unsafe_memcpy(&lp->lp_data->pb_info, &pbuf->pb_info, infobytes,
2617                       FLEXIBLE_OBJECT);
2618         lp->lp_state |= LNET_PEER_DATA_PRESENT;
2619         CDEBUG(D_NET, "Received Push %s %u\n",
2620                libcfs_nidstr(&lp->lp_primary_nid),
2621                LNET_PING_BUFFER_SEQNO(pbuf));
2622
2623 out:
2624         /* We've processed this buffer. It can be reposted */
2625         pbuf->pb_needs_post = true;
2626
2627         /*
2628          * Queue the peer for discovery if it is not up to date. If the peer
2629          * was already queued, force it onto the request queue and wake the
2630          * discovery thread, because its status has changed.
2631          */
2632         spin_unlock(&lp->lp_lock);
2633         lnet_net_lock(LNET_LOCK_EX);
2634         if (!lnet_peer_is_uptodate(lp) && lnet_peer_queue_for_discovery(lp)) {
2635                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
2636                 wake_up(&the_lnet.ln_dc_waitq);
2637         }
2638         /* Drop refcount from lookup */
2639         lnet_peer_decref_locked(lp);
2640         lnet_net_unlock(LNET_LOCK_EX);
2641 }
2642
2643 /*
2644  * Clear the discovery error state, unless we're already discovering
2645  * this peer, in which case the error is current.
2646  */
2647 static void lnet_peer_clear_discovery_error(struct lnet_peer *lp)
2648 {
2649         spin_lock(&lp->lp_lock);
2650         if (!(lp->lp_state & LNET_PEER_DISCOVERING))
2651                 lp->lp_dc_error = 0;
2652         spin_unlock(&lp->lp_lock);
2653 }
2654
2655 /*
2656  * Peer discovery slow path. The ln_api_mutex is held on entry, and
2657  * dropped/retaken within this function. An lnet_peer_ni is passed in
2658  * because discovery could tear down an lnet_peer.
2659  */
2660 int
2661 lnet_discover_peer_locked(struct lnet_peer_ni *lpni, int cpt, bool block)
2662 {
2663         DEFINE_WAIT(wait);
2664         struct lnet_peer *lp = NULL;
2665         int rc = 0;
2666         int count = 0;
2667
2668 again:
2669         if (lp)
2670                 lnet_peer_decref_locked(lp);
2671         lnet_net_unlock(cpt);
2672         lnet_net_lock(LNET_LOCK_EX);
2673         lp = lpni->lpni_peer_net->lpn_peer;
2674         lnet_peer_clear_discovery_error(lp);
2675
2676         /*
2677          * We're willing to be interrupted. The lpni can become a
2678          * zombie if we race with DLC, so we must check for that.
2679          */
2680         for (;;) {
2681                 /* Keep lp alive when the lnet_net_lock is unlocked */
2682                 lnet_peer_addref_locked(lp);
2683                 prepare_to_wait(&lp->lp_dc_waitq, &wait, TASK_INTERRUPTIBLE);
2684                 if (signal_pending(current))
2685                         break;
2686                 if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2687                         break;
2688                 /*
2689                  * Don't repeat discovery if discovery is disabled. This is
2690                  * done to ensure we can still use discovery as a standard
2691                  * ping, for backwards compatibility with routers which do
2692                  * not support discovery or have it disabled.
2693                  */
2694                 if (lnet_is_discovery_disabled(lp) && count > 0)
2695                         break;
2696                 if (lp->lp_dc_error)
2697                         break;
2698                 if (lnet_peer_is_uptodate(lp))
2699                         break;
2700                 if (lp->lp_state & LNET_PEER_MARK_DELETED)
2701                         break;
2702                 lnet_peer_queue_for_discovery(lp);
2703                 count++;
2704                 CDEBUG(D_NET, "Discovery attempt # %d\n", count);
2705
2706                 /*
2707                  * If caller requested a non-blocking operation then
2708                  * return immediately. Once discovery is complete any
2709                  * pending messages that were stopped due to discovery
2710                  * will be transmitted.
2711                  */
2712                 if (!block)
2713                         break;
2714
2715                 lnet_net_unlock(LNET_LOCK_EX);
2716                 schedule();
2717                 finish_wait(&lp->lp_dc_waitq, &wait);
2718                 lnet_net_lock(LNET_LOCK_EX);
2719                 lnet_peer_decref_locked(lp);
2720                 /* Peer may have changed */
2721                 lp = lpni->lpni_peer_net->lpn_peer;
2722         }
2723         finish_wait(&lp->lp_dc_waitq, &wait);
2724
2725         lnet_net_unlock(LNET_LOCK_EX);
2726         lnet_net_lock(cpt);
2727         /*
2728          * The peer may have changed, so re-check and rediscover if that turns
2729          * out to have been the case. The reference count on lp ensured that
2730          * even if it was unlinked from lpni the memory could not be recycled.
2731          * Thus the check below is sufficient to determine whether the peer
2732          * changed. If the peer changed, then lp must not be dereferenced.
2733          */
2734         if (lp != lpni->lpni_peer_net->lpn_peer)
2735                 goto again;
2736
2737         if (signal_pending(current))
2738                 rc = -EINTR;
2739         else if (the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING)
2740                 rc = -ESHUTDOWN;
2741         else if (lp->lp_dc_error)
2742                 rc = lp->lp_dc_error;
2743         else if (!block)
2744                 CDEBUG(D_NET, "non-blocking discovery\n");
2745         else if (!lnet_peer_is_uptodate(lp) &&
2746                  !(lnet_is_discovery_disabled(lp) ||
2747                    (lp->lp_state & LNET_PEER_MARK_DELETED)))
2748                 goto again;
2749
2750         CDEBUG(D_NET, "peer %s NID %s: %d. %s\n",
2751                (lp ? libcfs_nidstr(&lp->lp_primary_nid) : "(none)"),
2752                libcfs_nidstr(&lpni->lpni_nid), rc,
2753                (!block) ? "pending discovery" : "discovery complete");
2754         lnet_peer_decref_locked(lp);
2755
2756         return rc;
2757 }
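/*
 * Illustrative sketch (not part of this file): blocking discovery of the
 * peer owning "lpni", entered with the per-CPT net lock held, mirroring the
 * calls made from LNetPrimaryNID() above. "cpt" and the "out" label are
 * hypothetical.
 *
 *	rc = lnet_discover_peer_locked(lpni, cpt, true);
 *	if (rc)
 *		goto out;
 *
 * The peer (and even the lpni) may have been replaced during discovery, so
 * callers typically look up the lpni again afterwards, as LNetPrimaryNID()
 * does.
 */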
2758
2759 /* Handle an incoming ack for a push. */
2760 static void
2761 lnet_discovery_event_ack(struct lnet_peer *lp, struct lnet_event *ev)
2762 {
2763         struct lnet_ping_buffer *pbuf;
2764
2765         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2766         spin_lock(&lp->lp_lock);
2767         lp->lp_state &= ~LNET_PEER_PUSH_SENT;
2768         lp->lp_push_error = ev->status;
2769         if (ev->status)
2770                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
2771         else
2772                 lp->lp_node_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2773         spin_unlock(&lp->lp_lock);
2774
2775         CDEBUG(D_NET, "peer %s ev->status %d\n",
2776                libcfs_nidstr(&lp->lp_primary_nid), ev->status);
2777 }
2778
2779 static bool find_primary(struct lnet_nid *nid,
2780                          struct lnet_ping_buffer *pbuf)
2781 {
2782         struct lnet_ping_info *pi = &pbuf->pb_info;
2783         struct lnet_ping_iter piter;
2784         __u32 *stp;
2785
2786         if (pi->pi_features & LNET_PING_FEAT_PRIMARY_LARGE) {
2787                 /* First large nid is primary */
2788                 for (stp = ping_iter_first(&piter, pbuf, nid);
2789                      stp;
2790                      stp = ping_iter_next(&piter, nid)) {
2791                         if (nid_is_nid4(nid))
2792                                 continue;
2793                         /* nid has already been copied in */
2794                         return true;
2795                 }
2796                 /* No large NIDs found, which is unexpected; ignore the
2797                  * flag and use the first NID.
2798                  */
2799         }
2800         /* pi_ni[1] is primary */
2801         if (pi->pi_nnis < 2)
2802                 return false;
2803         lnet_nid4_to_nid(pbuf->pb_info.pi_ni[1].ns_nid, nid);
2804         return true;
2805 }
2806
2807 /* Handle a Reply message. This is the reply to a Ping message. */
2808 static void
2809 lnet_discovery_event_reply(struct lnet_peer *lp, struct lnet_event *ev)
2810 {
2811         struct lnet_ping_buffer *pbuf;
2812         struct lnet_nid primary;
2813         int infobytes;
2814         int rc;
2815         bool ping_feat_disc;
2816
2817         spin_lock(&lp->lp_lock);
2818
2819         lp->lp_disc_src_nid = ev->target.nid;
2820         lp->lp_disc_dst_nid = ev->source.nid;
2821
2822         /*
2823          * If some kind of error happened the contents of message
2824          * cannot be used. Set PING_FAILED to trigger a retry.
2825          */
2826         if (ev->status) {
2827                 lp->lp_state |= LNET_PEER_PING_FAILED;
2828                 lp->lp_ping_error = ev->status;
2829                 CDEBUG(D_NET, "Ping Reply error %d from %s (source %s)\n",
2830                        ev->status,
2831                        libcfs_nidstr(&lp->lp_primary_nid),
2832                        libcfs_nidstr(&ev->source.nid));
2833                 goto out;
2834         }
2835
2836         pbuf = LNET_PING_INFO_TO_BUFFER(ev->md_start);
2837         if (pbuf->pb_info.pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
2838                 lnet_swap_pinginfo(pbuf);
2839
2840         /*
2841          * A reply with invalid or corrupted info. Set PING_FAILED to
2842          * trigger a retry.
2843          */
2844         rc = lnet_ping_info_validate(&pbuf->pb_info);
2845         if (rc) {
2846                 lp->lp_state |= LNET_PEER_PING_FAILED;
2847                 lp->lp_ping_error = 0;
2848                 CDEBUG(D_NET, "Corrupted Ping Reply from %s: %d\n",
2849                        libcfs_nidstr(&lp->lp_primary_nid), rc);
2850                 goto out;
2851         }
2852
2853         /*
2854          * The peer may have discovery disabled at its end. Set
2855          * NO_DISCOVERY as appropriate.
2856          */
2857         ping_feat_disc = pbuf->pb_info.pi_features & LNET_PING_FEAT_DISCOVERY;
2858         if (!ping_feat_disc || lnet_peer_discovery_disabled) {
2859                 CDEBUG(D_NET, "Peer %s has discovery %s, local discovery %s\n",
2860                        libcfs_nidstr(&lp->lp_primary_nid),
2861                        ping_feat_disc ? "enabled" : "disabled",
2862                        lnet_peer_discovery_disabled ? "disabled" : "enabled");
2863
2864                 /* Detect whether this peer has toggled discovery from on to
2865                  * off and whether we can delete and re-create the peer. Peers
2866                  * that were manually configured cannot be deleted by discovery.
2867                  * We need to delete this peer and re-create it if the peer was
2868                  * not configured manually, is currently considered DD capable,
2869                  * and either:
2870                  * 1. We've already discovered the peer (the peer has toggled
2871                  *    the discovery feature from on to off), or
2872                  * 2. The peer is considered MR, but it was not user configured
2873                  *    (this was a "temporary" peer created via the kernel APIs
2874                  *     that we're discovering for the first time)
2875                  */
2876                 if (!(lp->lp_state & (LNET_PEER_CONFIGURED |
2877                                       LNET_PEER_NO_DISCOVERY)) &&
2878                     (lp->lp_state & (LNET_PEER_DISCOVERED |
2879                                      LNET_PEER_MULTI_RAIL))) {
2880                         CDEBUG(D_NET, "Marking %s:0x%x for deletion\n",
2881                                libcfs_nidstr(&lp->lp_primary_nid),
2882                                lp->lp_state);
2883                         lp->lp_state |= LNET_PEER_MARK_DELETION;
2884                 }
2885                 lp->lp_state |= LNET_PEER_NO_DISCOVERY;
2886         } else {
2887                 CDEBUG(D_NET, "Peer %s has discovery enabled\n",
2888                        libcfs_nidstr(&lp->lp_primary_nid));
2889                 lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
2890         }
2891
2892         /*
2893          * Update the MULTI_RAIL flag based on the reply. If the peer
2894          * was configured with DLC then the setting should match what
2895          * DLC put in.
2896          */
2897         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL) {
2898                 if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2899                         CDEBUG(D_NET, "peer %s(%p) is MR\n",
2900                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2901                 } else if (lp->lp_state & LNET_PEER_CONFIGURED) {
2902                         CWARN("Reply says %s is Multi-Rail, DLC says not\n",
2903                               libcfs_nidstr(&lp->lp_primary_nid));
2904                 } else if (lnet_peer_discovery_disabled) {
2905                         CDEBUG(D_NET,
2906                                "peer %s(%p) not MR: DD disabled locally\n",
2907                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2908                 } else if (lp->lp_state & LNET_PEER_NO_DISCOVERY) {
2909                         CDEBUG(D_NET,
2910                                "peer %s(%p) not MR: DD disabled remotely\n",
2911                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2912                 } else {
2913                         CDEBUG(D_NET, "peer %s(%p) is MR capable\n",
2914                                libcfs_nidstr(&lp->lp_primary_nid), lp);
2915                         lp->lp_state |= LNET_PEER_MULTI_RAIL;
2916                         lnet_peer_clr_non_mr_pref_nids(lp);
2917                 }
2918         } else if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
2919                 if (lp->lp_state & LNET_PEER_CONFIGURED) {
2920                         CWARN("DLC says %s is Multi-Rail, Reply says not\n",
2921                               libcfs_nidstr(&lp->lp_primary_nid));
2922                 } else {
2923                         CERROR("Multi-Rail state vanished from %s\n",
2924                                libcfs_nidstr(&lp->lp_primary_nid));
2925                         lp->lp_state &= ~LNET_PEER_MULTI_RAIL;
2926                 }
2927         }
2928
2929         infobytes = lnet_ping_info_size(&pbuf->pb_info);
2930         /*
2931          * Make sure we'll allocate the correct size ping buffer when
2932          * pinging the peer.
2933          */
2934         if (lp->lp_data_bytes < infobytes)
2935                 lp->lp_data_bytes = infobytes;
2936
2937         /* Check for truncation of the Reply. Clear PING_SENT and set
2938          * PING_FAILED to trigger a retry.
2939          */
2940         if (pbuf->pb_nbytes < infobytes) {
2941                 if (the_lnet.ln_push_target_nbytes < infobytes)
2942                         the_lnet.ln_push_target_nbytes = infobytes;
2943                 lp->lp_state |= LNET_PEER_PING_FAILED;
2944                 lp->lp_ping_error = 0;
2945                 CDEBUG(D_NET, "Truncated Reply from %s (%d bytes)\n",
2946                        libcfs_nidstr(&lp->lp_primary_nid), infobytes);
2947                 goto out;
2948         }
2949
2950         /*
2951          * Check the sequence numbers in the reply. These are only
2952          * available if the reply came from a Multi-Rail peer.
2953          */
2954         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL &&
2955             find_primary(&primary, pbuf) &&
2956             nid_same(&lp->lp_primary_nid, &primary)) {
2957                 if (LNET_PING_BUFFER_SEQNO(pbuf) < lp->lp_peer_seqno)
2958                         CDEBUG(D_NET, "peer %s: seq# got %u have %u. peer rebooted?\n",
2959                                 libcfs_nidstr(&lp->lp_primary_nid),
2960                                 LNET_PING_BUFFER_SEQNO(pbuf),
2961                                 lp->lp_peer_seqno);
2962
2963                 lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
2964         }
2965
2966         /* We're happy with the state of the data in the buffer. */
2967         CDEBUG(D_NET, "peer %s data present %u. state = 0x%x\n",
2968                libcfs_nidstr(&lp->lp_primary_nid), lp->lp_peer_seqno,
2969                lp->lp_state);
2970         if (lp->lp_state & LNET_PEER_DATA_PRESENT)
2971                 lnet_ping_buffer_decref(lp->lp_data);
2972         else
2973                 lp->lp_state |= LNET_PEER_DATA_PRESENT;
2974         lnet_ping_buffer_addref(pbuf);
2975         lp->lp_data = pbuf;
2976 out:
2977         lp->lp_state &= ~LNET_PEER_PING_SENT;
2978         spin_unlock(&lp->lp_lock);
2979 }
2980
2981 /*
2982  * Send event handling. Only matters for error cases, where we clean
2983  * up state on the peer and peer_ni that would otherwise be updated in
2984  * the REPLY event handler for a successful Ping, and the ACK event
2985  * handler for a successful Push.
2986  */
2987 static int
2988 lnet_discovery_event_send(struct lnet_peer *lp, struct lnet_event *ev)
2989 {
2990         int rc = 0;
2991
2992         if (!ev->status)
2993                 goto out;
2994
2995         spin_lock(&lp->lp_lock);
2996         if (ev->msg_type == LNET_MSG_GET) {
2997                 lp->lp_state &= ~LNET_PEER_PING_SENT;
2998                 lp->lp_state |= LNET_PEER_PING_FAILED;
2999                 lp->lp_ping_error = ev->status;
3000         } else { /* ev->msg_type == LNET_MSG_PUT */
3001                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3002                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3003                 lp->lp_push_error = ev->status;
3004         }
3005         spin_unlock(&lp->lp_lock);
3006         rc = LNET_REDISCOVER_PEER;
3007 out:
3008         CDEBUG(D_NET, "%s Send to %s: %d\n",
3009                 (ev->msg_type == LNET_MSG_GET ? "Ping" : "Push"),
3010                 libcfs_nidstr(&ev->target.nid), rc);
3011         return rc;
3012 }
3013
3014 /*
3015  * Unlink event handling. This event is only seen if a call to
3016  * LNetMDUnlink() caused the event to be unlinked. If this call was
3017  * made after the event was set up in LNetGet() or LNetPut() then we
3018  * assume the Ping or Push timed out.
3019  */
3020 static void
3021 lnet_discovery_event_unlink(struct lnet_peer *lp, struct lnet_event *ev)
3022 {
3023         spin_lock(&lp->lp_lock);
3024         /* We've passed through LNetGet() */
3025         if (lp->lp_state & LNET_PEER_PING_SENT) {
3026                 lp->lp_state &= ~LNET_PEER_PING_SENT;
3027                 lp->lp_state |= LNET_PEER_PING_FAILED;
3028                 lp->lp_ping_error = -ETIMEDOUT;
3029                 CDEBUG(D_NET, "Ping Unlink for message to peer %s\n",
3030                         libcfs_nidstr(&lp->lp_primary_nid));
3031         }
3032         /* We've passed through LNetPut() */
3033         if (lp->lp_state & LNET_PEER_PUSH_SENT) {
3034                 lp->lp_state &= ~LNET_PEER_PUSH_SENT;
3035                 lp->lp_state |= LNET_PEER_PUSH_FAILED;
3036                 lp->lp_push_error = -ETIMEDOUT;
3037                 CDEBUG(D_NET, "Push Unlink for message to peer %s\n",
3038                         libcfs_nidstr(&lp->lp_primary_nid));
3039         }
3040         spin_unlock(&lp->lp_lock);
3041 }
3042
3043 /*
3044  * Event handler for the discovery EQ.
3045  *
3046  * Called with lnet_res_lock(cpt) held. The cpt is the
3047  * lnet_cpt_of_cookie() of the md handle cookie.
3048  */
3049 static void lnet_discovery_event_handler(struct lnet_event *event)
3050 {
3051         struct lnet_peer *lp = event->md_user_ptr;
3052         struct lnet_ping_buffer *pbuf;
3053         int rc;
3054
3055         /* discovery needs to take another look */
3056         rc = LNET_REDISCOVER_PEER;
3057
3058         CDEBUG(D_NET, "Received event: %d\n", event->type);
3059
3060         switch (event->type) {
3061         case LNET_EVENT_ACK:
3062                 lnet_discovery_event_ack(lp, event);
3063                 break;
3064         case LNET_EVENT_REPLY:
3065                 lnet_discovery_event_reply(lp, event);
3066                 break;
3067         case LNET_EVENT_SEND:
3068                 /* Only send failure triggers a retry. */
3069                 rc = lnet_discovery_event_send(lp, event);
3070                 break;
3071         case LNET_EVENT_UNLINK:
3072                 /* LNetMDUnlink() was called */
3073                 lnet_discovery_event_unlink(lp, event);
3074                 break;
3075         default:
3076                 /* Invalid events. */
3077                 LBUG();
3078         }
3079         lnet_net_lock(LNET_LOCK_EX);
3080
3081         /* put peer back at end of request queue, if discovery not already
3082          * done */
3083         if (rc == LNET_REDISCOVER_PEER && !lnet_peer_is_uptodate(lp) &&
3084             lnet_peer_queue_for_discovery(lp)) {
3085                 list_move_tail(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3086                 wake_up(&the_lnet.ln_dc_waitq);
3087         }
3088         if (event->unlinked) {
3089                 pbuf = LNET_PING_INFO_TO_BUFFER(event->md_start);
3090                 lnet_ping_buffer_decref(pbuf);
3091                 lnet_peer_decref_locked(lp);
3092         }
3093         lnet_net_unlock(LNET_LOCK_EX);
3094 }
3095
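/*
 * Iterator over the NI status entries in a ping buffer. The buffer
 * starts with an array of pi_nnis fixed-size lnet_ni_status records
 * (the first typically describing the loopback NI); when the sender
 * sets LNET_PING_FEAT_LARGE_ADDR, variable-size lnet_ni_large_status
 * records follow that array.
 */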
3096 u32 *ping_iter_first(struct lnet_ping_iter *pi,
3097                      struct lnet_ping_buffer *pbuf,
3098                      struct lnet_nid *nid)
3099 {
3100         pi->pinfo = &pbuf->pb_info;
3101         pi->pos = &pbuf->pb_info.pi_ni;
3102         pi->end = (void *)pi->pinfo +
3103                   min_t(int, pbuf->pb_nbytes,
3104                         lnet_ping_info_size(pi->pinfo));
3105         /* lnet_ping_info_validate ensures there will be one
3106          * lnet_ni_status at the start
3107          */
3108         if (nid)
3109                 lnet_nid4_to_nid(pbuf->pb_info.pi_ni[0].ns_nid, nid);
3110
3111         pi->pos += sizeof(struct lnet_ni_status);
3112         return &pbuf->pb_info.pi_ni[0].ns_status;
3113 }
3114
3115 u32 *ping_iter_next(struct lnet_ping_iter *pi, struct lnet_nid *nid)
3116 {
3117         int off = offsetof(struct lnet_ping_info, pi_ni[pi->pinfo->pi_nnis]);
3118
3119         if (pi->pos < ((void *)pi->pinfo + off)) {
3120                 struct lnet_ni_status *ns = pi->pos;
3121
3122                 pi->pos = ns + 1;
3123                 if (pi->pos > pi->end)
3124                         return NULL;
3125                 if (nid)
3126                         lnet_nid4_to_nid(ns->ns_nid, nid);
3127                 return &ns->ns_status;
3128         }
3129
3130         while (pi->pinfo->pi_features & LNET_PING_FEAT_LARGE_ADDR) {
3131                 struct lnet_ni_large_status *lns = pi->pos;
3132
3133                 if (pi->pos + 8 > pi->end)
3134                         /* Not safe to examine next */
3135                         return NULL;
3136                 pi->pos = lnet_ping_sts_next(lns);
3137                 if (pi->pos > pi->end)
3138                         return NULL;
3139                 if (NID_BYTES(&lns->ns_nid) > sizeof(struct lnet_nid))
3140                         continue;
3141                 if (nid)
3142                         *nid = lns->ns_nid;
3143                 return &lns->ns_status;
3144         }
3145         return NULL;
3146 }
3147
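/*
 * A minimal usage sketch for the iterator pair above; "process()" is a
 * stand-in for whatever the caller does with each NID/status pair, and
 * ping_info_count_entries() below is the simplest real example:
 *
 *	for (st = ping_iter_first(&pi, pbuf, &nid); st;
 *	     st = ping_iter_next(&pi, &nid))
 *		process(&nid, *st);
 */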
3148 int ping_info_count_entries(struct lnet_ping_buffer *pbuf)
3149 {
3150         struct lnet_ping_iter pi;
3151         u32 *st;
3152         int nnis = 0;
3153
3154         for (st = ping_iter_first(&pi, pbuf, NULL); st;
3155              st = ping_iter_next(&pi, NULL))
3156                 nnis += 1;
3157
3158         return nnis;
3159 }
3160
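/*
 * Fold an NI status reported in a ping reply into health tracking: a
 * DOWN status is handled like any other remote failure, while an UP
 * status for a peer NI with no recorded last-alive time resets its
 * health value to the maximum.
 */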
3161 static inline void handle_disc_lpni_health(struct lnet_peer_ni *lpni)
3162 {
3163         if (lpni->lpni_ns_status == LNET_NI_STATUS_DOWN)
3164                 lnet_handle_remote_failure_locked(lpni);
3165         else if (lpni->lpni_ns_status == LNET_NI_STATUS_UP &&
3166                  !lpni->lpni_last_alive)
3167                 atomic_set(&lpni->lpni_healthv, LNET_MAX_HEALTH_VALUE);
3168 }
3169
3170 /*
3171  * Build a peer from incoming data.
3172  *
3173  * The NIDs in the incoming data are supposed to be structured as follows:
3174  *  - loopback
3175  *  - primary NID
3176  *  - other NIDs in same net
3177  *  - NIDs in second net
3178  *  - NIDs in third net
3179  *  - ...
3180  * This is due to the way the list of NIDs in the data is created.
3181  *
3182  * Note that this function will mark the peer uptodate unless an
3183  * ENOMEM is encountered. All other errors are due to a conflict
3184  * between the DLC configuration and what discovery sees. We treat DLC
3185  * as binding, and therefore set the NIDS_UPTODATE flag to prevent the
3186  * peer from becoming stuck in discovery.
3187  */
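/*
 * Worked example (a sketch, not taken from a real trace): if the peer
 * currently has NIDs {A, B} and the ping buffer lists {lo, A, C}, then
 * C is collected in addnis[], B in delnis[], and A only has its cached
 * lnet_ni_status refreshed. The loopback NID is never deleted.
 */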
3188 static int lnet_peer_merge_data(struct lnet_peer *lp,
3189                                 struct lnet_ping_buffer *pbuf)
3190 {
3191         struct lnet_peer_net *lpn;
3192         struct lnet_peer_ni *lpni;
3193         struct lnet_nid *curnis = NULL;
3194         struct lnet_ni_large_status *addnis = NULL;
3195         struct lnet_nid *delnis = NULL;
3196         struct lnet_ping_iter pi;
3197         struct lnet_nid nid;
3198         u32 *stp;
3199         struct lnet_nid primary = {};
3200         bool want_large_primary;
3201         unsigned int flags;
3202         int ncurnis;
3203         int naddnis;
3204         int ndelnis;
3205         int nnis = 0;
3206         int i;
3207         int j;
3208         int rc;
3209         __u32 old_st;
3210
3211         flags = LNET_PEER_DISCOVERED;
3212         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3213                 flags |= LNET_PEER_MULTI_RAIL;
3214
3215         /*
3216          * Cache the routing feature for the peer; whether it is enabled
3217          * or disabled as reported by the remote peer.
3218          */
3219         spin_lock(&lp->lp_lock);
3220         if (!(pbuf->pb_info.pi_features & LNET_PING_FEAT_RTE_DISABLED))
3221                 lp->lp_state |= LNET_PEER_ROUTER_ENABLED;
3222         else
3223                 lp->lp_state &= ~LNET_PEER_ROUTER_ENABLED;
3224         spin_unlock(&lp->lp_lock);
3225
3226         nnis = ping_info_count_entries(pbuf);
3227         nnis = max_t(int, lp->lp_nnis, nnis);
3228         CFS_ALLOC_PTR_ARRAY(curnis, nnis);
3229         CFS_ALLOC_PTR_ARRAY(addnis, nnis);
3230         CFS_ALLOC_PTR_ARRAY(delnis, nnis);
3231         if (!curnis || !addnis || !delnis) {
3232                 rc = -ENOMEM;
3233                 goto out;
3234         }
3235         ncurnis = 0;
3236         naddnis = 0;
3237         ndelnis = 0;
3238
3239         /* Construct the list of NIDs present in peer. */
3240         lpni = NULL;
3241         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
3242                 curnis[ncurnis++] = lpni->lpni_nid;
3243
3244         /* Check for NIDs in pbuf not present in curnis[].
3245          * Skip the first, which is loop-back.  Take second as
3246          * primary, unless a large primary is found.
3247          */
3248         ping_iter_first(&pi, pbuf, NULL);
3249         stp = ping_iter_next(&pi, &nid);
3250         if (stp)
3251                 primary = nid;
3252         want_large_primary = (pbuf->pb_info.pi_features &
3253                               LNET_PING_FEAT_PRIMARY_LARGE);
3254         for (; stp; stp = ping_iter_next(&pi, &nid)) {
3255                 for (j = 0; j < ncurnis; j++)
3256                         if (nid_same(&nid, &curnis[j]))
3257                                 break;
3258                 if (j == ncurnis) {
3259                         addnis[naddnis].ns_nid = nid;
3260                         addnis[naddnis].ns_status = *stp;
3261                         naddnis += 1;
3262                 }
3263                 if (want_large_primary && nid.nid_size) {
3264                         primary = nid;
3265                         want_large_primary = false;
3266                 }
3267         }
3268         /*
3269          * Check for NIDs in curnis[] not present in pbuf.
3270          * The nested loop starts at 1 to skip the loopback NID.
3271          *
3272          * But never add the loopback NID to delnis[]: if it is
3273          * present in curnis[] then this peer is for this node.
3274          */
3275         for (i = 0; i < ncurnis; i++) {
3276                 if (nid_is_lo0(&curnis[i]))
3277                         continue;
3278                 ping_iter_first(&pi, pbuf, NULL);
3279                 while ((stp = ping_iter_next(&pi, &nid)) != NULL) {
3280                         if (nid_same(&curnis[i], &nid)) {
3281                                 /*
3282                                  * update the information we cache for the
3283                                  * peer with the latest information we
3284                                  * received
3285                                  */
3286                                 lpni = lnet_peer_ni_find_locked(&curnis[i]);
3287                                 if (lpni) {
3288                                         old_st = lpni->lpni_ns_status;
3289                                         lpni->lpni_ns_status = *stp;
3290                                         if (old_st != lpni->lpni_ns_status)
3291                                                 handle_disc_lpni_health(lpni);
3292                                         lnet_peer_ni_decref_locked(lpni);
3293                                 }
3294                                 break;
3295                         }
3296                 }
3297                 if (!stp)
3298                         delnis[ndelnis++] = curnis[i];
3299         }
3300
3301         /*
3302          * If we get here and discovery is disabled then we don't want
3303          * to add or delete any NIs. We just update the ones we have
3304          * information on and call it a day.
3305          */
3306         rc = 0;
3307         if (lnet_is_discovery_disabled(lp))
3308                 goto out;
3309
3310         for (i = 0; i < naddnis; i++) {
3311                 rc = lnet_peer_add_nid(lp, &addnis[i].ns_nid, flags);
3312                 if (rc) {
3313                         CERROR("Error adding NID %s to peer %s: %d\n",
3314                                libcfs_nidstr(&addnis[i].ns_nid),
3315                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3316                         if (rc == -ENOMEM)
3317                                 goto out;
3318                 }
3319                 lpni = lnet_peer_ni_find_locked(&addnis[i].ns_nid);
3320                 if (lpni) {
3321                         lpni->lpni_ns_status = addnis[i].ns_status;
3322                         handle_disc_lpni_health(lpni);
3323                         lnet_peer_ni_decref_locked(lpni);
3324                 }
3325         }
3326
3327         for (i = 0; i < ndelnis; i++) {
3328                 /*
3329                  * for routers it's okay to delete the primary_nid because
3330                  * the upper layers don't really rely on it. So if we're
3331                  * being told that the router changed its primary_nid
3332                  * then it's okay to delete it.
3333                  */
3334                 if (lp->lp_rtr_refcount > 0)
3335                         flags |= LNET_PEER_RTR_NI_FORCE_DEL;
3336                 rc = lnet_peer_del_nid(lp, &delnis[i], flags);
3337                 if (rc) {
3338                         CERROR("Error deleting NID %s from peer %s: %d\n",
3339                                libcfs_nidstr(&delnis[i]),
3340                                libcfs_nidstr(&lp->lp_primary_nid), rc);
3341                         if (rc == -ENOMEM)
3342                                 goto out;
3343                 }
3344         }
3345
3346         /* The peer net for the primary NID should be the first entry in the
3347          * peer's lp_peer_nets list, and the peer NI for the primary NID should
3348          * be the first entry in its peer net's lpn_peer_nis list.
3349          */
3350         find_primary(&nid, pbuf);
3351         lpni = lnet_peer_ni_find_locked(&nid);
3352         if (!lpni) {
3353                 CERROR("Internal error: Failed to lookup peer NI for primary NID: %s\n",
3354                        libcfs_nidstr(&nid));
3355                 goto out;
3356         }
3357
3358         lpn = lpni->lpni_peer_net;
3359         if (lpn->lpn_peer_nets.prev != &lp->lp_peer_nets)
3360                 list_move(&lpn->lpn_peer_nets, &lp->lp_peer_nets);
3361
3362         if (lpni->lpni_peer_nis.prev != &lpni->lpni_peer_net->lpn_peer_nis)
3363                 list_move(&lpni->lpni_peer_nis,
3364                           &lpni->lpni_peer_net->lpn_peer_nis);
3365
3366         lnet_peer_ni_decref_locked(lpni);
3367         /*
3368          * Errors other than -ENOMEM are due to peers having been
3369          * configured with DLC. Ignore these because DLC overrides
3370          * Discovery.
3371          */
3372         rc = 0;
3373 out:
3374         /* If this peer is a gateway, invoke the routing callback to update
3375          * the associated route status
3376          */
3377         if (lp->lp_rtr_refcount > 0)
3378                 lnet_router_discovery_ping_reply(lp, pbuf);
3379
3380         CFS_FREE_PTR_ARRAY(curnis, nnis);
3381         CFS_FREE_PTR_ARRAY(addnis, nnis);
3382         CFS_FREE_PTR_ARRAY(delnis, nnis);
3383         lnet_ping_buffer_decref(pbuf);
3384         CDEBUG(D_NET, "peer %s (%p): %d\n",
3385                libcfs_nidstr(&lp->lp_primary_nid), lp, rc);
3386
3387         if (rc) {
3388                 spin_lock(&lp->lp_lock);
3389                 lp->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
3390                 lp->lp_state |= LNET_PEER_FORCE_PING;
3391                 spin_unlock(&lp->lp_lock);
3392         }
3393         return rc;
3394 }
3395
3396 /*
3397  * The data in pbuf says lp is its primary peer, but the data was
3398  * received by a different peer. Try to update lp with the data.
3399  */
3400 static int
3401 lnet_peer_set_primary_data(struct lnet_peer *lp, struct lnet_ping_buffer *pbuf)
3402 {
3403         struct lnet_handle_md mdh;
3404
3405         /* Queue lp for discovery, and force it on the request queue. */
3406         lnet_net_lock(LNET_LOCK_EX);
3407         if (lnet_peer_queue_for_discovery(lp))
3408                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_request);
3409         lnet_net_unlock(LNET_LOCK_EX);
3410
3411         LNetInvalidateMDHandle(&mdh);
3412
3413         /*
3414          * Decide whether we can move the peer to the DATA_PRESENT state.
3415          *
3416          * We replace stale data for a multi-rail peer, repair PING_FAILED
3417          * status, and preempt FORCE_PING.
3418          *
3419          * If after that we have DATA_PRESENT, we merge it into this peer.
3420          */
3421         spin_lock(&lp->lp_lock);
3422         if (lp->lp_state & LNET_PEER_MULTI_RAIL) {
3423                 if (lp->lp_peer_seqno < LNET_PING_BUFFER_SEQNO(pbuf)) {
3424                         lp->lp_peer_seqno = LNET_PING_BUFFER_SEQNO(pbuf);
3425                 } else if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3426                         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3427                         lnet_ping_buffer_decref(pbuf);
3428                         pbuf = lp->lp_data;
3429                         lp->lp_data = NULL;
3430                 }
3431         }
3432         if (lp->lp_state & LNET_PEER_DATA_PRESENT) {
3433                 lnet_ping_buffer_decref(lp->lp_data);
3434                 lp->lp_data = NULL;
3435                 lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3436         }
3437         if (lp->lp_state & LNET_PEER_PING_FAILED) {
3438                 mdh = lp->lp_ping_mdh;
3439                 LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3440                 lp->lp_state &= ~LNET_PEER_PING_FAILED;
3441                 lp->lp_ping_error = 0;
3442         }
3443         if (lp->lp_state & LNET_PEER_FORCE_PING)
3444                 lp->lp_state &= ~LNET_PEER_FORCE_PING;
3445         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3446         spin_unlock(&lp->lp_lock);
3447
3448         if (!LNetMDHandleIsInvalid(mdh))
3449                 LNetMDUnlink(mdh);
3450
3451         if (pbuf)
3452                 return lnet_peer_merge_data(lp, pbuf);
3453
3454         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3455         return 0;
3456 }
3457
3458 static bool lnet_is_nid_in_ping_info(struct lnet_nid *nid,
3459                                      struct lnet_ping_buffer *pbuf)
3460 {
3461         struct lnet_ping_iter pi;
3462         struct lnet_nid pnid;
3463         u32 *st;
3464
3465         for (st = ping_iter_first(&pi, pbuf, &pnid);
3466              st;
3467              st = ping_iter_next(&pi, &pnid))
3468                 if (nid_same(nid, &pnid))
3469                         return true;
3470         return false;
3471 }
3472
3473 /* Delete a peer that has been marked for deletion. NB: when this peer was added
3474  * to the discovery queue a reference was taken that will prevent the peer from
3475  * actually being freed by this function. After this function exits the
3476  * discovery thread should call lnet_peer_discovery_complete() which will
3477  * drop that reference as well as wake any waiters that may also be holding a
3478  * ref on the peer
3479  */
3480 static int lnet_peer_deletion(struct lnet_peer *lp)
3481 __must_hold(&lp->lp_lock)
3482 {
3483         struct list_head rlist;
3484         struct lnet_route *route, *tmp;
3485         int sensitivity = lp->lp_health_sensitivity;
3486         int rc = 0;
3487
3488         INIT_LIST_HEAD(&rlist);
3489
3490         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
3491                libcfs_nidstr(&lp->lp_primary_nid), lp, lp->lp_state);
3492
3493         /* no-op if lnet_peer_del() has already been called on this peer */
3494         if (lp->lp_state & LNET_PEER_MARK_DELETED)
3495                 goto clear_discovering;
3496
3497         spin_unlock(&lp->lp_lock);
3498
3499         mutex_lock(&the_lnet.ln_api_mutex);
3500         if (the_lnet.ln_state != LNET_STATE_RUNNING ||
3501             the_lnet.ln_dc_state != LNET_DC_STATE_RUNNING) {
3502                 mutex_unlock(&the_lnet.ln_api_mutex);
3503                 spin_lock(&lp->lp_lock);
3504                 rc = -ESHUTDOWN;
3505                 goto clear_discovering;
3506         }
3507
3508         lnet_peer_cancel_discovery(lp);
3509         lnet_net_lock(LNET_LOCK_EX);
3510         list_for_each_entry_safe(route, tmp,
3511                                  &lp->lp_routes,
3512                                  lr_gwlist)
3513                 lnet_move_route(route, NULL, &rlist);
3514
3515         /* lnet_peer_del_locked() deletes all the peer NIs owned by this peer */
3516         rc = lnet_peer_del_locked(lp);
3517         if (rc)
3518                 CNETERR("Internal error: Unable to delete peer %s rc %d\n",
3519                         libcfs_nidstr(&lp->lp_primary_nid), rc);
3520
3521         lnet_net_unlock(LNET_LOCK_EX);
3522
3523         list_for_each_entry_safe(route, tmp,
3524                                  &rlist, lr_list) {
3525                 /* re-add these routes */
3526                 lnet_add_route(route->lr_net,
3527                                route->lr_hops,
3528                                &route->lr_nid,
3529                                route->lr_priority,
3530                                sensitivity);
3531                 LIBCFS_FREE(route, sizeof(*route));
3532         }
3533
3534         mutex_unlock(&the_lnet.ln_api_mutex);
3535
3536         spin_lock(&lp->lp_lock);
3537
3538         rc = 0;
3539
3540 clear_discovering:
3541         lp->lp_state &= ~(LNET_PEER_DISCOVERING | LNET_PEER_FORCE_PING |
3542                           LNET_PEER_FORCE_PUSH);
3543
3544         return rc;
3545 }
3546
3547 /*
3548  * Update a peer using the data received.
3549  */
3550 static int lnet_peer_data_present(struct lnet_peer *lp)
3551 __must_hold(&lp->lp_lock)
3552 {
3553         struct lnet_ping_buffer *pbuf;
3554         struct lnet_peer_ni *lpni;
3555         struct lnet_nid nid;
3556         unsigned int flags;
3557         int rc = 0;
3558
3559         pbuf = lp->lp_data;
3560         lp->lp_data = NULL;
3561         lp->lp_state &= ~LNET_PEER_DATA_PRESENT;
3562         lp->lp_state |= LNET_PEER_NIDS_UPTODATE;
3563         spin_unlock(&lp->lp_lock);
3564
3565         /*
3566          * Modifications of peer structures are done while holding the
3567          * ln_api_mutex. A global lock is required because we may be
3568          * modifying multiple peer structures, and a mutex greatly
3569          * simplifies memory management.
3570          *
3571          * The actual changes to the data structures must also protect
3572          * against concurrent lookups, for which the lnet_net_lock in
3573          * LNET_LOCK_EX mode is used.
3574          */
3575         mutex_lock(&the_lnet.ln_api_mutex);
3576         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
3577                 rc = -ESHUTDOWN;
3578                 goto out;
3579         }
3580
3581         /*
3582          * If this peer is not on the peer list then it is being torn
3583          * down, and our reference count may be all that is keeping it
3584          * alive. Don't do any work on it.
3585          */
3586         if (list_empty(&lp->lp_peer_list)) {
3587                 lnet_ping_buffer_decref(pbuf);
3588                 goto out;
3589         }
3590
3591         flags = LNET_PEER_DISCOVERED;
3592         if (pbuf->pb_info.pi_features & LNET_PING_FEAT_MULTI_RAIL)
3593                 flags |= LNET_PEER_MULTI_RAIL;
3594
3595         /*
3596          * Check whether the primary NID in the message matches the
3597          * primary NID of the peer. If it does, update the peer, if
3598          * it does not, check whether there is already a peer with
3599          * that primary NID. If no such peer exists, try to update
3600          * the primary NID of the current peer (allowed if it was
3601          * created due to message traffic) and complete the update.
3602          * If the peer did exist, hand off the data to it.
3603          *
3604          * The peer for the loopback interface is a special case: this
3605          * is the peer for the local node, and we want to set its
3606          * primary NID to the correct value here. Moreover, this peer
3607          * can show up with only the loopback NID in the ping buffer.
3608          */
3609         if (!find_primary(&nid, pbuf)) {
3610                 lnet_ping_buffer_decref(pbuf);
3611                 goto out;
3612         }
3613         if (nid_is_lo0(&lp->lp_primary_nid)) {
3614                 rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3615                 if (rc)
3616                         lnet_ping_buffer_decref(pbuf);
3617                 else
3618                         rc = lnet_peer_merge_data(lp, pbuf);
3619         /*
3620          * if the primary NID we have cached for the peer is present in the
3621          * ping info returned from the peer, but it is not the primary NID
3622          * reported there, and discovery is disabled, then we don't want to
3623          * update our local peer info by adding or removing NIDs; we just
3624          * want to update the status of the NIDs that we currently have
3625          * recorded for that peer.
3626          */
3627         } else if (nid_same(&lp->lp_primary_nid, &nid) ||
3628                    (lnet_is_nid_in_ping_info(&lp->lp_primary_nid, pbuf) &&
3629                     lnet_is_discovery_disabled(lp))) {
3630                 rc = lnet_peer_merge_data(lp, pbuf);
3631         } else {
3632                 lpni = lnet_peer_ni_find_locked(&nid);
3633                 if (!lpni || lp == lpni->lpni_peer_net->lpn_peer) {
3634                         rc = lnet_peer_set_primary_nid(lp, &nid, flags);
3635                         if (rc) {
3636                                 CERROR("Primary NID error %s versus %s: %d\n",
3637                                        libcfs_nidstr(&lp->lp_primary_nid),
3638                                        libcfs_nidstr(&nid), rc);
3639                                 lnet_ping_buffer_decref(pbuf);
3640                         } else {
3641                                 rc = lnet_peer_merge_data(lp, pbuf);
3642                         }
3643                         if (lpni)
3644                                 lnet_peer_ni_decref_locked(lpni);
3645                 } else {
3646                         struct lnet_peer *new_lp;
3647                         new_lp = lpni->lpni_peer_net->lpn_peer;
3648                         /*
3649                          * if lp has discovery/MR enabled that means new_lp
3650                          * should have discovery/MR enabled as well, since
3651                          * it's the same peer, which we're about to merge
3652                          */
3653                         spin_lock(&lp->lp_lock);
3654                         spin_lock(&new_lp->lp_lock);
3655                         if (!(lp->lp_state & LNET_PEER_NO_DISCOVERY))
3656                                 new_lp->lp_state &= ~LNET_PEER_NO_DISCOVERY;
3657                         if (lp->lp_state & LNET_PEER_MULTI_RAIL)
3658                                 new_lp->lp_state |= LNET_PEER_MULTI_RAIL;
3659                         /* If we're processing a ping reply then we may be
3660                          * about to send a push to the peer that we ping'd.
3661                          * Since the ping reply that we're processing was
3662                          * received by lp, we need to set the discovery source
3663                          * NID for new_lp to the NID stored in lp.
3664                          */
3665                         if (!LNET_NID_IS_ANY(&lp->lp_disc_src_nid)) {
3666                                 new_lp->lp_disc_src_nid = lp->lp_disc_src_nid;
3667                                 new_lp->lp_disc_dst_nid = lp->lp_disc_dst_nid;
3668                         }
3669                         spin_unlock(&new_lp->lp_lock);
3670                         spin_unlock(&lp->lp_lock);
3671
3672                         rc = lnet_peer_set_primary_data(new_lp, pbuf);
3673                         lnet_consolidate_routes_locked(lp, new_lp);
3674                         lnet_peer_ni_decref_locked(lpni);
3675                 }
3676         }
3677 out:
3678         CDEBUG(D_NET, "peer %s(%p): %d. state = 0x%x\n",
3679                libcfs_nidstr(&lp->lp_primary_nid), lp, rc,
3680                lp->lp_state);
3681         mutex_unlock(&the_lnet.ln_api_mutex);
3682
3683         spin_lock(&lp->lp_lock);
3684         /* Tell discovery to re-check the peer immediately. */
3685         if (!rc)
3686                 rc = LNET_REDISCOVER_PEER;
3687         return rc;
3688 }
3689
3690 /*
3691  * A ping failed. Clear the PING_FAILED state and set the
3692  * FORCE_PING state, to ensure a retry even if discovery is
3693  * disabled. This avoids being left with incorrect state.
3694  */
3695 static int lnet_peer_ping_failed(struct lnet_peer *lp)
3696 __must_hold(&lp->lp_lock)
3697 {
3698         struct lnet_handle_md mdh;
3699         int rc;
3700
3701         mdh = lp->lp_ping_mdh;
3702         LNetInvalidateMDHandle(&lp->lp_ping_mdh);
3703         lp->lp_state &= ~LNET_PEER_PING_FAILED;
3704         lp->lp_state |= LNET_PEER_FORCE_PING;
3705         rc = lp->lp_ping_error;
3706         lp->lp_ping_error = 0;
3707         spin_unlock(&lp->lp_lock);
3708
3709         if (!LNetMDHandleIsInvalid(mdh))
3710                 LNetMDUnlink(mdh);
3711
3712         CDEBUG(D_NET, "peer %s:%d\n",
3713                libcfs_nidstr(&lp->lp_primary_nid), rc);
3714
3715         spin_lock(&lp->lp_lock);
3716         return rc ? rc : LNET_REDISCOVER_PEER;
3717 }
3718
3719 /* Active side of ping. */
3720 static int lnet_peer_send_ping(struct lnet_peer *lp)
3721 __must_hold(&lp->lp_lock)
3722 {
3723         int bytes;
3724         int rc;
3725         int cpt;
3726
3727         lp->lp_state |= LNET_PEER_PING_SENT;
3728         lp->lp_state &= ~LNET_PEER_FORCE_PING;
3729         spin_unlock(&lp->lp_lock);
3730
3731         cpt = lnet_net_lock_current();
3732         /* Refcount for MD. */
3733         lnet_peer_addref_locked(lp);
3734         lnet_net_unlock(cpt);
3735
3736         bytes = max_t(int, lp->lp_data_bytes, LNET_PING_INFO_MIN_SIZE);
3737
3738         rc = lnet_send_ping(&lp->lp_primary_nid, &lp->lp_ping_mdh, bytes, lp,
3739                             the_lnet.ln_dc_handler, false);
3740         /* If LNetMDBind() in lnet_send_ping() fails we need to drop the
3741          * refcount on the peer ourselves; otherwise LNetMDUnlink() will
3742          * eventually be called and will drop it.
3743          */
3744         if (rc > 0) {
3745                 lnet_net_lock(cpt);
3746                 lnet_peer_decref_locked(lp);
3747                 lnet_net_unlock(cpt);
3748                 rc = -rc; /* change the rc to a negative value */
3749                 goto fail_error;
3750         } else if (rc < 0) {
3751                 goto fail_error;
3752         }
3753
3754         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3755
3756         spin_lock(&lp->lp_lock);
3757         return 0;
3758
3759 fail_error:
3760         CDEBUG(D_NET, "peer %s: %d\n", libcfs_nidstr(&lp->lp_primary_nid), rc);
3761         /*
3762          * The errors that get us here are considered hard errors and
3763          * cause Discovery to terminate. So we clear PING_SENT, but do
3764          * not set either PING_FAILED or FORCE_PING. In fact we need
3765          * to clear PING_FAILED, because the unlink event handler will
3766          * have set it if we called LNetMDUnlink() above.
3767          */
3768         spin_lock(&lp->lp_lock);
3769         lp->lp_state &= ~(LNET_PEER_PING_SENT | LNET_PEER_PING_FAILED);
3770         return rc;
3771 }
3772
3773 /*
3774  * This function exists because you cannot call LNetMDUnlink() from an
3775  * event handler.
3776  */
3777 static int lnet_peer_push_failed(struct lnet_peer *lp)
3778 __must_hold(&lp->lp_lock)
3779 {
3780         struct lnet_handle_md mdh;
3781         int rc;
3782
3783         mdh = lp->lp_push_mdh;
3784         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3785         lp->lp_state &= ~LNET_PEER_PUSH_FAILED;
3786         rc = lp->lp_push_error;
3787         lp->lp_push_error = 0;
3788         spin_unlock(&lp->lp_lock);
3789
3790         if (!LNetMDHandleIsInvalid(mdh))
3791                 LNetMDUnlink(mdh);
3792
3793         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3794         spin_lock(&lp->lp_lock);
3795         return rc ? rc : LNET_REDISCOVER_PEER;
3796 }
3797
3798 /*
3799  * Mark the peer as discovered.
3800  */
3801 static int lnet_peer_discovered(struct lnet_peer *lp)
3802 __must_hold(&lp->lp_lock)
3803 {
3804         lp->lp_state |= LNET_PEER_DISCOVERED;
3805         lp->lp_state &= ~(LNET_PEER_DISCOVERING |
3806                           LNET_PEER_REDISCOVER);
3807
3808         lp->lp_dc_error = 0;
3809
3810         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3811
3812         return 0;
3813 }
3814
3815 /* Active side of push. */
3816 static int lnet_peer_send_push(struct lnet_peer *lp)
3817 __must_hold(&lp->lp_lock)
3818 {
3819         struct lnet_ping_buffer *pbuf;
3820         struct lnet_processid id;
3821         struct lnet_md md;
3822         int cpt;
3823         int rc;
3824
3825         /* Don't push to a non-multi-rail peer. */
3826         if (!(lp->lp_state & LNET_PEER_MULTI_RAIL)) {
3827                 lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3828                 /* if peer's NIDs are uptodate then peer is discovered */
3829                 if (lp->lp_state & LNET_PEER_NIDS_UPTODATE) {
3830                         rc = lnet_peer_discovered(lp);
3831                         return rc;
3832                 }
3833
3834                 return 0;
3835         }
3836
3837         lp->lp_state |= LNET_PEER_PUSH_SENT;
3838         lp->lp_state &= ~LNET_PEER_FORCE_PUSH;
3839         spin_unlock(&lp->lp_lock);
3840
3841         cpt = lnet_net_lock_current();
3842         pbuf = the_lnet.ln_ping_target;
3843         lnet_ping_buffer_addref(pbuf);
3844         lnet_net_unlock(cpt);
3845
3846         /* Push source MD */
3847         md.start     = &pbuf->pb_info;
3848         md.length    = pbuf->pb_nbytes;
3849         md.threshold = 2; /* Put/Ack */
3850         md.max_size  = 0;
3851         md.options   = LNET_MD_TRACK_RESPONSE;
3852         md.handler   = the_lnet.ln_dc_handler;
3853         md.user_ptr  = lp;
3854
3855         rc = LNetMDBind(&md, LNET_UNLINK, &lp->lp_push_mdh);
3856         if (rc) {
3857                 lnet_ping_buffer_decref(pbuf);
3858                 CERROR("Can't bind push source MD: %d\n", rc);
3859                 goto fail_error;
3860         }
3861
3862         cpt = lnet_net_lock_current();
3863         /* Refcount for MD. */
3864         lnet_peer_addref_locked(lp);
3865         id.pid = LNET_PID_LUSTRE;
3866         if (!LNET_NID_IS_ANY(&lp->lp_disc_dst_nid))
3867                 id.nid = lp->lp_disc_dst_nid;
3868         else
3869                 id.nid = lp->lp_primary_nid;
3870         lnet_net_unlock(cpt);
3871
3872         rc = LNetPut(&lp->lp_disc_src_nid, lp->lp_push_mdh,
3873                      LNET_ACK_REQ, &id, LNET_RESERVED_PORTAL,
3874                      LNET_PROTO_PING_MATCHBITS, 0, 0);
3875
3876         /*
3877          * Reset the discovery NIDs. There is no need to restrict sending
3878          * from that source if we call lnet_push_update_to_peers(). They
3879          * will be set to specific NIDs again if we initiate discovery
3880          * from scratch.
3881          */
3882         lp->lp_disc_src_nid = LNET_ANY_NID;
3883         lp->lp_disc_dst_nid = LNET_ANY_NID;
3884
3885         if (rc)
3886                 goto fail_unlink;
3887
3888         CDEBUG(D_NET, "peer %s\n", libcfs_nidstr(&lp->lp_primary_nid));
3889
3890         spin_lock(&lp->lp_lock);
3891         return 0;
3892
3893 fail_unlink:
3894         LNetMDUnlink(lp->lp_push_mdh);
3895         LNetInvalidateMDHandle(&lp->lp_push_mdh);
3896 fail_error:
3897         CDEBUG(D_NET, "peer %s(%p): %d\n", libcfs_nidstr(&lp->lp_primary_nid),
3898                lp, rc);
3899         /*
3900          * The errors that get us here are considered hard errors and
3901          * cause Discovery to terminate. So we clear PUSH_SENT, but do
3902          * not set PUSH_FAILED. In fact we need to clear PUSH_FAILED,
3903          * because the unlink event handler will have set it if we
3904          * called LNetMDUnlink() above.
3905          */
3906         spin_lock(&lp->lp_lock);
3907         lp->lp_state &= ~(LNET_PEER_PUSH_SENT | LNET_PEER_PUSH_FAILED);
3908         return rc;
3909 }
3910
3911 /*
3912  * Wait for work to be queued or some other change that must be
3913  * attended to. Returns non-zero if the discovery thread should shut
3914  * down.
3915  */
3916 static int lnet_peer_discovery_wait_for_work(void)
3917 {
3918         int cpt;
3919         int rc = 0;
3920
3921         DEFINE_WAIT(wait);
3922
3923         cpt = lnet_net_lock_current();
3924         for (;;) {
3925                 prepare_to_wait(&the_lnet.ln_dc_waitq, &wait,
3926                                 TASK_INTERRUPTIBLE);
3927                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3928                         break;
3929                 if (lnet_push_target_resize_needed() ||
3930                     the_lnet.ln_push_target->pb_needs_post)
3931                         break;
3932                 if (!list_empty(&the_lnet.ln_dc_request))
3933                         break;
3934                 if (!list_empty(&the_lnet.ln_msg_resend))
3935                         break;
3936                 lnet_net_unlock(cpt);
3937
3938                 /*
3939                  * Wake up at most once per second to check for peers that
3940                  * have been stuck on the working queue for longer than
3941                  * the peer timeout.
3942                  */
3943                 schedule_timeout(cfs_time_seconds(1));
3944                 finish_wait(&the_lnet.ln_dc_waitq, &wait);
3945                 cpt = lnet_net_lock_current();
3946         }
3947         finish_wait(&the_lnet.ln_dc_waitq, &wait);
3948
3949         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
3950                 rc = -ESHUTDOWN;
3951
3952         lnet_net_unlock(cpt);
3953
3954         CDEBUG(D_NET, "woken: %d\n", rc);
3955
3956         return rc;
3957 }
3958
3959 /*
3960  * Messages that were pending on a destroyed peer will be put on a global
3961  * resend list. The resend list is checked by the discovery thread when
3962  * it wakes up, and the messages on it are resent. These messages may
3963  * still be sendable if the lpni that originally caused the re-queue
3964  * has been transferred to another peer.
3965  *
3966  * It is possible that LNet could be shut down while we're iterating
3967  * through the list. lnet_shutdown_lndnets() will attempt to access the
3968  * resend list, but will have to wait until the spinlock is released, by
3969  * which time there shouldn't be any more messages on the resend list.
3970  * During shutdown lnet_send() will fail and lnet_finalize() will be called
3971  * for the messages so they can be released. The other case is that
3972  * lnet_shutdown_lndnets() can finalize all the messages before this
3973  * function can visit the resend list, in which case this function will be
3974  * a no-op.
3975  */
3976 static void lnet_resend_msgs(void)
3977 {
3978         struct lnet_msg *msg, *tmp;
3979         LIST_HEAD(resend);
3980         int rc;
3981
3982         spin_lock(&the_lnet.ln_msg_resend_lock);
3983         list_splice(&the_lnet.ln_msg_resend, &resend);
3984         spin_unlock(&the_lnet.ln_msg_resend_lock);
3985
3986         list_for_each_entry_safe(msg, tmp, &resend, msg_list) {
3987                 list_del_init(&msg->msg_list);
3988                 rc = lnet_send(&msg->msg_src_nid_param, msg,
3989                                &msg->msg_rtr_nid_param);
3990                 if (rc < 0) {
3991                         CNETERR("Error sending %s to %s: %d\n",
3992                                lnet_msgtyp2str(msg->msg_type),
3993                                libcfs_idstr(&msg->msg_target), rc);
3994                         lnet_finalize(msg, rc);
3995                 }
3996         }
3997 }
3998
3999 /* The discovery thread. */
4000 static int lnet_peer_discovery(void *arg)
4001 {
4002         struct lnet_peer *lp;
4003         int rc;
4004
4005         wait_for_completion(&the_lnet.ln_started);
4006
4007         CDEBUG(D_NET, "started\n");
4008
4009         for (;;) {
4010                 if (lnet_peer_discovery_wait_for_work())
4011                         break;
4012
4013                 if (lnet_push_target_resize_needed())
4014                         lnet_push_target_resize();
4015                 else if (the_lnet.ln_push_target->pb_needs_post)
4016                         lnet_push_target_post(the_lnet.ln_push_target,
4017                                               &the_lnet.ln_push_target_md);
4018
4019                 lnet_resend_msgs();
4020
4021                 lnet_net_lock(LNET_LOCK_EX);
4022                 if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING) {
4023                         lnet_net_unlock(LNET_LOCK_EX);
4024                         break;
4025                 }
4026
4027                 /*
4028                  * Process all incoming discovery work requests.  When
4029                  * discovery must wait on a peer to change state, it
4030                  * is added to the tail of the ln_dc_working queue. A
4031                  * timestamp keeps track of when the peer was added,
4032                  * so we can time out discovery requests that take too
4033                  * long.
4034                  */
4035                 while (!list_empty(&the_lnet.ln_dc_request)) {
4036                         lp = list_first_entry(&the_lnet.ln_dc_request,
4037                                               struct lnet_peer, lp_dc_list);
4038                         list_move(&lp->lp_dc_list, &the_lnet.ln_dc_working);
4039                         /*
4040                          * set the time the peer was put on the dc_working
4041                          * queue. It shouldn't remain on the queue
4042                          * forever, in case the GET message (for ping)
4043                          * doesn't get a REPLY or the PUT message (for
4044                          * push) doesn't get an ACK.
4045                          */
4046                         lp->lp_last_queued = ktime_get_real_seconds();
4047                         lnet_net_unlock(LNET_LOCK_EX);
4048
4049                         if (lnet_push_target_resize_needed())
4050                                 lnet_push_target_resize();
4051                         else if (the_lnet.ln_push_target->pb_needs_post)
4052                                 lnet_push_target_post(the_lnet.ln_push_target,
4053                                                       &the_lnet.ln_push_target_md);
4054
4055                         /*
4056                          * Select an action depending on the state of
4057                          * the peer and whether discovery is disabled.
4058                          * The check whether discovery is disabled is
4059                          * done after the code that handles processing
4060                          * for arrived data, cleanup for failures, and
4061                          * forcing a Ping or Push.
4062                          */
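                        /* Precedence when several state bits are set
                         * (first match below wins): deletion, arrived
                         * ping data, ping/push failure cleanup, forced
                         * ping/push, stale NIDs, pending push; otherwise
                         * the peer is simply marked discovered.
                         */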
4063                         spin_lock(&lp->lp_lock);
4064                         CDEBUG(D_NET, "peer %s(%p) state %#x\n",
4065                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4066                                 lp->lp_state);
4067                         if (lp->lp_state & (LNET_PEER_MARK_DELETION |
4068                                             LNET_PEER_MARK_DELETED))
4069                                 rc = lnet_peer_deletion(lp);
4070                         else if (lp->lp_state & LNET_PEER_DATA_PRESENT)
4071                                 rc = lnet_peer_data_present(lp);
4072                         else if (lp->lp_state & LNET_PEER_PING_FAILED)
4073                                 rc = lnet_peer_ping_failed(lp);
4074                         else if (lp->lp_state & LNET_PEER_PUSH_FAILED)
4075                                 rc = lnet_peer_push_failed(lp);
4076                         else if (lp->lp_state & LNET_PEER_FORCE_PING)
4077                                 rc = lnet_peer_send_ping(lp);
4078                         else if (lp->lp_state & LNET_PEER_FORCE_PUSH)
4079                                 rc = lnet_peer_send_push(lp);
4080                         else if (!(lp->lp_state & LNET_PEER_NIDS_UPTODATE))
4081                                 rc = lnet_peer_send_ping(lp);
4082                         else if (lnet_peer_needs_push(lp))
4083                                 rc = lnet_peer_send_push(lp);
4084                         else
4085                                 rc = lnet_peer_discovered(lp);
4086                         CDEBUG(D_NET, "peer %s(%p) state %#x rc %d\n",
4087                                 libcfs_nidstr(&lp->lp_primary_nid), lp,
4088                                 lp->lp_state, rc);
4089
4090                         if (rc == LNET_REDISCOVER_PEER) {
4091                                 spin_unlock(&lp->lp_lock);
4092                                 lnet_net_lock(LNET_LOCK_EX);
4093                                 list_move(&lp->lp_dc_list,
4094                                           &the_lnet.ln_dc_request);
4095                         } else if (rc ||
4096                                    !(lp->lp_state & LNET_PEER_DISCOVERING)) {
4097                                 spin_unlock(&lp->lp_lock);
4098                                 lnet_net_lock(LNET_LOCK_EX);
4099                                 lnet_peer_discovery_complete(lp, rc);
4100                         } else {
4101                                 spin_unlock(&lp->lp_lock);
4102                                 lnet_net_lock(LNET_LOCK_EX);
4103                         }
4104
4105                         if (the_lnet.ln_dc_state == LNET_DC_STATE_STOPPING)
4106                                 break;
4107
4108                 }
4109
4110                 lnet_net_unlock(LNET_LOCK_EX);
4111         }
4112
4113         CDEBUG(D_NET, "stopping\n");
4114         /*
4115          * Clean up before telling lnet_peer_discovery_stop() that
4116          * we're done. Use wake_up() below to somewhat reduce the
4117          * size of the thundering herd if there are multiple threads
4118          * waiting on discovery of a single peer.
4119          */
4120
4121         /* Queue cleanup 1: stop all pending pings and pushes. */
4122         lnet_net_lock(LNET_LOCK_EX);
4123         while (!list_empty(&the_lnet.ln_dc_working)) {
4124                 lp = list_first_entry(&the_lnet.ln_dc_working,
4125                                       struct lnet_peer, lp_dc_list);
4126                 list_move(&lp->lp_dc_list, &the_lnet.ln_dc_expired);
4127                 lnet_net_unlock(LNET_LOCK_EX);
4128                 lnet_peer_cancel_discovery(lp);
4129                 lnet_net_lock(LNET_LOCK_EX);
4130         }
4131         lnet_net_unlock(LNET_LOCK_EX);
4132
4133         /* Queue cleanup 2: wait for the expired queue to clear. */
4134         while (!list_empty(&the_lnet.ln_dc_expired))
4135                 schedule_timeout_uninterruptible(cfs_time_seconds(1));
4136
4137         /* Queue cleanup 3: clear the request queue. */
4138         lnet_net_lock(LNET_LOCK_EX);
4139         while (!list_empty(&the_lnet.ln_dc_request)) {
4140                 lp = list_first_entry(&the_lnet.ln_dc_request,
4141                                       struct lnet_peer, lp_dc_list);
4142                 lnet_peer_discovery_complete(lp, -ESHUTDOWN);
4143         }
4144         lnet_net_unlock(LNET_LOCK_EX);
4145
4146         lnet_assert_handler_unused(the_lnet.ln_dc_handler);
4147         the_lnet.ln_dc_handler = NULL;
4148
4149         the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4150         wake_up(&the_lnet.ln_dc_waitq);
4151
4152         CDEBUG(D_NET, "stopped\n");
4153
4154         return 0;
4155 }
4156
4157 /* ln_api_mutex is held on entry. */
4158 int lnet_peer_discovery_start(void)
4159 {
4160         struct task_struct *task;
4161         int rc = 0;
4162
4163         if (the_lnet.ln_dc_state != LNET_DC_STATE_SHUTDOWN)
4164                 return -EALREADY;
4165
4166         the_lnet.ln_dc_handler = lnet_discovery_event_handler;
4167         the_lnet.ln_dc_state = LNET_DC_STATE_RUNNING;
4168         task = kthread_run(lnet_peer_discovery, NULL, "lnet_discovery");
4169         if (IS_ERR(task)) {
4170                 rc = PTR_ERR(task);
4171                 CERROR("Can't start peer discovery thread: %d\n", rc);
4172
4173                 the_lnet.ln_dc_handler = NULL;
4174
4175                 the_lnet.ln_dc_state = LNET_DC_STATE_SHUTDOWN;
4176         }
4177
4178         CDEBUG(D_NET, "discovery start: %d\n", rc);
4179
4180         return rc;
4181 }
4182
4183 /* ln_api_mutex is held on entry. */
4184 void lnet_peer_discovery_stop(void)
4185 {
4186         if (the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN)
4187                 return;
4188
4189         LASSERT(the_lnet.ln_dc_state == LNET_DC_STATE_RUNNING);
4190         the_lnet.ln_dc_state = LNET_DC_STATE_STOPPING;
4191
4192         /* In the LNetNIInit() path we may be stopping discovery before it
4193          * entered its work loop
4194          */
4195         if (!completion_done(&the_lnet.ln_started))
4196                 complete(&the_lnet.ln_started);
4197         else
4198                 wake_up(&the_lnet.ln_dc_waitq);
4199
4200         mutex_unlock(&the_lnet.ln_api_mutex);
4201         wait_event(the_lnet.ln_dc_waitq,
4202                    the_lnet.ln_dc_state == LNET_DC_STATE_SHUTDOWN);
4203         mutex_lock(&the_lnet.ln_api_mutex);
4204
4205         LASSERT(list_empty(&the_lnet.ln_dc_request));
4206         LASSERT(list_empty(&the_lnet.ln_dc_working));
4207         LASSERT(list_empty(&the_lnet.ln_dc_expired));
4208
4209         CDEBUG(D_NET, "discovery stopped\n");
4210 }
4211
4212 /* Debugging */
4213
4214 void
4215 lnet_debug_peer(struct lnet_nid *nid)
4216 {
4217         char                    *aliveness = "NA";
4218         struct lnet_peer_ni     *lp;
4219         int                     cpt;
4220
4221         cpt = lnet_nid2cpt(nid, NULL);
4222         lnet_net_lock(cpt);
4223
4224         lp = lnet_peerni_by_nid_locked(nid, NULL, cpt);
4225         if (IS_ERR(lp)) {
4226                 lnet_net_unlock(cpt);
4227                 CDEBUG(D_WARNING, "No peer %s\n", libcfs_nidstr(nid));
4228                 return;
4229         }
4230
4231         if (lnet_isrouter(lp) || lnet_peer_aliveness_enabled(lp))
4232                 aliveness = (lnet_is_peer_ni_alive(lp)) ? "up" : "down";
4233
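	/* Column order below: nid, refcount, aliveness, peer tx credit limit
	 * (lct_peer_tx_credits), rtr credits, min rtr credits, tx credits,
	 * min tx credits, tx queue bytes.
	 */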
4234         CDEBUG(D_WARNING, "%-24s %4d %5s %5d %5d %5d %5d %5d %ld\n",
4235                libcfs_nidstr(&lp->lpni_nid), kref_read(&lp->lpni_kref),
4236                aliveness, lp->lpni_net->net_tunables.lct_peer_tx_credits,
4237                lp->lpni_rtrcredits, lp->lpni_minrtrcredits,
4238                lp->lpni_txcredits, lp->lpni_mintxcredits, lp->lpni_txqnob);
4239
4240         lnet_peer_ni_decref_locked(lp);
4241
4242         lnet_net_unlock(cpt);
4243 }
4244
4245 /* Gathering information for userspace. */
4246
4247 int lnet_get_peer_ni_info(__u32 peer_index, __u64 *nid,
4248                           char aliveness[LNET_MAX_STR_LEN],
4249                           __u32 *cpt_iter, __u32 *refcount,
4250                           __u32 *ni_peer_tx_credits, __u32 *peer_tx_credits,
4251                           __u32 *peer_rtr_credits, __u32 *peer_min_rtr_credits,
4252                           __u32 *peer_tx_qnob)
4253 {
4254         struct lnet_peer_table          *peer_table;
4255         struct lnet_peer_ni             *lp;
4256         int                             j;
4257         int                             lncpt;
4258         bool                            found = false;
4259
4260         /* get the number of CPTs */
4261         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4262
4263 	/* if the cpt number to be examined is >= the number of cpts in
4264 	 * the system, then indicate that there are no more cpts to examine
4265 	 */
4266         if (*cpt_iter >= lncpt)
4267                 return -ENOENT;
4268
4269         /* get the current table */
4270         peer_table = the_lnet.ln_peer_tables[*cpt_iter];
4271         /* if the ptable is NULL then there are no more cpts to examine */
4272         if (peer_table == NULL)
4273                 return -ENOENT;
4274
4275         lnet_net_lock(*cpt_iter);
4276
4277         for (j = 0; j < LNET_PEER_HASH_SIZE && !found; j++) {
4278                 struct list_head *peers = &peer_table->pt_hash[j];
4279
4280                 list_for_each_entry(lp, peers, lpni_hashlist) {
4281                         if (!nid_is_nid4(&lp->lpni_nid))
4282                                 continue;
4283                         if (peer_index-- > 0)
4284                                 continue;
4285
4286                         snprintf(aliveness, LNET_MAX_STR_LEN, "NA");
4287 			if (lnet_isrouter(lp) ||
4288 			    lnet_peer_aliveness_enabled(lp))
4289 				snprintf(aliveness, LNET_MAX_STR_LEN, "%s",
4290 					 lnet_is_peer_ni_alive(lp) ? "up" : "down");
4291
4292                         *nid = lnet_nid_to_nid4(&lp->lpni_nid);
4293                         *refcount = kref_read(&lp->lpni_kref);
4294                         *ni_peer_tx_credits =
4295                                 lp->lpni_net->net_tunables.lct_peer_tx_credits;
4296                         *peer_tx_credits = lp->lpni_txcredits;
4297                         *peer_rtr_credits = lp->lpni_rtrcredits;
4298 			*peer_min_rtr_credits = lp->lpni_minrtrcredits;
4299                         *peer_tx_qnob = lp->lpni_txqnob;
4300
4301                         found = true;
4302                 }
4303
4304         }
4305         lnet_net_unlock(*cpt_iter);
4306
4307         *cpt_iter = lncpt;
4308
4309         return found ? 0 : -ENOENT;
4310 }
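
/*
 * Illustrative sketch: a hypothetical in-kernel caller paging through the
 * peer NIs of one CPT table with the helper above.  Because the helper
 * overwrites *cpt_iter with the CPT count on return, the iterator is reset
 * before every call here.
 *
 *	__u32 idx, it, refcount, ni_tx, tx, rtr, min_rtr, qnob;
 *	char aliveness[LNET_MAX_STR_LEN];
 *	__u64 nid;
 *
 *	for (idx = 0; ; idx++) {
 *		it = 0;	 (scan the peer table of CPT 0 only)
 *		if (lnet_get_peer_ni_info(idx, &nid, aliveness, &it,
 *					  &refcount, &ni_tx, &tx, &rtr,
 *					  &min_rtr, &qnob) == -ENOENT)
 *			break;
 *		(one nid4 peer NI is reported per call)
 *	}
 */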
4311
4312 /* ln_api_mutex is held, which keeps the peer list stable */
4313 int lnet_get_peer_info(struct lnet_ioctl_peer_cfg *cfg, void __user *bulk)
4314 {
4315         struct lnet_ioctl_element_stats *lpni_stats;
4316         struct lnet_ioctl_element_msg_stats *lpni_msg_stats;
4317         struct lnet_ioctl_peer_ni_hstats *lpni_hstats;
4318         struct lnet_peer_ni_credit_info *lpni_info;
4319         struct lnet_peer_ni *lpni;
4320         struct lnet_peer *lp;
4321         lnet_nid_t nid4;
4322         struct lnet_nid nid;
4323         __u32 size;
4324         int rc;
4325
4326         lnet_nid4_to_nid(cfg->prcfg_prim_nid, &nid);
4327         lp = lnet_find_peer(&nid);
4328         if (!lp) {
4329                 rc = -ENOENT;
4330                 goto out;
4331         }
4332
4333         size = sizeof(nid4) + sizeof(*lpni_info) + sizeof(*lpni_stats)
4334                 + sizeof(*lpni_msg_stats) + sizeof(*lpni_hstats);
4335         size *= lp->lp_nnis;
4336         if (size > cfg->prcfg_size) {
4337                 cfg->prcfg_size = size;
4338                 rc = -E2BIG;
4339                 goto out_lp_decref;
4340         }
4341
4342         cfg->prcfg_prim_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4343         cfg->prcfg_mr = lnet_peer_is_multi_rail(lp);
4344         cfg->prcfg_cfg_nid = lnet_nid_to_nid4(&lp->lp_primary_nid);
4345         cfg->prcfg_count = lp->lp_nnis;
4346         cfg->prcfg_size = size;
4347         cfg->prcfg_state = lp->lp_state;
4348
4349         /* Allocate helper buffers. */
4350         rc = -ENOMEM;
4351         LIBCFS_ALLOC(lpni_info, sizeof(*lpni_info));
4352         if (!lpni_info)
4353                 goto out_lp_decref;
4354         LIBCFS_ALLOC(lpni_stats, sizeof(*lpni_stats));
4355         if (!lpni_stats)
4356                 goto out_free_info;
4357         LIBCFS_ALLOC(lpni_msg_stats, sizeof(*lpni_msg_stats));
4358         if (!lpni_msg_stats)
4359                 goto out_free_stats;
4360         LIBCFS_ALLOC(lpni_hstats, sizeof(*lpni_hstats));
4361         if (!lpni_hstats)
4362                 goto out_free_msg_stats;
4363
4365         lpni = NULL;
4366         rc = -EFAULT;
4367         while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL) {
4368                 if (!nid_is_nid4(&lpni->lpni_nid))
4369                         continue;
4370                 nid4 = lnet_nid_to_nid4(&lpni->lpni_nid);
4371                 if (copy_to_user(bulk, &nid4, sizeof(nid4)))
4372                         goto out_free_hstats;
4373                 bulk += sizeof(nid4);
4374
4375                 memset(lpni_info, 0, sizeof(*lpni_info));
4376                 snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "NA");
4377 		if (lnet_isrouter(lpni) ||
4378 		    lnet_peer_aliveness_enabled(lpni))
4379 			snprintf(lpni_info->cr_aliveness, LNET_MAX_STR_LEN, "%s",
4380 				 lnet_is_peer_ni_alive(lpni) ? "up" : "down");
4381
4382                 lpni_info->cr_refcount = kref_read(&lpni->lpni_kref);
4383                 lpni_info->cr_ni_peer_tx_credits = (lpni->lpni_net != NULL) ?
4384                         lpni->lpni_net->net_tunables.lct_peer_tx_credits : 0;
4385                 lpni_info->cr_peer_tx_credits = lpni->lpni_txcredits;
4386                 lpni_info->cr_peer_rtr_credits = lpni->lpni_rtrcredits;
4387                 lpni_info->cr_peer_min_rtr_credits = lpni->lpni_minrtrcredits;
4388                 lpni_info->cr_peer_min_tx_credits = lpni->lpni_mintxcredits;
4389                 lpni_info->cr_peer_tx_qnob = lpni->lpni_txqnob;
4390                 if (copy_to_user(bulk, lpni_info, sizeof(*lpni_info)))
4391                         goto out_free_hstats;
4392                 bulk += sizeof(*lpni_info);
4393
4394                 memset(lpni_stats, 0, sizeof(*lpni_stats));
4395                 lpni_stats->iel_send_count = lnet_sum_stats(&lpni->lpni_stats,
4396                                                             LNET_STATS_TYPE_SEND);
4397                 lpni_stats->iel_recv_count = lnet_sum_stats(&lpni->lpni_stats,
4398                                                             LNET_STATS_TYPE_RECV);
4399                 lpni_stats->iel_drop_count = lnet_sum_stats(&lpni->lpni_stats,
4400                                                             LNET_STATS_TYPE_DROP);
4401                 if (copy_to_user(bulk, lpni_stats, sizeof(*lpni_stats)))
4402                         goto out_free_hstats;
4403                 bulk += sizeof(*lpni_stats);
4404                 lnet_usr_translate_stats(lpni_msg_stats, &lpni->lpni_stats);
4405                 if (copy_to_user(bulk, lpni_msg_stats, sizeof(*lpni_msg_stats)))
4406                         goto out_free_hstats;
4407                 bulk += sizeof(*lpni_msg_stats);
4408                 lpni_hstats->hlpni_network_timeout =
4409                   atomic_read(&lpni->lpni_hstats.hlt_network_timeout);
4410                 lpni_hstats->hlpni_remote_dropped =
4411                   atomic_read(&lpni->lpni_hstats.hlt_remote_dropped);
4412                 lpni_hstats->hlpni_remote_timeout =
4413                   atomic_read(&lpni->lpni_hstats.hlt_remote_timeout);
4414                 lpni_hstats->hlpni_remote_error =
4415                   atomic_read(&lpni->lpni_hstats.hlt_remote_error);
4416                 lpni_hstats->hlpni_health_value =
4417                   atomic_read(&lpni->lpni_healthv);
4418                 lpni_hstats->hlpni_ping_count = lpni->lpni_ping_count;
4419                 lpni_hstats->hlpni_next_ping = lpni->lpni_next_ping;
4420                 if (copy_to_user(bulk, lpni_hstats, sizeof(*lpni_hstats)))
4421                         goto out_free_hstats;
4422                 bulk += sizeof(*lpni_hstats);
4423         }
4424         rc = 0;
4425
4426 out_free_hstats:
4427         LIBCFS_FREE(lpni_hstats, sizeof(*lpni_hstats));
4428 out_free_msg_stats:
4429         LIBCFS_FREE(lpni_msg_stats, sizeof(*lpni_msg_stats));
4430 out_free_stats:
4431         LIBCFS_FREE(lpni_stats, sizeof(*lpni_stats));
4432 out_free_info:
4433         LIBCFS_FREE(lpni_info, sizeof(*lpni_info));
4434 out_lp_decref:
4435         lnet_peer_decref_locked(lp);
4436 out:
4437         return rc;
4438 }
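
/*
 * Layout note (illustrative): for each of cfg->prcfg_count peer NIs the
 * bulk buffer filled above packs five records back to back:
 *
 *	lnet_nid_t				nid
 *	struct lnet_peer_ni_credit_info		credit info
 *	struct lnet_ioctl_element_stats		send/recv/drop counts
 *	struct lnet_ioctl_element_msg_stats	per-type message stats
 *	struct lnet_ioctl_peer_ni_hstats	health stats
 *
 * A hypothetical userspace reader could unpack one entry roughly as:
 *
 *	memcpy(&nid, cur, sizeof(nid));			cur += sizeof(nid);
 *	memcpy(&info, cur, sizeof(info));		cur += sizeof(info);
 *	memcpy(&stats, cur, sizeof(stats));		cur += sizeof(stats);
 *	memcpy(&msg_stats, cur, sizeof(msg_stats));	cur += sizeof(msg_stats);
 *	memcpy(&hstats, cur, sizeof(hstats));		cur += sizeof(hstats);
 *
 * If the supplied buffer is smaller than prcfg_count such entries, the
 * call fails with -E2BIG and cfg->prcfg_size is updated to the size
 * required.
 */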
4439
4440 /* must hold net_lock/0 */
4441 void
4442 lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
4443                                      struct list_head *recovery_queue,
4444                                      time64_t now)
4445 {
4446 	/* the monitor thread could have shut down and cleaned up the queues */
4447         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
4448                 return;
4449
4450         if (!list_empty(&lpni->lpni_recovery))
4451                 return;
4452
4453         if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
4454                 return;
4455
4456         if (!lpni->lpni_last_alive) {
4457                 CDEBUG(D_NET,
4458                        "lpni %s(%p) not eligible for recovery last alive %lld\n",
4459                        libcfs_nidstr(&lpni->lpni_nid), lpni,
4460                        lpni->lpni_last_alive);
4461                 return;
4462         }
4463
4464         if (lnet_recovery_limit &&
4465             now > lpni->lpni_last_alive + lnet_recovery_limit) {
4466                 CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
4467                        libcfs_nidstr(&lpni->lpni_nid),
4468                        lpni->lpni_last_alive);
4469                 /* Reset the ping count so that if this peer NI is added back to
4470                  * the recovery queue we will send the first ping right away.
4471                  */
4472                 lpni->lpni_ping_count = 0;
4473                 return;
4474         }
4475
4476         /* This peer NI is going on the recovery queue, so take a ref on it */
4477         lnet_peer_ni_addref_locked(lpni);
4478
4479         lnet_peer_ni_set_next_ping(lpni, now);
4480
4481         CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld last alive: %lld health: %d\n",
4482                libcfs_nidstr(&lpni->lpni_nid),
4483                lpni->lpni_ping_count,
4484                lpni->lpni_next_ping,
4485                lpni->lpni_last_alive,
4486                atomic_read(&lpni->lpni_healthv));
4487
4488         list_add_tail(&lpni->lpni_recovery, recovery_queue);
4489 }
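
/*
 * Worked example for the lnet_recovery_limit check above (values are
 * illustrative): with lnet_recovery_limit = 300 and a peer NI whose
 * lpni_last_alive is 1000, the NI is queued for recovery only while
 * now <= 1300.  Once now exceeds 1300 the NI is considered aged out,
 * lpni_ping_count is reset to 0, and it stays off the queue until it is
 * next seen alive.  With lnet_recovery_limit = 0 the age check is skipped
 * and recovery pings continue indefinitely.
 */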
4490
4491 /* Call with the ln_api_mutex held */
4492 void
4493 lnet_peer_ni_set_healthv(struct lnet_nid *nid, int value, bool all)
4494 {
4495         struct lnet_peer_table *ptable;
4496         struct lnet_peer *lp;
4497         struct lnet_peer_net *lpn;
4498         struct lnet_peer_ni *lpni;
4499         int lncpt;
4500         int cpt;
4501         time64_t now;
4502
4503         if (the_lnet.ln_state != LNET_STATE_RUNNING)
4504                 return;
4505
4506         now = ktime_get_seconds();
4507
4508         if (!all) {
4509                 lnet_net_lock(LNET_LOCK_EX);
4510                 lpni = lnet_peer_ni_find_locked(nid);
4511                 if (!lpni) {
4512                         lnet_net_unlock(LNET_LOCK_EX);
4513                         return;
4514                 }
4515                 lnet_set_lpni_healthv_locked(lpni, value);
4516                 lnet_peer_ni_add_to_recoveryq_locked(lpni,
4517                                              &the_lnet.ln_mt_peerNIRecovq, now);
4518                 lnet_peer_ni_decref_locked(lpni);
4519                 lnet_net_unlock(LNET_LOCK_EX);
4520                 return;
4521         }
4522
4523         lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
4524
4525         /*
4526          * Walk all the peers and reset the health value for each one to the
4527          * specified value.
4528          */
4529         lnet_net_lock(LNET_LOCK_EX);
4530         for (cpt = 0; cpt < lncpt; cpt++) {
4531                 ptable = the_lnet.ln_peer_tables[cpt];
4532                 list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
4533                         list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
4534                                 list_for_each_entry(lpni, &lpn->lpn_peer_nis,
4535                                                     lpni_peer_nis) {
4536                                         lnet_set_lpni_healthv_locked(lpni,
4537                                                                      value);
4538                                         lnet_peer_ni_add_to_recoveryq_locked(lpni,
4539                                              &the_lnet.ln_mt_peerNIRecovq, now);
4540                                 }
4541                         }
4542                 }
4543         }
4544         lnet_net_unlock(LNET_LOCK_EX);
4545 }
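
/*
 * Illustrative sketch: two hypothetical uses of the helper above, both
 * made with ln_api_mutex held as the comment on the function requires.
 *
 * Degrade a single peer NI (it is also queued for recovery pings, provided
 * the monitor thread is running and the NI has been seen alive):
 *
 *	lnet_peer_ni_set_healthv(&nid, 0, false);
 *
 * Reset every peer NI to full health; nothing is queued for recovery in
 * this case because lnet_peer_ni_add_to_recoveryq_locked() returns early
 * for NIs already at LNET_MAX_HEALTH_VALUE:
 *
 *	lnet_peer_ni_set_healthv(NULL, LNET_MAX_HEALTH_VALUE, true);
 *
 * The nid argument is not used when all == true, so passing NULL there is
 * safe with the current implementation.
 */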
4546