lnet/lnet/router.c (fs/lustre-release.git, commit "LU-11300 lnet: peer aliveness")
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 *   This file is part of Lustre, https://wiki.whamcloud.com/
 *
 *   Portals is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Portals is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Portals; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>

#define LNET_NRB_TINY_MIN       512     /* min value for each CPT */
#define LNET_NRB_TINY           (LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN      4096    /* min value for each CPT */
#define LNET_NRB_SMALL          (LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES    1
#define LNET_NRB_LARGE_MIN      256     /* min value for each CPT */
#define LNET_NRB_LARGE          (LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES    ((LNET_MTU + PAGE_SIZE - 1) >> \
                                  PAGE_SHIFT)
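/* The *_MIN values above are per-CPT floors; LNET_NRB_TINY/SMALL/LARGE are
 * node-wide defaults that lnet_nrb_*_calculate() divides by LNET_CPT_NUMBER
 * before applying the floor. */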

static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");

int
lnet_peer_buffer_credits(struct lnet_net *net)
{
        /* NI option overrides LNet default */
        if (net->net_tunables.lct_peer_rtr_credits > 0)
                return net->net_tunables.lct_peer_rtr_credits;
        if (peer_buffer_credits > 0)
                return peer_buffer_credits;

        /* As an approximation, allow this peer the same number of router
         * buffers as it is allowed outstanding sends */
        return net->net_tunables.lct_peer_tx_credits;
}

static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

static int dead_router_check_interval = 60;
module_param(dead_router_check_interval, int, 0644);
MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 60;
module_param(live_router_check_interval, int, 0644);
MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");

/*
 * A value between 0 and 100. 0 means that even if the router's interfaces
 * have the worst possible health, the gateway is still considered usable.
 * 100 means that at least one interface on the route's remote net must be
 * fully healthy for the route to be considered alive.
 * The default is 100 to preserve the original behavior.
 */
unsigned int router_sensitivity_percentage = 100;
static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
static struct kernel_param_ops param_ops_rtr_sensitivity = {
        .set = rtr_sensitivity_set,
        .get = param_get_int,
};
#define param_check_rtr_sensitivity(name, p) \
                __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(router_sensitivity_percentage, rtr_sensitivity_set, param_get_int,
                  &router_sensitivity_percentage, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(router_sensitivity_percentage,
                "Minimum gateway health, in percent, required for it to be used");

static int
rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
        int rc;
        unsigned *sen = (unsigned *)kp->arg;
        unsigned long value;

        rc = kstrtoul(val, 0, &value);
        if (rc) {
                CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");
                return rc;
        }

        if (value > 100) {
                CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);
                return -EINVAL;
        }

        /*
         * Hold the api_mutex so the update is serialized against other
         * configuration changes and readers see a consistent value.
         */
        mutex_lock(&the_lnet.ln_api_mutex);

        *sen = value;

        mutex_unlock(&the_lnet.ln_api_mutex);

        return 0;
}
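/* Example (assuming the module is loaded as "lnet", the usual case): the
 * sensitivity can be tuned at runtime with
 *   echo 90 > /sys/module/lnet/parameters/router_sensitivity_percentage
 */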

int
lnet_peers_start_down(void)
{
        return check_routers_before_use;
}

void
lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
                   time64_t when)
{
        if (lp->lpni_timestamp > when) { /* out of date information */
                CDEBUG(D_NET, "Out of date\n");
                return;
        }

        /*
         * This function can be called with different cpt locks being
         * held. lpni_alive_count modification needs to be properly protected.
         * Significant reads to lpni_alive_count are also protected with
         * the same lock
         */
        spin_lock(&lp->lpni_lock);

        lp->lpni_timestamp = when; /* update timestamp */

        /* got old news */
        if (lp->lpni_alive_count != 0 &&
            /* new date for old news */
            (!lnet_is_peer_ni_alive(lp)) == (!alive)) {
                spin_unlock(&lp->lpni_lock);
                CDEBUG(D_NET, "Old news\n");
                return;
        }

        /* Flag that notification is outstanding */

        lp->lpni_alive_count++;
        lp->lpni_notify = 1;
        lp->lpni_notifylnd = notifylnd;
        if (lnet_is_peer_ni_alive(lp))
                lp->lpni_ping_feats = LNET_PING_FEAT_INVAL; /* reset */

        spin_unlock(&lp->lpni_lock);

        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lpni_nid), alive);
}

/*
 * This function will always be called with lp->lpni_cpt lock held.
 */
static void
lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
{
        int alive;
        int notifylnd;

        /* Notify only in 1 thread at any time to ensure ordered notification.
         * NB individual events can be missed; the only guarantee is that you
         * always get the most recent news */

        spin_lock(&lp->lpni_lock);

        if (lp->lpni_notifying || ni == NULL) {
                spin_unlock(&lp->lpni_lock);
                return;
        }

        lp->lpni_notifying = 1;

        /*
         * lp->lpni_notify needs to be protected because it can be set in
         * lnet_notify_locked().
         */
        while (lp->lpni_notify) {
                alive     = lnet_is_peer_ni_alive(lp);
                notifylnd = lp->lpni_notifylnd;

                lp->lpni_notifylnd = 0;
                lp->lpni_notify    = 0;

                if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
                        spin_unlock(&lp->lpni_lock);
                        lnet_net_unlock(lp->lpni_cpt);

                        /* A new notification could happen now; I'll handle it
                         * when control returns to me */

                        (ni->ni_net->net_lnd->lnd_notify)(ni, lp->lpni_nid,
                                                          alive);

                        lnet_net_lock(lp->lpni_cpt);
                        spin_lock(&lp->lpni_lock);
                }
        }

        lp->lpni_notifying = 0;
        spin_unlock(&lp->lpni_lock);
}

static void
lnet_rtr_addref_locked(struct lnet_peer *lp)
{
        LASSERT(lp->lp_rtr_refcount >= 0);

        /* lnet_net_lock must be exclusively locked */
        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

static void
lnet_rtr_decref_locked(struct lnet_peer *lp)
{
        LASSERT(atomic_read(&lp->lp_refcount) > 0);
        LASSERT(lp->lp_rtr_refcount > 0);

        /* lnet_net_lock must be exclusively locked */
        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                LASSERT(list_empty(&lp->lp_routes));

                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

struct lnet_remotenet *
lnet_find_rnet_locked(__u32 net)
{
        struct lnet_remotenet *rnet;
        struct list_head *tmp;
        struct list_head *rn_list;

        LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

        rn_list = lnet_net2rnethash(net);
        list_for_each(tmp, rn_list) {
                rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);

                if (rnet->lrn_net == net)
                        return rnet;
        }
        return NULL;
}

static void lnet_shuffle_seed(void)
{
        static int seeded;
        struct lnet_ni *ni = NULL;

        if (seeded)
                return;

        /* Nodes with small feet have little entropy
         * the NID for this node gives the most entropy in the low bits */
        while ((ni = lnet_get_next_ni_locked(NULL, ni)))
                add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));

        seeded = 1;
        return;
}

/* NB expects LNET_LOCK held */
static void
lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
{
        unsigned int len = 0;
        unsigned int offset = 0;
        struct list_head *e;

        lnet_shuffle_seed();

        list_for_each(e, &rnet->lrn_routes)
                len++;

        /*
         * Randomly adding routes to the list is done to ensure that when
         * different nodes are using the same list of routers, they end up
         * preferring different routers.
         */
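        /*
         * Worked example: with 3 existing routes, len == 3 and the offset
         * below is uniform over [0, 3], so the new route is equally likely
         * to land in any of the 4 possible list positions.
         */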
        offset = cfs_rand() % (len + 1);
        list_for_each(e, &rnet->lrn_routes) {
                if (offset == 0)
                        break;
                offset--;
        }
        list_add(&route->lr_list, e);
        /*
         * force a router check on the gateway to make sure the route is
         * alive
         */
        route->lr_gateway->lp_rtrcheck_timestamp = 0;

        the_lnet.ln_remote_nets_version++;

        /* add the route on the gateway list */
        list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);

        /* take a router reference count on the gateway */
        lnet_rtr_addref_locked(route->lr_gateway);
}

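/* Routes are normally configured from user space; a typical invocation
 * (example values) is:
 *   lnetctl route add --net tcp1 --gateway 10.0.0.1@tcp
 */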
int
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
               unsigned int priority)
{
        struct list_head *route_entry;
        struct lnet_remotenet *rnet;
        struct lnet_remotenet *rnet2;
        struct lnet_route *route;
        struct lnet_peer_ni *lpni;
        struct lnet_peer *gw;
        int add_route;
        int rc;

        CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
               libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));

        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
                return -EINVAL;

        /* it's a local network */
        if (lnet_islocalnet(net))
                return -EEXIST;

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                if (route != NULL)
                        LIBCFS_FREE(route, sizeof(*route));
                if (rnet != NULL)
                        LIBCFS_FREE(rnet, sizeof(*rnet));
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&rnet->lrn_routes);
        rnet->lrn_net = net;
        /* store the local and remote net that the route represents */
        route->lr_lnet = LNET_NIDNET(gateway);
        route->lr_net = net;
        route->lr_priority = priority;
        route->lr_hops = hops;

        lnet_net_lock(LNET_LOCK_EX);

        /*
         * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
         * lose that once we're done
         */
        lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
        if (IS_ERR(lpni)) {
                lnet_net_unlock(LNET_LOCK_EX);

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                rc = PTR_ERR(lpni);
                CERROR("Error %d creating route %s %d %s\n", rc,
                        libcfs_net2str(net), hops,
                        libcfs_nid2str(gateway));
                return rc;
        }

        LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
        gw = lpni->lpni_peer_net->lpn_peer;

        route->lr_gateway = gw;

        rnet2 = lnet_find_rnet_locked(net);
        if (rnet2 == NULL) {
                /* new network */
                list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
                rnet2 = rnet;
        }

        /* Search for a duplicate route (adding a duplicate is a no-op) */
        add_route = 1;
        list_for_each(route_entry, &rnet2->lrn_routes) {
                struct lnet_route *route2;

                route2 = list_entry(route_entry, struct lnet_route, lr_list);
                if (route2->lr_gateway == route->lr_gateway) {
                        add_route = 0;
                        break;
                }

                /* our lookups must be consistent: a different peer must not
                 * own this gateway NID */
                LASSERT(route2->lr_gateway->lp_primary_nid != gateway);
        }

        /*
         * It is possible to add multiple routes through the same peer,
         * but it'll be using a different NID of that peer. When the
         * gateway is discovered, discovery will consolidate the different
         * peers into one peer. In this case the discovery code will have
         * to move the routes from the peer that's being deleted to the
         * consolidated peer lp_routes list
         */
        if (add_route)
                lnet_add_route_to_rnet(rnet2, route);

        /*
         * get rid of the reference on the lpni.
         */
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(LNET_LOCK_EX);

        rc = 0;

        if (!add_route) {
                rc = -EEXIST;
                LIBCFS_FREE(route, sizeof(*route));
        }

        if (rnet != rnet2)
                LIBCFS_FREE(rnet, sizeof(*rnet));

        /* kick start the monitor thread to handle the added route */
        wake_up(&the_lnet.ln_mt_waitq);

        return rc;
}

static void
lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
                         struct list_head *zombies)
{
        struct lnet_peer *gateway;
        struct lnet_route *route;
        struct lnet_route *tmp;

        list_for_each_entry_safe(route, tmp, route_list, lr_list) {
                gateway = route->lr_gateway;
                if (gw_nid != LNET_NID_ANY &&
                    gw_nid != gateway->lp_primary_nid)
                        continue;

                /*
                 * move to zombie to delete outside the lock
                 * Note that this function is called with the
                 * ln_api_mutex held as well as the exclusive net
                 * lock. Adding to the remote net list happens
                 * under the same conditions. Same goes for the
                 * gateway router list
                 */
                list_move(&route->lr_list, zombies);
                the_lnet.ln_remote_nets_version++;

                list_del(&route->lr_gwlist);
                lnet_rtr_decref_locked(gateway);
        }
}

int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
        struct list_head rnet_zombies;
        struct lnet_remotenet *rnet;
        struct lnet_remotenet *tmp;
        struct list_head *rn_list;
        struct lnet_peer_ni *lpni;
        struct lnet_route *route;
        struct list_head zombies;
        struct lnet_peer *lp;
        int i = 0;

        INIT_LIST_HEAD(&rnet_zombies);
        INIT_LIST_HEAD(&zombies);

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * or a specific route entry (actual NIDs) */

        lnet_net_lock(LNET_LOCK_EX);

        lpni = lnet_find_peer_ni_locked(gw_nid);
        if (lpni) {
                lp = lpni->lpni_peer_net->lpn_peer;
                LASSERT(lp);
                gw_nid = lp->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);
        }

        if (net != LNET_NIDNET(LNET_NID_ANY)) {
                rnet = lnet_find_rnet_locked(net);
                if (!rnet) {
                        lnet_net_unlock(LNET_LOCK_EX);
                        return -ENOENT;
                }
                lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
                                         &zombies);
                if (list_empty(&rnet->lrn_routes))
                        list_move(&rnet->lrn_list, &rnet_zombies);
                goto delete_zombies;
        }

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
                rn_list = &the_lnet.ln_remote_nets_hash[i];

                list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
                        lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
                                                 &zombies);
                        if (list_empty(&rnet->lrn_routes))
                                list_move(&rnet->lrn_list, &rnet_zombies);
                }
        }

delete_zombies:
        lnet_net_unlock(LNET_LOCK_EX);

        while (!list_empty(&zombies)) {
                route = list_first_entry(&zombies, struct lnet_route, lr_list);
                list_del(&route->lr_list);
                LIBCFS_FREE(route, sizeof(*route));
        }

        while (!list_empty(&rnet_zombies)) {
                rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
                                        lrn_list);
                list_del(&rnet->lrn_list);
                LIBCFS_FREE(rnet, sizeof(*rnet));
        }

        return 0;
}

void
lnet_destroy_routes(void)
{
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}

int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
{
        struct lnet_rtrbufpool *rbp;
        int i, rc = -ENOENT, j;

        if (the_lnet.ln_rtrpools == NULL)
                return rc;

        cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
                if (i != cpt)
                        continue;

                lnet_net_lock(i);
                for (j = 0; j < LNET_NRBPOOLS; j++) {
                        pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
                        pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
                        pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
                        pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
                }
                lnet_net_unlock(i);
                rc = 0;
                break;
        }

        lnet_net_lock(LNET_LOCK_EX);
        pool_cfg->pl_routing = the_lnet.ln_routing;
        lnet_net_unlock(LNET_LOCK_EX);

        return rc;
}

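/*
 * Enumerate the route table: the caller passes an increasing idx (starting
 * at 0) and receives one route per call until -ENOENT signals the end.
 */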
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
               lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
        struct list_head *e1;
        struct list_head *e2;
        struct lnet_remotenet *rnet;
        struct lnet_route *route;
        int cpt;
        int i;
        struct list_head *rn_list;

        cpt = lnet_net_lock_current();

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
                rn_list = &the_lnet.ln_remote_nets_hash[i];
                list_for_each(e1, rn_list) {
                        rnet = list_entry(e1, struct lnet_remotenet, lrn_list);

                        list_for_each(e2, &rnet->lrn_routes) {
                                route = list_entry(e2, struct lnet_route,
                                                   lr_list);

                                if (idx-- == 0) {
                                        *net      = rnet->lrn_net;
                                        *hops     = route->lr_hops;
                                        *priority = route->lr_priority;
                                        *gateway  = route->lr_gateway->lp_primary_nid;
                                        *alive    = lnet_is_route_alive(route);
                                        lnet_net_unlock(cpt);
                                        return 0;
                                }
                        }
                }
        }

        lnet_net_unlock(cpt);
        return -ENOENT;
}

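/*
 * Byte-swap a ping buffer received from a peer of opposite endianness;
 * only the first min(pi_nnis, pb_nnis) NI status entries are converted.
 */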
void
lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
{
        struct lnet_ni_status *stat;
        int nnis;
        int i;

        __swab32s(&pbuf->pb_info.pi_magic);
        __swab32s(&pbuf->pb_info.pi_features);
        __swab32s(&pbuf->pb_info.pi_pid);
        __swab32s(&pbuf->pb_info.pi_nnis);
        nnis = pbuf->pb_info.pi_nnis;
        if (nnis > pbuf->pb_nnis)
                nnis = pbuf->pb_nnis;
        for (i = 0; i < nnis; i++) {
                stat = &pbuf->pb_info.pi_ni[i];
                __swab64s(&stat->ns_nid);
                __swab32s(&stat->ns_status);
        }
        return;
}

/**
 * TODO: re-implement
 */
static void
lnet_parse_rc_info(struct lnet_rc_data *rcd)
{
        rcd = rcd;
}

static void
lnet_router_checker_event(struct lnet_event *event)
{
        struct lnet_rc_data *rcd = event->md.user_ptr;
        struct lnet_peer_ni *lp;

        LASSERT(rcd != NULL);

        if (event->unlinked) {
                LNetInvalidateMDHandle(&rcd->rcd_mdh);
                return;
        }

        LASSERT(event->type == LNET_EVENT_SEND ||
                event->type == LNET_EVENT_REPLY);

        lp = rcd->rcd_gateway;
        LASSERT(lp != NULL);

        /* NB: this is called while holding lnet_res_lock; a few places
         * need to hold both locks at the same time, so take care of
         * lock ordering */
        lnet_net_lock(lp->lpni_cpt);
        if (!lnet_isrouter(lp) || lp->lpni_rcd != rcd) {
                /* ignore if no longer a router or rcd is replaced */
                goto out;
        }

        if (event->type == LNET_EVENT_SEND) {
                if (event->status == 0)
                        goto out;
        }

        /* LNET_EVENT_REPLY */
        /* A successful REPLY means the router is up.  If _any_ comms
         * to the router fail I assume it's down (this will happen if
         * we ping alive routers to try to detect router death before
         * apps get burned). */

        lnet_notify_locked(lp, 1, !event->status, ktime_get_seconds());
        /* The router checker will wake up very shortly and do the
         * actual notification.
         * XXX If 'lp' stops being a router before then, it will still
         * have the notification pending!!! */

        if (avoid_asym_router_failure && event->status == 0)
                lnet_parse_rc_info(rcd);

 out:
        lnet_net_unlock(lp->lpni_cpt);
}

static void
lnet_wait_known_routerstate(void)
{
        struct lnet_peer *rtr;
        struct list_head *entry;
        int all_known;

        LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);

        for (;;) {
                int cpt = lnet_net_lock_current();

                all_known = 1;
                list_for_each(entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, struct lnet_peer,
                                         lp_rtr_list);

                        spin_lock(&rtr->lp_lock);

                        if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
                                all_known = 0;
                                spin_unlock(&rtr->lp_lock);
                                break;
                        }
                        spin_unlock(&rtr->lp_lock);
                }

                lnet_net_unlock(cpt);

                if (all_known)
                        return;

                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
        }
}

/* TODO: reimplement */
void
lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net)
{
        struct lnet_route *rte;
        struct lnet_peer *lp;

        if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
                return;

        lp = gw->lpni_peer_net->lpn_peer;

        list_for_each_entry(rte, &lp->lp_routes, lr_gwlist) {
                if (rte->lr_net == net) {
                        rte->lr_downis = 0;
                        break;
                }
        }
}

static void
lnet_update_ni_status_locked(void)
{
        struct lnet_ni *ni = NULL;
        time64_t now;
        time64_t timeout;

        LASSERT(the_lnet.ln_routing);

        timeout = router_ping_timeout +
                  MAX(live_router_check_interval, dead_router_check_interval);

        now = ktime_get_real_seconds();
        while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
                if (ni->ni_net->net_lnd->lnd_type == LOLND)
                        continue;

                if (now < ni->ni_last_alive + timeout)
                        continue;

                lnet_ni_lock(ni);
                /* re-check with lock */
                if (now < ni->ni_last_alive + timeout) {
                        lnet_ni_unlock(ni);
                        continue;
                }

                LASSERT(ni->ni_status != NULL);

                if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
                        CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
                               libcfs_nid2str(ni->ni_nid), timeout);
                        /* NB: so far, this is the only place to set
                         * NI status to "down" */
                        ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
                }
                lnet_ni_unlock(ni);
        }
}

int lnet_router_pre_mt_start(void)
{
        int rc;

        if (check_routers_before_use &&
            dead_router_check_interval <= 0) {
                LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be set if 'check_routers_before_use' is set\n");
                return -EINVAL;
        }

        rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ(0): %d\n", rc);
                return -ENOMEM;
        }

        return 0;
}

void lnet_router_post_mt_start(void)
{
        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
        }
}

void
lnet_router_cleanup(void)
{
        int rc;

        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT(rc == 0);
        return;
}

void
lnet_prune_rc_data(int wait_unlink)
{
        wait_unlink = wait_unlink;
}

/*
 * Called from the monitor thread to determine whether there is any router
 * state that still needs to be processed.
 */
inline bool
lnet_router_checker_active(void)
{
        if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
                return true;

        /* Router Checker thread needs to run when routing is enabled in
         * order to call lnet_update_ni_status_locked() */
        if (the_lnet.ln_routing)
                return true;

        /* if there are routers that need to be cleaned up then do so */
        if (!list_empty(&the_lnet.ln_rcd_deathrow) ||
            !list_empty(&the_lnet.ln_rcd_zombie))
                return true;

        return !list_empty(&the_lnet.ln_routers) &&
                (live_router_check_interval > 0 ||
                 dead_router_check_interval > 0);
}

void
lnet_check_routers(void)
{
        struct lnet_peer *rtr;
        struct list_head *entry;
        __u64 version;
        int cpt;

        cpt = lnet_net_lock_current();
rescan:
        version = the_lnet.ln_routers_version;

        list_for_each(entry, &the_lnet.ln_routers) {
                rtr = list_entry(entry, struct lnet_peer,
                                 lp_rtr_list);

                /* TODO use discovery to determine if router is alive */

                /* NB dropped lock */
                if (version != the_lnet.ln_routers_version) {
                        /* the routers list has changed */
                        goto rescan;
                }
        }

        if (the_lnet.ln_routing)
                lnet_update_ni_status_locked();

        lnet_net_unlock(cpt);

        lnet_prune_rc_data(0); /* don't wait for UNLINK */
}

void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
        int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);

        while (--npages >= 0)
                __free_page(rb->rb_kiov[npages].kiov_page);

        LIBCFS_FREE(rb, sz);
}

static struct lnet_rtrbuf *
lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
{
        int npages = rbp->rbp_npages;
        int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
        struct page *page;
        struct lnet_rtrbuf *rb;
        int i;

        LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
        if (rb == NULL)
                return NULL;

        rb->rb_pool = rbp;

        for (i = 0; i < npages; i++) {
                page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
                                          GFP_KERNEL | __GFP_ZERO);
                if (page == NULL) {
                        while (--i >= 0)
                                __free_page(rb->rb_kiov[i].kiov_page);

                        LIBCFS_FREE(rb, sz);
                        return NULL;
                }

                rb->rb_kiov[i].kiov_len = PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
        }

        return rb;
}

static void
lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
{
        int npages = rbp->rbp_npages;
        struct lnet_rtrbuf *rb;
        struct list_head tmp;

        if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
                return;

        INIT_LIST_HEAD(&tmp);

        lnet_net_lock(cpt);
        list_splice_init(&rbp->rbp_msgs, &tmp);
        lnet_drop_routed_msgs_locked(&tmp, cpt);
        list_splice_init(&rbp->rbp_bufs, &tmp);
        rbp->rbp_req_nbuffers = 0;
        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
        lnet_net_unlock(cpt);

        /* Free buffers on the free list. */
        while (!list_empty(&tmp)) {
                rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
        }
}

static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
        struct list_head rb_list;
        struct lnet_rtrbuf *rb;
        int num_rb;
        int num_buffers = 0;
        int old_req_nbufs;
        int npages = rbp->rbp_npages;

        lnet_net_lock(cpt);
        /* If we are called for fewer buffers than already in the pool, we
         * just lower the req_nbuffers number and excess buffers will be
         * thrown away as they are returned to the free list.  Credits
         * then get adjusted as well.
         * If we already have enough buffers allocated to serve the
         * increase requested, then we can treat that the same way as we
         * do the decrease. */
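        /* Example: shrinking from 4096 to 1024 buffers only sets
         * rbp_req_nbuffers = 1024 here; the 3072 excess buffers are
         * freed lazily as they come back to the free list. */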
        num_rb = nbufs - rbp->rbp_nbuffers;
        if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
                rbp->rbp_req_nbuffers = nbufs;
                lnet_net_unlock(cpt);
                return 0;
        }
        /* store the older value of rbp_req_nbuffers and then set it to
         * the new request to prevent lnet_return_rx_credits_locked() from
         * freeing buffers that we need to keep around */
        old_req_nbufs = rbp->rbp_req_nbuffers;
        rbp->rbp_req_nbuffers = nbufs;
        lnet_net_unlock(cpt);

        INIT_LIST_HEAD(&rb_list);

        /* allocate the buffers on a local list first.  If all buffers are
         * allocated successfully then join this list to the rbp buffer
         * list.  If not then free all allocated buffers. */
        while (num_rb-- > 0) {
                rb = lnet_new_rtrbuf(rbp, cpt);
                if (rb == NULL) {
                        CERROR("Failed to allocate %d route bufs of %d pages\n",
                               nbufs, npages);

                        lnet_net_lock(cpt);
                        rbp->rbp_req_nbuffers = old_req_nbufs;
                        lnet_net_unlock(cpt);

                        goto failed;
                }

                list_add(&rb->rb_list, &rb_list);
                num_buffers++;
        }

        lnet_net_lock(cpt);

        list_splice_tail(&rb_list, &rbp->rbp_bufs);
        rbp->rbp_nbuffers += num_buffers;
        rbp->rbp_credits += num_buffers;
        rbp->rbp_mincredits = rbp->rbp_credits;
        /* We need to schedule blocked msgs using the newly
         * added buffers. */
        while (!list_empty(&rbp->rbp_bufs) &&
               !list_empty(&rbp->rbp_msgs))
                lnet_schedule_blocked_locked(rbp);

        lnet_net_unlock(cpt);

        return 0;

failed:
        while (!list_empty(&rb_list)) {
                rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
        }

        return -ENOMEM;
}

static void
lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
{
        INIT_LIST_HEAD(&rbp->rbp_msgs);
        INIT_LIST_HEAD(&rbp->rbp_bufs);

        rbp->rbp_npages = npages;
        rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
}

void
lnet_rtrpools_free(int keep_pools)
{
        struct lnet_rtrbufpool *rtrp;
        int i;

        if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
                return;

        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
                lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
                lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
        }

        if (!keep_pools) {
                cfs_percpt_free(the_lnet.ln_rtrpools);
                the_lnet.ln_rtrpools = NULL;
        }
}

static int
lnet_nrb_tiny_calculate(void)
{
        int nrbs = LNET_NRB_TINY;

        if (tiny_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when routing enabled\n",
                                   tiny_router_buffers);
                return -EINVAL;
        }

        if (tiny_router_buffers > 0)
                nrbs = tiny_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
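        /* Worked example: tiny_router_buffers=2048 on a node with 4 CPTs
         * yields 2048 / 4 = 512 buffers per CPT, which is exactly the
         * LNET_NRB_TINY_MIN floor applied below. */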
        return max(nrbs, LNET_NRB_TINY_MIN);
}

static int
lnet_nrb_small_calculate(void)
{
        int nrbs = LNET_NRB_SMALL;

        if (small_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c, "small_router_buffers=%d invalid when routing enabled\n",
                                   small_router_buffers);
                return -EINVAL;
        }

        if (small_router_buffers > 0)
                nrbs = small_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
        return max(nrbs, LNET_NRB_SMALL_MIN);
}

static int
lnet_nrb_large_calculate(void)
{
        int nrbs = LNET_NRB_LARGE;

        if (large_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c, "large_router_buffers=%d invalid when routing enabled\n",
                                   large_router_buffers);
                return -EINVAL;
        }

        if (large_router_buffers > 0)
                nrbs = large_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
        return max(nrbs, LNET_NRB_LARGE_MIN);
}

int
lnet_rtrpools_alloc(int im_a_router)
{
        struct lnet_rtrbufpool *rtrp;
        int nrb_tiny;
        int nrb_small;
        int nrb_large;
        int rc;
        int i;

        if (!strcmp(forwarding, "")) {
                /* not set either way */
                if (!im_a_router)
                        return 0;
        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */
                return 0;
        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */
        } else {
                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
                return -EINVAL;
        }
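        /* 'forwarding' is a module parameter, normally set from module
         * configuration, e.g. (example file name):
         *   options lnet forwarding="enabled"
         * in /etc/modprobe.d/lustre.conf */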

        nrb_tiny = lnet_nrb_tiny_calculate();
        if (nrb_tiny < 0)
                return -EINVAL;

        nrb_small = lnet_nrb_small_calculate();
        if (nrb_small < 0)
                return -EINVAL;

        nrb_large = lnet_nrb_large_calculate();
        if (nrb_large < 0)
                return -EINVAL;

        the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
                                                LNET_NRBPOOLS *
                                                sizeof(struct lnet_rtrbufpool));
        if (the_lnet.ln_rtrpools == NULL) {
                LCONSOLE_ERROR_MSG(0x10c,
                                   "Failed to initialize router buffer pool\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
                                              nrb_tiny, i);
                if (rc != 0)
                        goto failed;

                lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
                                  LNET_NRB_SMALL_PAGES);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
                                              nrb_small, i);
                if (rc != 0)
                        goto failed;

                lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
                                  LNET_NRB_LARGE_PAGES);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
                                              nrb_large, i);
                if (rc != 0)
                        goto failed;
        }

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 1;
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_mt_waitq);
        return 0;

 failed:
        lnet_rtrpools_free(0);
        return rc;
}

static int
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
{
        int nrb = 0;
        int rc = 0;
        int i;
        struct lnet_rtrbufpool *rtrp;

        /* If the provided values for each buffer pool are different from the
         * configured values, we need to take action. */
        if (tiny >= 0) {
                tiny_router_buffers = tiny;
                nrb = lnet_nrb_tiny_calculate();
                cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                        rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
                                                      nrb, i);
                        if (rc != 0)
                                return rc;
                }
        }
        if (small >= 0) {
                small_router_buffers = small;
                nrb = lnet_nrb_small_calculate();
                cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                        rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
                                                      nrb, i);
                        if (rc != 0)
                                return rc;
                }
        }
        if (large >= 0) {
                large_router_buffers = large;
                nrb = lnet_nrb_large_calculate();
                cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                        rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
                                                      nrb, i);
                        if (rc != 0)
                                return rc;
                }
        }

        return 0;
}

int
lnet_rtrpools_adjust(int tiny, int small, int large)
{
        /* this function doesn't revert the changes if adding new buffers
         * failed.  It's up to the user space caller to revert the
         * changes. */

        if (!the_lnet.ln_routing)
                return 0;

        return lnet_rtrpools_adjust_helper(tiny, small, large);
}

int
lnet_rtrpools_enable(void)
{
        int rc = 0;

        if (the_lnet.ln_routing)
                return 0;

        if (the_lnet.ln_rtrpools == NULL)
                /* If routing is turned off, and we have never
                 * initialized the pools before, just call the
                 * standard buffer pool allocation routine as
                 * if we are just configuring this for the first
                 * time. */
                rc = lnet_rtrpools_alloc(1);
        else
                rc = lnet_rtrpools_adjust_helper(0, 0, 0);
        if (rc != 0)
                return rc;

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 1;

        the_lnet.ln_ping_target->pb_info.pi_features &=
                ~LNET_PING_FEAT_RTE_DISABLED;
        lnet_net_unlock(LNET_LOCK_EX);

        return rc;
}

void
lnet_rtrpools_disable(void)
{
        if (!the_lnet.ln_routing)
                return;

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 0;
        the_lnet.ln_ping_target->pb_info.pi_features |=
                LNET_PING_FEAT_RTE_DISABLED;

        tiny_router_buffers = 0;
        small_router_buffers = 0;
        large_router_buffers = 0;
        lnet_net_unlock(LNET_LOCK_EX);
        lnet_rtrpools_free(1);
}

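/*
 * lnet_notify - entry point for peer aliveness updates.
 * @ni:    local NI the notification relates to, or NULL for a
 *         userspace-originated notification
 * @nid:   NID of the peer whose aliveness changed
 * @alive: non-zero if the peer is believed to be up
 * @when:  time (in seconds) at which the aliveness was observed
 */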
int
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, time64_t when)
{
        struct lnet_peer_ni *lp = NULL;
        time64_t now = ktime_get_seconds();
        int cpt = lnet_cpt_of_nid(nid, ni);

        LASSERT(!in_interrupt());

        CDEBUG(D_NET, "%s notifying %s: %s\n",
               (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
               libcfs_nid2str(nid),
               alive ? "up" : "down");

        if (ni != NULL &&
            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN("Ignoring notification of %s %s by %s (different net)\n",
                      libcfs_nid2str(nid), alive ? "birth" : "death",
                      libcfs_nid2str(ni->ni_nid));
                return -EINVAL;
        }

        /* can't do predictions... */
        if (when > now) {
                CWARN("Ignoring prediction from %s of %s %s %lld seconds in the future\n",
                      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                      libcfs_nid2str(nid), alive ? "up" : "down", when - now);
                return -EINVAL;
        }

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");
                return 0;
        }

        lnet_net_lock(cpt);

        if (the_lnet.ln_state != LNET_STATE_RUNNING) {
                lnet_net_unlock(cpt);
                return -ESHUTDOWN;
        }

        lp = lnet_find_peer_ni_locked(nid);
        if (lp == NULL) {
                /* nid not found */
                lnet_net_unlock(cpt);
                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                return 0;
        }

        /*
         * It is possible for this function to be called for the same peer
         * but with different NIs. We want to synchronize the notification
         * between the different calls. So we will use the lpni_cpt to
         * grab the net lock.
         */
        if (lp->lpni_cpt != cpt) {
                lnet_net_unlock(cpt);
                cpt = lp->lpni_cpt;
                lnet_net_lock(cpt);
        }

        /* We can't fully trust the LND to report an exact peer last_alive
         * when it notifies us about a dead peer. For example, ksocklnd can
         * call us with when == _time_when_the_node_was_booted_ if
         * no connections were successfully established */
        if (ni != NULL && !alive && when < lp->lpni_last_alive)
                when = lp->lpni_last_alive;

        lnet_notify_locked(lp, ni == NULL, alive, when);

        if (ni != NULL)
                lnet_ni_notify_locked(ni, lp);

        lnet_peer_ni_decref_locked(lp);

        lnet_net_unlock(cpt);
        return 0;
}
EXPORT_SYMBOL(lnet_notify);