lnet/lnet/router.c
1 /*
2  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
3  *
4  * Copyright (c) 2011, 2017, Intel Corporation.
5  *
6  *   This file is part of Lustre, https://wiki.whamcloud.com/
7  *
8  *   Portals is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Portals is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Portals; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  */
22
23 #define DEBUG_SUBSYSTEM S_LNET
24
25 #include <linux/random.h>
26 #include <lnet/lib-lnet.h>
27
28 #define LNET_NRB_TINY_MIN       512     /* min value for each CPT */
29 #define LNET_NRB_TINY           (LNET_NRB_TINY_MIN * 4)
30 #define LNET_NRB_SMALL_MIN      4096    /* min value for each CPT */
31 #define LNET_NRB_SMALL          (LNET_NRB_SMALL_MIN * 4)
32 #define LNET_NRB_SMALL_PAGES    1
33 #define LNET_NRB_LARGE_MIN      256     /* min value for each CPT */
34 #define LNET_NRB_LARGE          (LNET_NRB_LARGE_MIN * 4)
35 #define LNET_NRB_LARGE_PAGES    ((LNET_MTU + PAGE_SIZE - 1) >> \
36                                   PAGE_SHIFT)
37
38 static char *forwarding = "";
39 module_param(forwarding, charp, 0444);
40 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
41
42 static int tiny_router_buffers;
43 module_param(tiny_router_buffers, int, 0444);
44 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
45 static int small_router_buffers;
46 module_param(small_router_buffers, int, 0444);
47 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
48 static int large_router_buffers;
49 module_param(large_router_buffers, int, 0444);
50 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
51 static int peer_buffer_credits;
52 module_param(peer_buffer_credits, int, 0444);
53 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
54
55 static int auto_down = 1;
56 module_param(auto_down, int, 0444);
57 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
58
59 int
60 lnet_peer_buffer_credits(struct lnet_net *net)
61 {
62         /* NI option overrides LNet default */
63         if (net->net_tunables.lct_peer_rtr_credits > 0)
64                 return net->net_tunables.lct_peer_rtr_credits;
65         if (peer_buffer_credits > 0)
66                 return peer_buffer_credits;
67
68         /* As an approximation, allow this peer the same number of router
69          * buffers as it is allowed outstanding sends */
70         return net->net_tunables.lct_peer_tx_credits;
71 }
72
73 static int check_routers_before_use;
74 module_param(check_routers_before_use, int, 0444);
75 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
76
77 int avoid_asym_router_failure = 1;
78 module_param(avoid_asym_router_failure, int, 0644);
79 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
80
81 static int dead_router_check_interval = 60;
82 module_param(dead_router_check_interval, int, 0644);
83 MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
84
85 static int live_router_check_interval = 60;
86 module_param(live_router_check_interval, int, 0644);
87 MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
88
89 static int router_ping_timeout = 50;
90 module_param(router_ping_timeout, int, 0644);
91 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
92
93 /*
94  * A value between 0 and 100. 0 means the gateway is considered usable even
95  * if its interfaces are in the worst possible health state.
96  * 100 means that at least one interface on the route's remote net must be
97  * fully healthy for the route to be considered alive.
98  * The default is set to 100 to ensure we maintain the original behavior.
99  */
100 unsigned int router_sensitivity_percentage = 100;
101 static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
102 static struct kernel_param_ops param_ops_rtr_sensitivity = {
103         .set = rtr_sensitivity_set,
104         .get = param_get_int,
105 };
106 #define param_check_rtr_sensitivity(name, p) \
107                 __param_check(name, p, int)
108 #ifdef HAVE_KERNEL_PARAM_OPS
109 module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
110 #else
111 module_param_call(router_sensitivity_percentage, rtr_sensitivity_set, param_get_int,
112                   &router_sensitivity_percentage, S_IRUGO|S_IWUSR);
113 #endif
114 MODULE_PARM_DESC(router_sensitivity_percentage,
115                 "Minimum gateway health, in percent, required for the gateway to be used");
116
117 static int
118 rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
119 {
120         int rc;
121         unsigned *sen = (unsigned *)kp->arg;
122         unsigned long value;
123
124         rc = kstrtoul(val, 0, &value);
125         if (rc) {
126                 CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");
127                 return rc;
128         }
129
130         if (value > 100) {
131                 CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);
132                 return -EINVAL;
133         }
134
135         /*
136          * Take the api_mutex so this update is serialized with other
137          * LNet configuration changes.
138          */
139         mutex_lock(&the_lnet.ln_api_mutex);
140
141         *sen = value;
142
143         mutex_unlock(&the_lnet.ln_api_mutex);
144
145         return 0;
146 }
147
148 int
149 lnet_peers_start_down(void)
150 {
151         return check_routers_before_use;
152 }
153
154 void
155 lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
156                    time64_t when)
157 {
158         if (lp->lpni_timestamp > when) { /* out of date information */
159                 CDEBUG(D_NET, "Out of date\n");
160                 return;
161         }
162
163         /*
164          * This function can be called with different cpt locks being
165          * held. lpni_alive_count modification needs to be properly protected.
166          * Significant reads to lpni_alive_count are also protected with
167          * the same lock
168          */
169         spin_lock(&lp->lpni_lock);
170
171         lp->lpni_timestamp = when; /* update timestamp */
172
173         if (lp->lpni_alive_count != 0 &&          /* got old news */
174             (!lp->lpni_alive) == (!alive)) {      /* new date for old news */
175                 spin_unlock(&lp->lpni_lock);
176                 CDEBUG(D_NET, "Old news\n");
177                 return;
178         }
179
180         /* Flag that notification is outstanding */
181
182         lp->lpni_alive_count++;
183         lp->lpni_alive = (alive) ? 1 : 0;
184         lp->lpni_notify = 1;
185         lp->lpni_notifylnd = notifylnd;
186         if (lp->lpni_alive)
187                 lp->lpni_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
188
189         spin_unlock(&lp->lpni_lock);
190
191         CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lpni_nid), alive);
192 }
193
194 /*
195  * This function will always be called with lp->lpni_cpt lock held.
196  */
197 static void
198 lnet_ni_notify_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
199 {
200         int alive;
201         int notifylnd;
202
203         /* Notify only in 1 thread at any time to ensure ordered notification.
204          * NB individual events can be missed; the only guarantee is that you
205          * always get the most recent news */
206
207         spin_lock(&lp->lpni_lock);
208
209         if (lp->lpni_notifying || ni == NULL) {
210                 spin_unlock(&lp->lpni_lock);
211                 return;
212         }
213
214         lp->lpni_notifying = 1;
215
216         /*
217          * lp->lpni_notify needs to be protected because it can be set in
218          * lnet_notify_locked().
219          */
220         while (lp->lpni_notify) {
221                 alive     = lp->lpni_alive;
222                 notifylnd = lp->lpni_notifylnd;
223
224                 lp->lpni_notifylnd = 0;
225                 lp->lpni_notify    = 0;
226
227                 if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
228                         spin_unlock(&lp->lpni_lock);
229                         lnet_net_unlock(lp->lpni_cpt);
230
231                         /* A new notification could happen now; I'll handle it
232                          * when control returns to me */
233
234                         (ni->ni_net->net_lnd->lnd_notify)(ni, lp->lpni_nid,
235                                                           alive);
236
237                         lnet_net_lock(lp->lpni_cpt);
238                         spin_lock(&lp->lpni_lock);
239                 }
240         }
241
242         lp->lpni_notifying = 0;
243         spin_unlock(&lp->lpni_lock);
244 }
245
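/* Take a router reference on the gateway peer.  The first reference also adds
 * the peer to the global ln_routers list (taking a peer ref) and bumps the
 * routers version.  The exclusive lnet_net_lock must be held. */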
246 static void
247 lnet_rtr_addref_locked(struct lnet_peer *lp)
248 {
249         LASSERT(lp->lp_rtr_refcount >= 0);
250
251         /* lnet_net_lock must be exclusively locked */
252         lp->lp_rtr_refcount++;
253         if (lp->lp_rtr_refcount == 1) {
254                 list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
255                 /* addref for the_lnet.ln_routers */
256                 lnet_peer_addref_locked(lp);
257                 the_lnet.ln_routers_version++;
258         }
259 }
260
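/* Drop a router reference on the gateway peer.  The last reference removes it
 * from the ln_routers list and releases the list's peer reference.
 * The exclusive lnet_net_lock must be held. */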
261 static void
262 lnet_rtr_decref_locked(struct lnet_peer *lp)
263 {
264         LASSERT(atomic_read(&lp->lp_refcount) > 0);
265         LASSERT(lp->lp_rtr_refcount > 0);
266
267         /* lnet_net_lock must be exclusively locked */
268         lp->lp_rtr_refcount--;
269         if (lp->lp_rtr_refcount == 0) {
270                 LASSERT(list_empty(&lp->lp_routes));
271
272                 list_del(&lp->lp_rtr_list);
273                 /* decref for the_lnet.ln_routers */
274                 lnet_peer_decref_locked(lp);
275                 the_lnet.ln_routers_version++;
276         }
277 }
278
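/* Look up the remote net descriptor for 'net' in the remote nets hash;
 * returns NULL if no routes to that net are configured.
 * Called with the net lock held. */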
279 struct lnet_remotenet *
280 lnet_find_rnet_locked(__u32 net)
281 {
282         struct lnet_remotenet *rnet;
283         struct list_head *tmp;
284         struct list_head *rn_list;
285
286         LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
287
288         rn_list = lnet_net2rnethash(net);
289         list_for_each(tmp, rn_list) {
290                 rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);
291
292                 if (rnet->lrn_net == net)
293                         return rnet;
294         }
295         return NULL;
296 }
297
298 static void lnet_shuffle_seed(void)
299 {
300         static int seeded;
301         struct lnet_ni *ni = NULL;
302
303         if (seeded)
304                 return;
305
306         /* Nodes with small feet have little entropy
307          * the NID for this node gives the most entropy in the low bits */
308         while ((ni = lnet_get_next_ni_locked(NULL, ni)))
309                 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
310
311         seeded = 1;
312         return;
313 }
314
315 /* NB expects LNET_LOCK held */
316 static void
317 lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
318 {
319         unsigned int len = 0;
320         unsigned int offset = 0;
321         struct list_head *e;
322
323         lnet_shuffle_seed();
324
325         list_for_each(e, &rnet->lrn_routes)
326                 len++;
327
328         /*
329          * Randomly adding routes to the list is done to ensure that when
330          * different nodes are using the same list of routers, they end up
331          * preferring different routers.
332          */
333         offset = cfs_rand() % (len + 1);
334         list_for_each(e, &rnet->lrn_routes) {
335                 if (offset == 0)
336                         break;
337                 offset--;
338         }
339         list_add(&route->lr_list, e);
340         /*
341          * force a router check on the gateway to make sure the route is
342          * alive
343          */
344         route->lr_gateway->lp_rtrcheck_timestamp = 0;
345
346         the_lnet.ln_remote_nets_version++;
347
348         /* add the route on the gateway list */
349         list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
350
351         /* take a router reference count on the gateway */
352         lnet_rtr_addref_locked(route->lr_gateway);
353 }
354
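/* Add a route to remote net 'net' via 'gateway' with the given hop count and
 * priority.  Returns 0 on success, -EINVAL for invalid arguments, -EEXIST if
 * the net is local or an identical route already exists, and -ENOMEM (or
 * another negative errno) on failure. */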
355 int
356 lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
357                unsigned int priority)
358 {
359         struct list_head *route_entry;
360         struct lnet_remotenet *rnet;
361         struct lnet_remotenet *rnet2;
362         struct lnet_route *route;
363         struct lnet_peer_ni *lpni;
364         struct lnet_peer *gw;
365         int add_route;
366         int rc;
367
368         CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
369                libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
370
371         if (gateway == LNET_NID_ANY ||
372             LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
373             net == LNET_NIDNET(LNET_NID_ANY) ||
374             LNET_NETTYP(net) == LOLND ||
375             LNET_NIDNET(gateway) == net ||
376             (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
377                 return -EINVAL;
378
379         /* it's a local network */
380         if (lnet_islocalnet(net))
381                 return -EEXIST;
382
383         /* Assume net, route, all new */
384         LIBCFS_ALLOC(route, sizeof(*route));
385         LIBCFS_ALLOC(rnet, sizeof(*rnet));
386         if (route == NULL || rnet == NULL) {
387                 CERROR("Out of memory creating route %s %d %s\n",
388                        libcfs_net2str(net), hops, libcfs_nid2str(gateway));
389                 if (route != NULL)
390                         LIBCFS_FREE(route, sizeof(*route));
391                 if (rnet != NULL)
392                         LIBCFS_FREE(rnet, sizeof(*rnet));
393                 return -ENOMEM;
394         }
395
396         INIT_LIST_HEAD(&rnet->lrn_routes);
397         rnet->lrn_net = net;
398         /* store the local and remote net that the route represents */
399         route->lr_lnet = LNET_NIDNET(gateway);
400         route->lr_net = net;
401         route->lr_priority = priority;
402         route->lr_hops = hops;
403
404         lnet_net_lock(LNET_LOCK_EX);
405
406         /*
407          * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
408          * lose that once we're done
409          */
410         lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
411         if (IS_ERR(lpni)) {
412                 lnet_net_unlock(LNET_LOCK_EX);
413
414                 LIBCFS_FREE(route, sizeof(*route));
415                 LIBCFS_FREE(rnet, sizeof(*rnet));
416
417                 rc = PTR_ERR(lpni);
418                 CERROR("Error %d creating route %s %d %s\n", rc,
419                         libcfs_net2str(net), hops,
420                         libcfs_nid2str(gateway));
421                 return rc;
422         }
423
424         LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
425         gw = lpni->lpni_peer_net->lpn_peer;
426
427         route->lr_gateway = gw;
428
429         rnet2 = lnet_find_rnet_locked(net);
430         if (rnet2 == NULL) {
431                 /* new network */
432                 list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
433                 rnet2 = rnet;
434         }
435
436         /* Search for a duplicate route (it's a NOOP if it is) */
437         add_route = 1;
438         list_for_each(route_entry, &rnet2->lrn_routes) {
439                 struct lnet_route *route2;
440
441                 route2 = list_entry(route_entry, struct lnet_route, lr_list);
442                 if (route2->lr_gateway == route->lr_gateway) {
443                         add_route = 0;
444                         break;
445                 }
446
447                 /* lookups must be consistent: no other peer owns this NID */
448                 LASSERT(route2->lr_gateway->lp_primary_nid != gateway);
449         }
450
451         /*
452          * It is possible to add multiple routes through the same peer,
453          * but it'll be using a different NID of that peer. When the
454          * gateway is discovered, discovery will consolidate the different
455          * peers into one peer. In this case the discovery code will have
456          * to move the routes from the peer that's being deleted to the
457          * consolidated peer lp_routes list
458          */
459         if (add_route)
460                 lnet_add_route_to_rnet(rnet2, route);
461
462         /*
463          * get rid of the reference on the lpni.
464          */
465         lnet_peer_ni_decref_locked(lpni);
466         lnet_net_unlock(LNET_LOCK_EX);
467
468         rc = 0;
469
470         if (!add_route) {
471                 rc = -EEXIST;
472                 LIBCFS_FREE(route, sizeof(*route));
473         }
474
475         if (rnet != rnet2)
476                 LIBCFS_FREE(rnet, sizeof(*rnet));
477
478         /* kick start the monitor thread to handle the added route */
479         wake_up(&the_lnet.ln_mt_waitq);
480
481         return rc;
482 }
483
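/* Move every route in 'route_list' that goes through 'gw_nid' (or all of them
 * if gw_nid is LNET_NID_ANY) onto 'zombies' so they can be freed outside the
 * lock, dropping the router reference on each gateway. */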
484 static void
485 lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
486                          struct list_head *zombies)
487 {
488         struct lnet_peer *gateway;
489         struct lnet_route *route;
490         struct lnet_route *tmp;
491
492         list_for_each_entry_safe(route, tmp, route_list, lr_list) {
493                 gateway = route->lr_gateway;
494                 if (gw_nid != LNET_NID_ANY &&
495                     gw_nid != gateway->lp_primary_nid)
496                         continue;
497
498                 /*
499                  * move to zombie to delete outside the lock
500                  * Note that this function is called with the
501                  * ln_api_mutex held as well as the exclusive net
502                  * lock. Adding to the remote net list happens
503                  * under the same conditions. Same goes for the
504                  * gateway router list
505                  */
506                 list_move(&route->lr_list, zombies);
507                 the_lnet.ln_remote_nets_version++;
508
509                 list_del(&route->lr_gwlist);
510                 lnet_rtr_decref_locked(gateway);
511         }
512 }
513
514 int
515 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
516 {
517         struct list_head rnet_zombies;
518         struct lnet_remotenet *rnet;
519         struct lnet_remotenet *tmp;
520         struct list_head *rn_list;
521         struct lnet_peer_ni *lpni;
522         struct lnet_route *route;
523         struct list_head zombies;
524         struct lnet_peer *lp;
525         int i = 0;
526
527         INIT_LIST_HEAD(&rnet_zombies);
528         INIT_LIST_HEAD(&zombies);
529
530         CDEBUG(D_NET, "Del route: net %s : gw %s\n",
531                libcfs_net2str(net), libcfs_nid2str(gw_nid));
532
533         /* NB Caller may specify either all routes via the given gateway
534          * or a specific route entry identified by actual NIDs */
535
536         lnet_net_lock(LNET_LOCK_EX);
537
538         lpni = lnet_find_peer_ni_locked(gw_nid);
539         if (lpni) {
540                 lp = lpni->lpni_peer_net->lpn_peer;
541                 LASSERT(lp);
542                 gw_nid = lp->lp_primary_nid;
543                 lnet_peer_ni_decref_locked(lpni);
544         }
545
546         if (net != LNET_NIDNET(LNET_NID_ANY)) {
547                 rnet = lnet_find_rnet_locked(net);
548                 if (!rnet) {
549                         lnet_net_unlock(LNET_LOCK_EX);
550                         return -ENOENT;
551                 }
552                 lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
553                                          &zombies);
554                 if (list_empty(&rnet->lrn_routes))
555                         list_move(&rnet->lrn_list, &rnet_zombies);
556                 goto delete_zombies;
557         }
558
559         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
560                 rn_list = &the_lnet.ln_remote_nets_hash[i];
561
562                 list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
563                         lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
564                                                  &zombies);
565                         if (list_empty(&rnet->lrn_routes))
566                                 list_move(&rnet->lrn_list, &rnet_zombies);
567                 }
568         }
569
570 delete_zombies:
571         lnet_net_unlock(LNET_LOCK_EX);
572
573         while (!list_empty(&zombies)) {
574                 route = list_first_entry(&zombies, struct lnet_route, lr_list);
575                 list_del(&route->lr_list);
576                 LIBCFS_FREE(route, sizeof(*route));
577         }
578
579         while (!list_empty(&rnet_zombies)) {
580                 rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
581                                         lrn_list);
582                 list_del(&rnet->lrn_list);
583                 LIBCFS_FREE(rnet, sizeof(*rnet));
584         }
585
586         return 0;
587 }
588
589 void
590 lnet_destroy_routes (void)
591 {
592         lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
593 }
594
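/* Report the page count, buffer count and credit state of each router buffer
 * pool on CPT 'cpt' into 'pool_cfg', along with the global routing flag.
 * Returns -ENOENT if the router pools are not allocated or 'cpt' is not found. */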
595 int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
596 {
597         struct lnet_rtrbufpool *rbp;
598         int i, rc = -ENOENT, j;
599
600         if (the_lnet.ln_rtrpools == NULL)
601                 return rc;
602
603
604         cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
605                 if (i != cpt)
606                         continue;
607
608                 lnet_net_lock(i);
609                 for (j = 0; j < LNET_NRBPOOLS; j++) {
610                         pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
611                         pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
612                         pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
613                         pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
614                 }
615                 lnet_net_unlock(i);
616                 rc = 0;
617                 break;
618         }
619
620         lnet_net_lock(LNET_LOCK_EX);
621         pool_cfg->pl_routing = the_lnet.ln_routing;
622         lnet_net_unlock(LNET_LOCK_EX);
623
624         return rc;
625 }
626
627 int
628 lnet_get_route(int idx, __u32 *net, __u32 *hops,
629                lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
630 {
631         struct list_head *e1;
632         struct list_head *e2;
633         struct lnet_remotenet *rnet;
634         struct lnet_route        *route;
635         int               cpt;
636         int               i;
637         struct list_head *rn_list;
638
639         cpt = lnet_net_lock_current();
640
641         for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
642                 rn_list = &the_lnet.ln_remote_nets_hash[i];
643                 list_for_each(e1, rn_list) {
644                         rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
645
646                         list_for_each(e2, &rnet->lrn_routes) {
647                                 route = list_entry(e2, struct lnet_route,
648                                                    lr_list);
649
650                                 if (idx-- == 0) {
651                                         *net      = rnet->lrn_net;
652                                         *hops     = route->lr_hops;
653                                         *priority = route->lr_priority;
654                                         *gateway  = route->lr_gateway->lp_primary_nid;
655                                         *alive    = lnet_is_route_alive(route);
656                                         lnet_net_unlock(cpt);
657                                         return 0;
658                                 }
659                         }
660                 }
661         }
662
663         lnet_net_unlock(cpt);
664         return -ENOENT;
665 }
666
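/* Byte-swap the header and NI status entries of a ping buffer received from a
 * peer of opposite endianness; at most pb_nnis entries are swapped. */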
667 void
668 lnet_swap_pinginfo(struct lnet_ping_buffer *pbuf)
669 {
670         struct lnet_ni_status *stat;
671         int nnis;
672         int i;
673
674         __swab32s(&pbuf->pb_info.pi_magic);
675         __swab32s(&pbuf->pb_info.pi_features);
676         __swab32s(&pbuf->pb_info.pi_pid);
677         __swab32s(&pbuf->pb_info.pi_nnis);
678         nnis = pbuf->pb_info.pi_nnis;
679         if (nnis > pbuf->pb_nnis)
680                 nnis = pbuf->pb_nnis;
681         for (i = 0; i < nnis; i++) {
682                 stat = &pbuf->pb_info.pi_ni[i];
683                 __swab64s(&stat->ns_nid);
684                 __swab32s(&stat->ns_status);
685         }
686         return;
687 }
688
689 /**
690  * TODO: re-implement
691  */
692 static void
693 lnet_parse_rc_info(struct lnet_rc_data *rcd)
694 {
695         rcd = rcd;
696 }
697
698 static void
699 lnet_router_checker_event(struct lnet_event *event)
700 {
701         struct lnet_rc_data *rcd = event->md.user_ptr;
702         struct lnet_peer_ni *lp;
703
704         LASSERT(rcd != NULL);
705
706         if (event->unlinked) {
707                 LNetInvalidateMDHandle(&rcd->rcd_mdh);
708                 return;
709         }
710
711         LASSERT(event->type == LNET_EVENT_SEND ||
712                 event->type == LNET_EVENT_REPLY);
713
714         lp = rcd->rcd_gateway;
715         LASSERT(lp != NULL);
716
717          /* NB: this is called with lnet_res_lock held; a few places need
718           * to hold both locks at the same time, so take care with lock
719           * ordering */
720         lnet_net_lock(lp->lpni_cpt);
721         if (!lnet_isrouter(lp) || lp->lpni_rcd != rcd) {
722                 /* ignore if no longer a router or rcd is replaced */
723                 goto out;
724         }
725
726         if (event->type == LNET_EVENT_SEND) {
727                 if (event->status == 0)
728                         goto out;
729         }
730
731         /* LNET_EVENT_REPLY */
732         /* A successful REPLY means the router is up.  If _any_ comms
733          * to the router fail I assume it's down (this will happen if
734          * we ping alive routers to try to detect router death before
735          * apps get burned). */
736
737         lnet_notify_locked(lp, 1, !event->status, ktime_get_seconds());
738         /* The router checker will wake up very shortly and do the
739          * actual notification.
740          * XXX If 'lp' stops being a router before then, it will still
741          * have the notification pending!!! */
742
743         if (avoid_asym_router_failure && event->status == 0)
744                 lnet_parse_rc_info(rcd);
745
746  out:
747         lnet_net_unlock(lp->lpni_cpt);
748 }
749
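/* Spin (sleeping one second per pass) until every peer on the ln_routers list
 * has been discovered.  Used at startup when check_routers_before_use is set. */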
750 static void
751 lnet_wait_known_routerstate(void)
752 {
753         struct lnet_peer *rtr;
754         struct list_head *entry;
755         int all_known;
756
757         LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
758
759         for (;;) {
760                 int cpt = lnet_net_lock_current();
761
762                 all_known = 1;
763                 list_for_each(entry, &the_lnet.ln_routers) {
764                         rtr = list_entry(entry, struct lnet_peer,
765                                          lp_rtr_list);
766
767                         spin_lock(&rtr->lp_lock);
768
769                         if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
770                                 all_known = 0;
771                                 spin_unlock(&rtr->lp_lock);
772                                 break;
773                         }
774                         spin_unlock(&rtr->lp_lock);
775                 }
776
777                 lnet_net_unlock(cpt);
778
779                 if (all_known)
780                         return;
781
782                 set_current_state(TASK_UNINTERRUPTIBLE);
783                 schedule_timeout(cfs_time_seconds(1));
784         }
785 }
786
787 /* TODO: reimplement */
788 void
789 lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net)
790 {
791         struct lnet_route *rte;
792         struct lnet_peer *lp;
793
794         if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0)
795                 lp = gw->lpni_peer_net->lpn_peer;
796         else
797                 return;
798
799         list_for_each_entry(rte, &lp->lp_routes, lr_gwlist) {
800                 if (rte->lr_net == net) {
801                         rte->lr_downis = 0;
802                         break;
803                 }
804         }
805 }
806
807 static void
808 lnet_update_ni_status_locked(void)
809 {
810         struct lnet_ni *ni = NULL;
811         time64_t now;
812         time64_t timeout;
813
814         LASSERT(the_lnet.ln_routing);
815
816         timeout = router_ping_timeout +
817                   MAX(live_router_check_interval, dead_router_check_interval);
818
819         now = ktime_get_real_seconds();
820         while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
821                 if (ni->ni_net->net_lnd->lnd_type == LOLND)
822                         continue;
823
824                 if (now < ni->ni_last_alive + timeout)
825                         continue;
826
827                 lnet_ni_lock(ni);
828                 /* re-check with lock */
829                 if (now < ni->ni_last_alive + timeout) {
830                         lnet_ni_unlock(ni);
831                         continue;
832                 }
833
834                 LASSERT(ni->ni_status != NULL);
835
836                 if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
837                         CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
838                                libcfs_nid2str(ni->ni_nid), timeout);
839                         /* NB: so far, this is the only place to set
840                          * NI status to "down" */
841                         ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
842                 }
843                 lnet_ni_unlock(ni);
844         }
845 }
846
847 int lnet_router_pre_mt_start(void)
848 {
849         int rc;
850
851         if (check_routers_before_use &&
852             dead_router_check_interval <= 0) {
853                 LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
854                                    " set if 'check_routers_before_use' is set"
855                                    "\n");
856                 return -EINVAL;
857         }
858
859         rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
860         if (rc != 0) {
861                 CERROR("Can't allocate EQ(0): %d\n", rc);
862                 return -ENOMEM;
863         }
864
865         return 0;
866 }
867
868 void lnet_router_post_mt_start(void)
869 {
870         if (check_routers_before_use) {
871                 /* Note that a helpful side-effect of pinging all known routers
872                  * at startup is that it makes them drop stale connections they
873                  * may have to a previous instance of me. */
874                 lnet_wait_known_routerstate();
875         }
876 }
877
878 void
879 lnet_router_cleanup(void)
880 {
881         int rc;
882
883         rc = LNetEQFree(the_lnet.ln_rc_eqh);
884         LASSERT(rc == 0);
885         return;
886 }
887
888 void
889 lnet_prune_rc_data(int wait_unlink)
890 {
891         wait_unlink = wait_unlink;
892 }
893
894 /*
895  * This function is called from the monitor thread to check if there are
896  * any active routers that need to be checked.
897  */
898 inline bool
899 lnet_router_checker_active(void)
900 {
901         if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
902                 return true;
903
904         /* Router Checker thread needs to run when routing is enabled in
905          * order to call lnet_update_ni_status_locked() */
906         if (the_lnet.ln_routing)
907                 return true;
908
909         /* if there are routers that need to be cleaned up then do so */
910         if (!list_empty(&the_lnet.ln_rcd_deathrow) ||
911             !list_empty(&the_lnet.ln_rcd_zombie))
912                 return true;
913
914         return !list_empty(&the_lnet.ln_routers) &&
915                 (live_router_check_interval > 0 ||
916                  dead_router_check_interval > 0);
917 }
918
919 void
920 lnet_check_routers(void)
921 {
922         struct lnet_peer *rtr;
923         struct list_head *entry;
924         __u64   version;
925         int     cpt;
926
927         cpt = lnet_net_lock_current();
928 rescan:
929         version = the_lnet.ln_routers_version;
930
931         list_for_each(entry, &the_lnet.ln_routers) {
932                 rtr = list_entry(entry, struct lnet_peer,
933                                  lp_rtr_list);
934
935                 /* TODO use discovery to determine if router is alive */
936
937                 /* NB dropped lock */
938                 if (version != the_lnet.ln_routers_version) {
939                         /* the routers list has changed */
940                         goto rescan;
941                 }
942         }
943
944         if (the_lnet.ln_routing)
945                 lnet_update_ni_status_locked();
946
947         lnet_net_unlock(cpt);
948
949         lnet_prune_rc_data(0); /* don't wait for UNLINK */
950 }
951
952 void
953 lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
954 {
955         int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
956
957         while (--npages >= 0)
958                 __free_page(rb->rb_kiov[npages].kiov_page);
959
960         LIBCFS_FREE(rb, sz);
961 }
962
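/* Allocate a router buffer with rbp_npages zeroed pages on CPT 'cpt'; on
 * failure any partially allocated pages are freed and NULL is returned. */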
963 static struct lnet_rtrbuf *
964 lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
965 {
966         int            npages = rbp->rbp_npages;
967         int            sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
968         struct page   *page;
969         struct lnet_rtrbuf *rb;
970         int            i;
971
972         LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
973         if (rb == NULL)
974                 return NULL;
975
976         rb->rb_pool = rbp;
977
978         for (i = 0; i < npages; i++) {
979                 page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
980                                           GFP_KERNEL | __GFP_ZERO);
981                 if (page == NULL) {
982                         while (--i >= 0)
983                                 __free_page(rb->rb_kiov[i].kiov_page);
984
985                         LIBCFS_FREE(rb, sz);
986                         return NULL;
987                 }
988
989                 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
990                 rb->rb_kiov[i].kiov_offset = 0;
991                 rb->rb_kiov[i].kiov_page = page;
992         }
993
994         return rb;
995 }
996
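/* Drop any messages queued on the pool, free all of its buffers and reset its
 * buffer and credit counters.  A pool that was never filled is left untouched. */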
997 static void
998 lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
999 {
1000         int npages = rbp->rbp_npages;
1001         struct lnet_rtrbuf *rb;
1002         struct list_head tmp;
1003
1004         if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1005                 return;
1006
1007         INIT_LIST_HEAD(&tmp);
1008
1009         lnet_net_lock(cpt);
1010         list_splice_init(&rbp->rbp_msgs, &tmp);
1011         lnet_drop_routed_msgs_locked(&tmp, cpt);
1012         list_splice_init(&rbp->rbp_bufs, &tmp);
1013         rbp->rbp_req_nbuffers = 0;
1014         rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1015         rbp->rbp_mincredits = 0;
1016         lnet_net_unlock(cpt);
1017
1018         /* Free buffers on the free list. */
1019         while (!list_empty(&tmp)) {
1020                 rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
1021                 list_del(&rb->rb_list);
1022                 lnet_destroy_rtrbuf(rb, npages);
1023         }
1024 }
1025
1026 static int
1027 lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
1028 {
1029         struct list_head rb_list;
1030         struct lnet_rtrbuf *rb;
1031         int             num_rb;
1032         int             num_buffers = 0;
1033         int             old_req_nbufs;
1034         int             npages = rbp->rbp_npages;
1035
1036         lnet_net_lock(cpt);
1037         /* If we are called for less buffers than already in the pool, we
1038          * just lower the req_nbuffers number and excess buffers will be
1039          * thrown away as they are returned to the free list.  Credits
1040          * then get adjusted as well.
1041          * If we already have enough buffers allocated to serve the
1042          * increase requested, then we can treat that the same way as we
1043          * do the decrease. */
1044         num_rb = nbufs - rbp->rbp_nbuffers;
1045         if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
1046                 rbp->rbp_req_nbuffers = nbufs;
1047                 lnet_net_unlock(cpt);
1048                 return 0;
1049         }
1050         /* store the older value of rbp_req_nbuffers and then set it to
1051          * the new request to prevent lnet_return_rx_credits_locked() from
1052          * freeing buffers that we need to keep around */
1053         old_req_nbufs = rbp->rbp_req_nbuffers;
1054         rbp->rbp_req_nbuffers = nbufs;
1055         lnet_net_unlock(cpt);
1056
1057         INIT_LIST_HEAD(&rb_list);
1058
1059         /* allocate the buffers on a local list first.  If all buffers are
1060          * allocated successfully then join this list to the rbp buffer
1061          * list.  If not then free all allocated buffers. */
1062         while (num_rb-- > 0) {
1063                 rb = lnet_new_rtrbuf(rbp, cpt);
1064                 if (rb == NULL) {
1065                         CERROR("Failed to allocate %d route bufs of %d pages\n",
1066                                nbufs, npages);
1067
1068                         lnet_net_lock(cpt);
1069                         rbp->rbp_req_nbuffers = old_req_nbufs;
1070                         lnet_net_unlock(cpt);
1071
1072                         goto failed;
1073                 }
1074
1075                 list_add(&rb->rb_list, &rb_list);
1076                 num_buffers++;
1077         }
1078
1079         lnet_net_lock(cpt);
1080
1081         list_splice_tail(&rb_list, &rbp->rbp_bufs);
1082         rbp->rbp_nbuffers += num_buffers;
1083         rbp->rbp_credits += num_buffers;
1084         rbp->rbp_mincredits = rbp->rbp_credits;
1085         /* We need to schedule blocked msgs using the newly
1086          * added buffers. */
1087         while (!list_empty(&rbp->rbp_bufs) &&
1088                !list_empty(&rbp->rbp_msgs))
1089                 lnet_schedule_blocked_locked(rbp);
1090
1091         lnet_net_unlock(cpt);
1092
1093         return 0;
1094
1095 failed:
1096         while (!list_empty(&rb_list)) {
1097                 rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
1098                 list_del(&rb->rb_list);
1099                 lnet_destroy_rtrbuf(rb, npages);
1100         }
1101
1102         return -ENOMEM;
1103 }
1104
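/* Initialize an empty router buffer pool for buffers of 'npages' pages;
 * buffers and credits are added later by lnet_rtrpool_adjust_bufs(). */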
1105 static void
1106 lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
1107 {
1108         INIT_LIST_HEAD(&rbp->rbp_msgs);
1109         INIT_LIST_HEAD(&rbp->rbp_bufs);
1110
1111         rbp->rbp_npages = npages;
1112         rbp->rbp_credits = 0;
1113         rbp->rbp_mincredits = 0;
1114 }
1115
1116 void
1117 lnet_rtrpools_free(int keep_pools)
1118 {
1119         struct lnet_rtrbufpool *rtrp;
1120         int               i;
1121
1122         if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1123                 return;
1124
1125         cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1126                 lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
1127                 lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
1128                 lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
1129         }
1130
1131         if (!keep_pools) {
1132                 cfs_percpt_free(the_lnet.ln_rtrpools);
1133                 the_lnet.ln_rtrpools = NULL;
1134         }
1135 }
1136
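/* Work out the per-CPT number of tiny (zero payload) router buffers from the
 * module parameter, or the default when unset, enforcing the per-CPT minimum.
 * Returns -EINVAL if the parameter is negative. */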
1137 static int
1138 lnet_nrb_tiny_calculate(void)
1139 {
1140         int     nrbs = LNET_NRB_TINY;
1141
1142         if (tiny_router_buffers < 0) {
1143                 LCONSOLE_ERROR_MSG(0x10c,
1144                                    "tiny_router_buffers=%d invalid when "
1145                                    "routing enabled\n", tiny_router_buffers);
1146                 return -EINVAL;
1147         }
1148
1149         if (tiny_router_buffers > 0)
1150                 nrbs = tiny_router_buffers;
1151
1152         nrbs /= LNET_CPT_NUMBER;
1153         return max(nrbs, LNET_NRB_TINY_MIN);
1154 }
1155
1156 static int
1157 lnet_nrb_small_calculate(void)
1158 {
1159         int     nrbs = LNET_NRB_SMALL;
1160
1161         if (small_router_buffers < 0) {
1162                 LCONSOLE_ERROR_MSG(0x10c,
1163                                    "small_router_buffers=%d invalid when "
1164                                    "routing enabled\n", small_router_buffers);
1165                 return -EINVAL;
1166         }
1167
1168         if (small_router_buffers > 0)
1169                 nrbs = small_router_buffers;
1170
1171         nrbs /= LNET_CPT_NUMBER;
1172         return max(nrbs, LNET_NRB_SMALL_MIN);
1173 }
1174
1175 static int
1176 lnet_nrb_large_calculate(void)
1177 {
1178         int     nrbs = LNET_NRB_LARGE;
1179
1180         if (large_router_buffers < 0) {
1181                 LCONSOLE_ERROR_MSG(0x10c,
1182                                    "large_router_buffers=%d invalid when "
1183                                    "routing enabled\n", large_router_buffers);
1184                 return -EINVAL;
1185         }
1186
1187         if (large_router_buffers > 0)
1188                 nrbs = large_router_buffers;
1189
1190         nrbs /= LNET_CPT_NUMBER;
1191         return max(nrbs, LNET_NRB_LARGE_MIN);
1192 }
1193
1194 int
1195 lnet_rtrpools_alloc(int im_a_router)
1196 {
1197         struct lnet_rtrbufpool *rtrp;
1198         int     nrb_tiny;
1199         int     nrb_small;
1200         int     nrb_large;
1201         int     rc;
1202         int     i;
1203
1204         if (!strcmp(forwarding, "")) {
1205                 /* not set either way */
1206                 if (!im_a_router)
1207                         return 0;
1208         } else if (!strcmp(forwarding, "disabled")) {
1209                 /* explicitly disabled */
1210                 return 0;
1211         } else if (!strcmp(forwarding, "enabled")) {
1212                 /* explicitly enabled */
1213         } else {
1214                 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1215                                    "'enabled' or 'disabled'\n");
1216                 return -EINVAL;
1217         }
1218
1219         nrb_tiny = lnet_nrb_tiny_calculate();
1220         if (nrb_tiny < 0)
1221                 return -EINVAL;
1222
1223         nrb_small = lnet_nrb_small_calculate();
1224         if (nrb_small < 0)
1225                 return -EINVAL;
1226
1227         nrb_large = lnet_nrb_large_calculate();
1228         if (nrb_large < 0)
1229                 return -EINVAL;
1230
1231         the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1232                                                 LNET_NRBPOOLS *
1233                                                 sizeof(struct lnet_rtrbufpool));
1234         if (the_lnet.ln_rtrpools == NULL) {
1235                 LCONSOLE_ERROR_MSG(0x10c,
1236                                    "Failed to initialize router buffer pools\n");
1237                 return -ENOMEM;
1238         }
1239
1240         cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1241                 lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
1242                 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1243                                               nrb_tiny, i);
1244                 if (rc != 0)
1245                         goto failed;
1246
1247                 lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
1248                                   LNET_NRB_SMALL_PAGES);
1249                 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1250                                               nrb_small, i);
1251                 if (rc != 0)
1252                         goto failed;
1253
1254                 lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
1255                                   LNET_NRB_LARGE_PAGES);
1256                 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1257                                               nrb_large, i);
1258                 if (rc != 0)
1259                         goto failed;
1260         }
1261
1262         lnet_net_lock(LNET_LOCK_EX);
1263         the_lnet.ln_routing = 1;
1264         lnet_net_unlock(LNET_LOCK_EX);
1265         wake_up(&the_lnet.ln_mt_waitq);
1266         return 0;
1267
1268  failed:
1269         lnet_rtrpools_free(0);
1270         return rc;
1271 }
1272
1273 static int
1274 lnet_rtrpools_adjust_helper(int tiny, int small, int large)
1275 {
1276         int nrb = 0;
1277         int rc = 0;
1278         int i;
1279         struct lnet_rtrbufpool *rtrp;
1280
1281         /* If the provided values for each buffer pool are different than the
1282          * configured values, we need to take action. */
1283         if (tiny >= 0) {
1284                 tiny_router_buffers = tiny;
1285                 nrb = lnet_nrb_tiny_calculate();
1286                 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1287                         rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1288                                                       nrb, i);
1289                         if (rc != 0)
1290                                 return rc;
1291                 }
1292         }
1293         if (small >= 0) {
1294                 small_router_buffers = small;
1295                 nrb = lnet_nrb_small_calculate();
1296                 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1297                         rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1298                                                       nrb, i);
1299                         if (rc != 0)
1300                                 return rc;
1301                 }
1302         }
1303         if (large >= 0) {
1304                 large_router_buffers = large;
1305                 nrb = lnet_nrb_large_calculate();
1306                 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1307                         rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1308                                                       nrb, i);
1309                         if (rc != 0)
1310                                 return rc;
1311                 }
1312         }
1313
1314         return 0;
1315 }
1316
1317 int
1318 lnet_rtrpools_adjust(int tiny, int small, int large)
1319 {
1320         /* this function doesn't revert the changes if adding new buffers
1321          * failed.  It's up to the user space caller to revert the
1322          * changes. */
1323
1324         if (!the_lnet.ln_routing)
1325                 return 0;
1326
1327         return lnet_rtrpools_adjust_helper(tiny, small, large);
1328 }
1329
1330 int
1331 lnet_rtrpools_enable(void)
1332 {
1333         int rc = 0;
1334
1335         if (the_lnet.ln_routing)
1336                 return 0;
1337
1338         if (the_lnet.ln_rtrpools == NULL)
1339                 /* If routing is turned off, and we have never
1340                  * initialized the pools before, just call the
1341                  * standard buffer pool allocation routine as
1342                  * if we are just configuring this for the first
1343                  * time. */
1344                 rc = lnet_rtrpools_alloc(1);
1345         else
1346                 rc = lnet_rtrpools_adjust_helper(0, 0, 0);
1347         if (rc != 0)
1348                 return rc;
1349
1350         lnet_net_lock(LNET_LOCK_EX);
1351         the_lnet.ln_routing = 1;
1352
1353         the_lnet.ln_ping_target->pb_info.pi_features &=
1354                 ~LNET_PING_FEAT_RTE_DISABLED;
1355         lnet_net_unlock(LNET_LOCK_EX);
1356
1357         return rc;
1358 }
1359
1360 void
1361 lnet_rtrpools_disable(void)
1362 {
1363         if (!the_lnet.ln_routing)
1364                 return;
1365
1366         lnet_net_lock(LNET_LOCK_EX);
1367         the_lnet.ln_routing = 0;
1368         the_lnet.ln_ping_target->pb_info.pi_features |=
1369                 LNET_PING_FEAT_RTE_DISABLED;
1370
1371         tiny_router_buffers = 0;
1372         small_router_buffers = 0;
1373         large_router_buffers = 0;
1374         lnet_net_unlock(LNET_LOCK_EX);
1375         lnet_rtrpools_free(1);
1376 }
1377
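/* Entry point for liveness notifications from an LND (ni != NULL) or from
 * userspace (ni == NULL): peer 'nid' was seen up or down at time 'when'.
 * Notifications for a different net than 'ni', or carrying a timestamp in the
 * future, are rejected with -EINVAL. */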
1378 int
1379 lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, int alive, time64_t when)
1380 {
1381         struct lnet_peer_ni *lp = NULL;
1382         time64_t now = ktime_get_seconds();
1383         int cpt = lnet_cpt_of_nid(nid, ni);
1384
1385         LASSERT (!in_interrupt ());
1386
1387         CDEBUG (D_NET, "%s notifying %s: %s\n",
1388                 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1389                 libcfs_nid2str(nid),
1390                 alive ? "up" : "down");
1391
1392         if (ni != NULL &&
1393             LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1394                 CWARN("Ignoring notification of %s %s by %s (different net)\n",
1395                       libcfs_nid2str(nid), alive ? "birth" : "death",
1396                       libcfs_nid2str(ni->ni_nid));
1397                 return -EINVAL;
1398         }
1399
1400         /* can't do predictions... */
1401         if (when > now) {
1402                 CWARN("Ignoring prediction from %s of %s %s "
1403                       "%lld seconds in the future\n",
1404                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1405                       libcfs_nid2str(nid), alive ? "up" : "down", when - now);
1406                 return -EINVAL;
1407         }
1408
1409         if (ni != NULL && !alive &&             /* LND telling me she's down */
1410             !auto_down) {                       /* auto-down disabled */
1411                 CDEBUG(D_NET, "Auto-down disabled\n");
1412                 return 0;
1413         }
1414
1415         lnet_net_lock(cpt);
1416
1417         if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1418                 lnet_net_unlock(cpt);
1419                 return -ESHUTDOWN;
1420         }
1421
1422         lp = lnet_find_peer_ni_locked(nid);
1423         if (lp == NULL) {
1424                 /* nid not found */
1425                 lnet_net_unlock(cpt);
1426                 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1427                 return 0;
1428         }
1429
1430         /*
1431          * It is possible for this function to be called for the same peer
1432          * but with different NIs. We want to synchronize the notification
1433          * between the different calls. So we will use the lpni_cpt to
1434          * grab the net lock.
1435          */
1436         if (lp->lpni_cpt != cpt) {
1437                 lnet_net_unlock(cpt);
1438                 cpt = lp->lpni_cpt;
1439                 lnet_net_lock(cpt);
1440         }
1441
1442         /* We can't fully trust the LND to report an exact peer last_alive
1443          * when it notifies us about a dead peer. For example ksocklnd can
1444          * call us with when == _time_when_the_node_was_booted_ if
1445          * no connections were successfully established */
1446         if (ni != NULL && !alive && when < lp->lpni_last_alive)
1447                 when = lp->lpni_last_alive;
1448
1449         lnet_notify_locked(lp, ni == NULL, alive, when);
1450
1451         if (ni != NULL)
1452                 lnet_ni_notify_locked(ni, lp);
1453
1454         lnet_peer_ni_decref_locked(lp);
1455
1456         lnet_net_unlock(cpt);
1457         return 0;
1458 }
1459 EXPORT_SYMBOL(lnet_notify);