 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>
#define LNET_NRB_TINY_MIN       512     /* min value for each CPT */
#define LNET_NRB_TINY           (LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN      4096    /* min value for each CPT */
#define LNET_NRB_SMALL          (LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES    1
#define LNET_NRB_LARGE_MIN      256     /* min value for each CPT */
#define LNET_NRB_LARGE          (LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES    ((LNET_MTU + PAGE_SIZE - 1) >> \
                                 PAGE_SHIFT)
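/*
 * Worked example (userspace sketch, not built here): how the page counts
 * above translate into buffer sizes. The LNET_MTU and PAGE_SIZE values used
 * in the arithmetic below are assumptions of the example (1 MiB and 4 KiB);
 * the real values come from LNet and the running kernel.
 */
#if 0
#include <stdio.h>

int main(void)
{
        const unsigned long lnet_mtu = 1UL << 20;       /* assumed 1 MiB */
        const unsigned long page_size = 4096;           /* assumed 4 KiB */
        unsigned long large_pages = (lnet_mtu + page_size - 1) / page_size;

        /* tiny buffers carry zero payload, small buffers one page,
         * large buffers a full LNET_MTU worth of pages */
        printf("large router buffer: %lu pages (%lu bytes)\n",
               large_pages, large_pages * page_size);   /* 256 pages, 1 MiB */
        return 0;
}
#endif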
extern unsigned int lnet_current_net_count;

static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
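/*
 * Example (illustrative, values are hypothetical): the parameters above are
 * normally set at module load time on a router node, e.g. in
 * /etc/modprobe.d/lnet.conf:
 *
 *   options lnet forwarding=enabled
 *   options lnet tiny_router_buffers=2048 small_router_buffers=16384
 *   options lnet large_router_buffers=1024
 *
 * Suitable buffer counts depend entirely on the site's routing load.
 */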
lnet_peer_buffer_credits(struct lnet_net *net)
        /* NI option overrides LNet default */
        if (net->net_tunables.lct_peer_rtr_credits > 0)
                return net->net_tunables.lct_peer_rtr_credits;
        if (peer_buffer_credits > 0)
                return peer_buffer_credits;

        /* As an approximation, allow this peer the same number of router
         * buffers as it is allowed outstanding sends */
        return net->net_tunables.lct_peer_tx_credits;
static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

int dead_router_check_interval = INT_MIN;
module_param(dead_router_check_interval, int, 0444);
MODULE_PARM_DESC(dead_router_check_interval, "(DEPRECATED - Use alive_router_check_interval)");

int live_router_check_interval = INT_MIN;
module_param(live_router_check_interval, int, 0444);
MODULE_PARM_DESC(live_router_check_interval, "(DEPRECATED - Use alive_router_check_interval)");

int alive_router_check_interval = 60;
module_param(alive_router_check_interval, int, 0644);
MODULE_PARM_DESC(alive_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
 * A value between 0 and 100. 0 means that even if the router's interfaces
 * have the worst health, still consider the gateway usable.
 * 100 means that at least one interface on the route's remote net must be
 * 100% healthy for the route to be considered alive.
 * The default is set to 100 to ensure we maintain the original behavior.
unsigned int router_sensitivity_percentage = 100;
static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
static struct kernel_param_ops param_ops_rtr_sensitivity = {
        .set = rtr_sensitivity_set,
        .get = param_get_int,
#define param_check_rtr_sensitivity(name, p) \
                __param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
module_param_call(router_sensitivity_percentage, rtr_sensitivity_set, param_get_int,
                  &router_sensitivity_percentage, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(router_sensitivity_percentage,
                 "How healthy a gateway should be to be used in percent");
rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
        unsigned *sen = (unsigned *)kp->arg;

        rc = kstrtoul(val, 0, &value);
                CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");

        if (value < 0 || value > 100) {
                CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);

         * The purpose of locking the api_mutex here is to ensure that
         * the correct value ends up stored properly.
        mutex_lock(&the_lnet.ln_api_mutex);

        mutex_unlock(&the_lnet.ln_api_mutex);
lnet_rtr_transfer_to_peer(struct lnet_peer *src, struct lnet_peer *target)
        struct lnet_route *route;

        lnet_net_lock(LNET_LOCK_EX);
        target->lp_rtr_refcount += src->lp_rtr_refcount;
        /* move the list of queued messages to the new peer */
        list_splice_init(&src->lp_rtrq, &target->lp_rtrq);
        /* move all the routes that reference the peer */
        list_splice_init(&src->lp_routes, &target->lp_routes);
        /* update all the routes to point to the new peer */
        list_for_each_entry(route, &target->lp_routes, lr_gwlist)
                route->lr_gateway = target;
        /* remove the old peer from the ln_routers list */
        list_del_init(&src->lp_rtr_list);
        /* add the new peer to the ln_routers list */
        if (list_empty(&target->lp_rtr_list)) {
                lnet_peer_addref_locked(target);
                list_add_tail(&target->lp_rtr_list, &the_lnet.ln_routers);
        /* reset the ref count on the old peer and decrement its ref count */
        src->lp_rtr_refcount = 0;
        lnet_peer_decref_locked(src);
        /* update the router version */
        the_lnet.ln_routers_version++;
        lnet_net_unlock(LNET_LOCK_EX);
lnet_peers_start_down(void)
        return check_routers_before_use;
 * A net is alive if at least one gateway NI on the network is alive.
lnet_is_gateway_net_alive(struct lnet_peer_net *lpn)
        struct lnet_peer_ni *lpni;

        list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
                if (lnet_is_peer_ni_alive(lpni))

 * a gateway is alive only if all its nets are alive
 * called with cpt lock held
bool lnet_is_gateway_alive(struct lnet_peer *gw)
        struct lnet_peer_net *lpn;

        list_for_each_entry(lpn, &gw->lp_peer_nets, lpn_peer_nets) {
                if (!lnet_is_gateway_net_alive(lpn))
 * lnet_is_route_alive() needs to be called with cpt lock held
 * A route is alive if the gateway can route between the local network and
 * the remote network of the route.
 * This means at least one NI is alive on each of the local and remote
 * networks of the gateway.
bool lnet_is_route_alive(struct lnet_route *route)
        struct lnet_peer *gw = route->lr_gateway;
        struct lnet_peer_net *llpn;
        struct lnet_peer_net *rlpn;

         * check the gateway's interfaces on the route's local net to make
         * sure that the gateway is viable.
        llpn = lnet_peer_get_net_locked(gw, route->lr_lnet);

        route_alive = lnet_is_gateway_net_alive(llpn);

        if (avoid_asym_router_failure) {
                rlpn = lnet_peer_get_net_locked(gw, route->lr_net);
                route_alive = route_alive &&
                              lnet_is_gateway_net_alive(rlpn);

        spin_lock(&gw->lp_lock);
        if (!(gw->lp_state & LNET_PEER_ROUTER_ENABLED)) {
                if (gw->lp_rtr_refcount > 0)
                        CERROR("peer %s is being used as a gateway but routing feature is not turned on\n",
                               libcfs_nid2str(gw->lp_primary_nid));
        spin_unlock(&gw->lp_lock);
lnet_consolidate_routes_locked(struct lnet_peer *orig_lp,
                               struct lnet_peer *new_lp)
        struct lnet_peer_ni *lpni;
        struct lnet_route *route;

         * Although a route is correlated with a peer, when it is added
         * a specific NID is used. That NID refers to a peer_ni within
         * a peer. There could be other peer_nis on the same net, which
         * can be used to send to that gateway. However when we are
         * consolidating gateways because of discovery, the nid used to
         * add the route might've moved between gateway peers. In this
         * case we want to move the route to the new gateway as well. The
         * intent here is not to confuse the user who added the route.
        list_for_each_entry(route, &orig_lp->lp_routes, lr_gwlist) {
                lpni = lnet_peer_get_ni_locked(orig_lp, route->lr_nid);

                        lnet_net_lock(LNET_LOCK_EX);
                        list_move(&route->lr_gwlist, &new_lp->lp_routes);
                        lnet_net_unlock(LNET_LOCK_EX);
lnet_router_discovery_complete(struct lnet_peer *lp)
        struct lnet_peer_ni *lpni = NULL;

        spin_lock(&lp->lp_lock);
        lp->lp_state &= ~LNET_PEER_RTR_DISCOVERY;
        spin_unlock(&lp->lp_lock);

         * Router discovery successful? All peer information would've been
         * updated already. No need to do any more processing
        if (!lp->lp_dc_error)

         * discovery failed? then we need to set the status of each lpni
         * to DOWN. It will be updated the next time we discover the
         * router. For router peer NIs not on local networks, we never send
         * messages directly to them, so their health will always remain
         * at maximum. We can only tell if they are up or down from the
         * status returned in the PING response. If we fail to get that
         * status in our scheduled router discovery, then we'll assume
         * it's down until we're told otherwise.
        CDEBUG(D_NET, "%s: Router discovery failed %d\n",
               libcfs_nid2str(lp->lp_primary_nid), lp->lp_dc_error);
        while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
                lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
lnet_rtr_addref_locked(struct lnet_peer *lp)
        LASSERT(lp->lp_rtr_refcount >= 0);

        /* lnet_net_lock must be exclusively locked */
        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;

lnet_rtr_decref_locked(struct lnet_peer *lp)
        LASSERT(atomic_read(&lp->lp_refcount) > 0);
        LASSERT(lp->lp_rtr_refcount > 0);

        /* lnet_net_lock must be exclusively locked */
        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                LASSERT(list_empty(&lp->lp_routes));

                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
struct lnet_remotenet *
lnet_find_rnet_locked(__u32 net)
        struct lnet_remotenet *rnet;
        struct list_head *tmp;
        struct list_head *rn_list;

        LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

        rn_list = lnet_net2rnethash(net);
        list_for_each(tmp, rn_list) {
                rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);

                if (rnet->lrn_net == net)
static void lnet_shuffle_seed(void)
        struct lnet_ni *ni = NULL;

        /* Nodes with small feet have little entropy
         * the NID for this node gives the most entropy in the low bits */
        while ((ni = lnet_get_next_ni_locked(NULL, ni)))
                add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
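/*
 * Illustrative sketch (userspace, not built): why the NID is fed to the
 * entropy pool above. Nodes in the same cluster differ mainly in the low
 * (address) bits of their NIDs, so folding those bits into a seed is what
 * differentiates otherwise identical nodes. This only illustrates the idea;
 * it is not the kernel's add_device_randomness() mechanism.
 */
#if 0
#include <stdint.h>

static uint32_t example_seed_from_nid(uint64_t nid)
{
        /* XOR-fold the 64-bit NID down to 32 bits; the low bits dominate */
        return (uint32_t)(nid ^ (nid >> 32));
}
#endif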
/* NB expects LNET_LOCK held */
lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
        struct lnet_peer_net *lpn;
        unsigned int offset = 0;
        unsigned int len = 0;

        list_for_each(e, &rnet->lrn_routes)

         * Randomly adding routes to the list is done to ensure that when
         * different nodes are using the same list of routers, they end up
         * preferring different routers.
        offset = prandom_u32_max(len + 1);
        list_for_each(e, &rnet->lrn_routes) {

        list_add(&route->lr_list, e);

         * force a router check on the gateway to make sure the route is
        list_for_each_entry(lpn, &route->lr_gateway->lp_peer_nets,
                            lpn_peer_nets)
                lpn->lpn_rtrcheck_timestamp = 0;

        the_lnet.ln_remote_nets_version++;

        /* add the route on the gateway list */
        list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);

        /* take a router reference count on the gateway */
        lnet_rtr_addref_locked(route->lr_gateway);
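/*
 * Illustrative sketch (userspace, not built): the effect of the
 * random-offset insertion above. Each node picks its own random offset, so
 * identical route configurations still produce differently ordered lists
 * and traffic spreads across the routers. All names below are local to the
 * sketch.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        const char *routers[] = { "gw-a", "gw-b", "gw-c", "gw-d" };
        const char *order[4];
        unsigned int len = 0;

        srand(time(NULL));

        /* insert each router at a random offset in [0, len] */
        for (unsigned int i = 0; i < 4; i++) {
                unsigned int offset = rand() % (len + 1);

                for (unsigned int j = len; j > offset; j--)
                        order[j] = order[j - 1];
                order[offset] = routers[i];
                len++;
        }

        for (unsigned int i = 0; i < len; i++)
                printf("%u: %s\n", i, order[i]);
        return 0;
}
#endif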
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
               __u32 priority, __u32 sensitivity)
        struct list_head *route_entry;
        struct lnet_remotenet *rnet;
        struct lnet_remotenet *rnet2;
        struct lnet_route *route;
        struct lnet_peer_ni *lpni;
        struct lnet_peer *gw;

        CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
               libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));

        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))

        /* it's a local network */
        if (lnet_islocalnet(net))

        if (!lnet_islocalnet(LNET_NIDNET(gateway))) {
                CERROR("Cannot add route with gateway %s. There is no local interface configured on LNet %s\n",
                       libcfs_nid2str(gateway),
                       libcfs_net2str(LNET_NIDNET(gateway)));
                return -EHOSTUNREACH;

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                        LIBCFS_FREE(route, sizeof(*route));
                        LIBCFS_FREE(rnet, sizeof(*rnet));

        INIT_LIST_HEAD(&rnet->lrn_routes);

        /* store the local and remote net that the route represents */
        route->lr_lnet = LNET_NIDNET(gateway);
        route->lr_nid = gateway;
        route->lr_priority = priority;
        route->lr_hops = hops;

        lnet_net_lock(LNET_LOCK_EX);

         * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
         * lose that once we're done
        lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
                lnet_net_unlock(LNET_LOCK_EX);

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                CERROR("Error %d creating route %s %d %s\n", rc,
                       libcfs_net2str(net), hops,
                       libcfs_nid2str(gateway));

        LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
        gw = lpni->lpni_peer_net->lpn_peer;

        route->lr_gateway = gw;

        rnet2 = lnet_find_rnet_locked(net);
                list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));

        /* Search for a duplicate route (it's a NOOP if it is) */
        list_for_each(route_entry, &rnet2->lrn_routes) {
                struct lnet_route *route2;

                route2 = list_entry(route_entry, struct lnet_route, lr_list);
                if (route2->lr_gateway == route->lr_gateway) {

                /* our lookups must be true */
                LASSERT(route2->lr_gateway->lp_primary_nid != gateway);

         * It is possible to add multiple routes through the same peer,
         * but it'll be using a different NID of that peer. When the
         * gateway is discovered, discovery will consolidate the different
         * peers into one peer. In this case the discovery code will have
         * to move the routes from the peer that's being deleted to the
         * consolidated peer lp_routes list
        gw->lp_health_sensitivity = sensitivity;
        lnet_add_route_to_rnet(rnet2, route);
        if (lnet_peer_discovery_disabled)
                CWARN("Consider turning discovery on to enable full Multi-Rail routing functionality\n");

         * get rid of the reference on the lpni.
        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(LNET_LOCK_EX);

        LIBCFS_FREE(route, sizeof(*route));

        LIBCFS_FREE(rnet, sizeof(*rnet));

        /* kick start the monitor thread to handle the added route */
        wake_up(&the_lnet.ln_mt_waitq);
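/*
 * Usage note: lnet_add_route() is normally reached from user-space
 * configuration rather than called directly, e.g. (illustrative addresses):
 *
 *   lnetctl route add --net o2ib1 --gateway 10.10.0.1@tcp
 *
 * which arrives here through the LNet ioctl path with the net, gateway and
 * remaining arguments filled in from the command line or their defaults.
 */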
lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
                         struct list_head *zombies)
        struct lnet_peer *gateway;
        struct lnet_route *route;
        struct lnet_route *tmp;

        list_for_each_entry_safe(route, tmp, route_list, lr_list) {
                gateway = route->lr_gateway;
                if (gw_nid != LNET_NID_ANY &&
                    gw_nid != gateway->lp_primary_nid)

                 * move to zombie to delete outside the lock
                 * Note that this function is called with the
                 * ln_api_mutex held as well as the exclusive net
                 * lock. Adding to the remote net list happens
                 * under the same conditions. Same goes for the
                 * gateway router list
                list_move(&route->lr_list, zombies);
                the_lnet.ln_remote_nets_version++;

                list_del(&route->lr_gwlist);
                lnet_rtr_decref_locked(gateway);
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
        struct list_head rnet_zombies;
        struct lnet_remotenet *rnet;
        struct lnet_remotenet *tmp;
        struct list_head *rn_list;
        struct lnet_peer_ni *lpni;
        struct lnet_route *route;
        struct list_head zombies;
        struct lnet_peer *lp;

        INIT_LIST_HEAD(&rnet_zombies);
        INIT_LIST_HEAD(&zombies);

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * or a specific route entry (actual NIDs) */

        lnet_net_lock(LNET_LOCK_EX);

        lpni = lnet_find_peer_ni_locked(gw_nid);
                lp = lpni->lpni_peer_net->lpn_peer;

                gw_nid = lp->lp_primary_nid;
                lnet_peer_ni_decref_locked(lpni);

        if (net != LNET_NIDNET(LNET_NID_ANY)) {
                rnet = lnet_find_rnet_locked(net);
                        lnet_net_unlock(LNET_LOCK_EX);

                lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
                                         &zombies);
                if (list_empty(&rnet->lrn_routes))
                        list_move(&rnet->lrn_list, &rnet_zombies);

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
                rn_list = &the_lnet.ln_remote_nets_hash[i];

                list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
                        lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
                                                 &zombies);
                        if (list_empty(&rnet->lrn_routes))
                                list_move(&rnet->lrn_list, &rnet_zombies);

         * check if there are any routes remaining on the gateway
         * If there are no more routes make sure to set the peer's
         * lp_disc_net_id to 0 (invalid), in case we add more routes in
         * the future on that gateway, then we start our discovery process
                if (list_empty(&lp->lp_routes))
                        lp->lp_disc_net_id = 0;

        lnet_net_unlock(LNET_LOCK_EX);

        while (!list_empty(&zombies)) {
                route = list_first_entry(&zombies, struct lnet_route, lr_list);
                list_del(&route->lr_list);
                LIBCFS_FREE(route, sizeof(*route));

        while (!list_empty(&rnet_zombies)) {
                rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
                                        lrn_list);
                list_del(&rnet->lrn_list);
                LIBCFS_FREE(rnet, sizeof(*rnet));

lnet_destroy_routes(void)
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
        struct lnet_rtrbufpool *rbp;
        int i, rc = -ENOENT, j;

        if (the_lnet.ln_rtrpools == NULL)

        cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {

                for (j = 0; j < LNET_NRBPOOLS; j++) {
                        pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
                        pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
                        pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
                        pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;

        lnet_net_lock(LNET_LOCK_EX);
        pool_cfg->pl_routing = the_lnet.ln_routing;
        lnet_net_unlock(LNET_LOCK_EX);
lnet_get_route(int idx, __u32 *net, __u32 *hops,
               lnet_nid_t *gateway, __u32 *alive, __u32 *priority, __u32 *sensitivity)
        struct lnet_remotenet *rnet;
        struct list_head *rn_list;
        struct lnet_route *route;
        struct list_head *e1;
        struct list_head *e2;

        cpt = lnet_net_lock_current();

        for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
                rn_list = &the_lnet.ln_remote_nets_hash[i];
                list_for_each(e1, rn_list) {
                        rnet = list_entry(e1, struct lnet_remotenet, lrn_list);

                        list_for_each(e2, &rnet->lrn_routes) {
                                route = list_entry(e2, struct lnet_route,
                                                   lr_list);

                                        *net = rnet->lrn_net;
                                        *gateway = route->lr_nid;
                                        *hops = route->lr_hops;
                                        *priority = route->lr_priority;
                                        *sensitivity = route->lr_gateway->lp_health_sensitivity;
                                        *alive = lnet_is_route_alive(route);
                                        lnet_net_unlock(cpt);

        lnet_net_unlock(cpt);
lnet_wait_known_routerstate(void)
        struct lnet_peer *rtr;
        struct list_head *entry;

        LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);

                int cpt = lnet_net_lock_current();

                list_for_each(entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, struct lnet_peer,
                                         lp_rtr_list);

                        spin_lock(&rtr->lp_lock);

                        if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
                                spin_unlock(&rtr->lp_lock);

                        spin_unlock(&rtr->lp_lock);

                lnet_net_unlock(cpt);

                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(cfs_time_seconds(1));
lnet_net_set_status_locked(struct lnet_net *net, __u32 status)
        list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
                    ni->ni_status->ns_status != status) {
                        ni->ni_status->ns_status = status;
lnet_update_ni_status_locked(void)
        struct lnet_net *net;

        LASSERT(the_lnet.ln_routing);

        timeout = router_ping_timeout + alive_router_check_interval;

        now = ktime_get_real_seconds();
        list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
                if (net->net_lnd->lnd_type == LOLND)

                if (now < net->net_last_alive + timeout)

                spin_lock(&net->net_lock);
                /* re-check with lock */
                if (now < net->net_last_alive + timeout) {
                        spin_unlock(&net->net_lock);
                spin_unlock(&net->net_lock);

                 * if the net didn't receive any traffic for longer than the
                 * timeout on any of its constituent NIs, then mark all
                push = lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN);
void lnet_wait_router_start(void)
        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
 * This function is called from the monitor thread to check if there are
 * any active routers that need to be checked.
lnet_router_checker_active(void)
        /* Router Checker thread needs to run when routing is enabled in
         * order to call lnet_update_ni_status_locked() */
        if (the_lnet.ln_routing)

        return !list_empty(&the_lnet.ln_routers) &&
               alive_router_check_interval > 0;
lnet_check_routers(void)
        struct lnet_peer_net *first_lpn = NULL;
        struct lnet_peer_net *lpn;
        struct lnet_peer_ni *lpni;
        struct list_head *entry;
        struct lnet_peer *rtr;

        cpt = lnet_net_lock_current();

        version = the_lnet.ln_routers_version;

        list_for_each(entry, &the_lnet.ln_routers) {
                rtr = list_entry(entry, struct lnet_peer,
                                 lp_rtr_list);

                now = ktime_get_real_seconds();

                 * only discover the router if we've passed
                 * alive_router_check_interval seconds. Some of the router
                 * interfaces could be down and in that case they would be
                 * undergoing recovery separately from this discovery.
                /* find next peer net which is also local */
                net_id = rtr->lp_disc_net_id;
                        lpn = lnet_get_next_peer_net_locked(rtr, net_id);
                                CERROR("gateway %s has no networks\n",
                                       libcfs_nid2str(rtr->lp_primary_nid));
                        if (first_lpn == lpn)

                        found_lpn = lnet_islocalnet_locked(lpn->lpn_net_id);
                        net_id = lpn->lpn_net_id;
                } while (!found_lpn);

                if (!found_lpn || !lpn) {
                        CERROR("no local network found for gateway %s\n",
                               libcfs_nid2str(rtr->lp_primary_nid));

                if (now - lpn->lpn_rtrcheck_timestamp <
                    alive_router_check_interval / lnet_current_net_count)

                 * If we're currently discovering the peer then don't
                 * issue another discovery
                spin_lock(&rtr->lp_lock);
                if (rtr->lp_state & LNET_PEER_RTR_DISCOVERY) {
                        spin_unlock(&rtr->lp_lock);

                /* make sure we actively discover the router */
                rtr->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
                rtr->lp_state |= LNET_PEER_RTR_DISCOVERY;
                spin_unlock(&rtr->lp_lock);

                /* find the peer_ni associated with the primary NID */
                lpni = lnet_peer_get_ni_locked(rtr, rtr->lp_primary_nid);
                        CDEBUG(D_NET, "Expected to find an lpni for %s, but none found\n",
                               libcfs_nid2str(rtr->lp_primary_nid));
                lnet_peer_ni_addref_locked(lpni);

                /* specify the net to use */
                rtr->lp_disc_net_id = lpn->lpn_net_id;

                /* discover the router */
                CDEBUG(D_NET, "discover %s, cpt = %d\n",
                       libcfs_nid2str(lpni->lpni_nid), cpt);
                rc = lnet_discover_peer_locked(lpni, cpt, false);

                /* decrement ref count acquired by lnet_peer_ni_addref_locked() */
                lnet_peer_ni_decref_locked(lpni);

                        lpn->lpn_rtrcheck_timestamp = now;
                        CERROR("Failed to discover router %s\n",
                               libcfs_nid2str(rtr->lp_primary_nid));

                /* NB dropped lock */
                if (version != the_lnet.ln_routers_version) {
                        /* the routers list has changed */

        if (the_lnet.ln_routing)
                push = lnet_update_ni_status_locked();

        lnet_net_unlock(cpt);

        /* if the status of the ni changed update the peers */
                lnet_push_update_to_peers(1);
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
        int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);

        while (--npages >= 0)
                __free_page(rb->rb_kiov[npages].kiov_page);

        LIBCFS_FREE(rb, sz);
static struct lnet_rtrbuf *
lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
        int npages = rbp->rbp_npages;
        int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
        struct lnet_rtrbuf *rb;

        LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);

        for (i = 0; i < npages; i++) {
                page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
                                          GFP_KERNEL | __GFP_ZERO);

                                __free_page(rb->rb_kiov[i].kiov_page);

                        LIBCFS_FREE(rb, sz);

                rb->rb_kiov[i].kiov_len = PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
        int npages = rbp->rbp_npages;
        struct lnet_rtrbuf *rb;
        struct list_head tmp;

        if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */

        INIT_LIST_HEAD(&tmp);

        list_splice_init(&rbp->rbp_msgs, &tmp);
        lnet_drop_routed_msgs_locked(&tmp, cpt);
        list_splice_init(&rbp->rbp_bufs, &tmp);
        rbp->rbp_req_nbuffers = 0;
        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
        lnet_net_unlock(cpt);

        /* Free buffers on the free list. */
        while (!list_empty(&tmp)) {
                rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
        struct list_head rb_list;
        struct lnet_rtrbuf *rb;
        int num_buffers = 0;
        int npages = rbp->rbp_npages;

        /* If we are called for fewer buffers than are already in the pool,
         * we just lower the req_nbuffers number and excess buffers will be
         * thrown away as they are returned to the free list. Credits
         * then get adjusted as well.
         * If we already have enough buffers allocated to serve the
         * increase requested, then we can treat that the same way as we
         * do the decrease. */
        num_rb = nbufs - rbp->rbp_nbuffers;
        if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
                rbp->rbp_req_nbuffers = nbufs;
                lnet_net_unlock(cpt);

        /* store the older value of rbp_req_nbuffers and then set it to
         * the new request to prevent lnet_return_rx_credits_locked() from
         * freeing buffers that we need to keep around */
        old_req_nbufs = rbp->rbp_req_nbuffers;
        rbp->rbp_req_nbuffers = nbufs;
        lnet_net_unlock(cpt);

        INIT_LIST_HEAD(&rb_list);

        /* allocate the buffers on a local list first. If all buffers are
         * allocated successfully then join this list to the rbp buffer
         * list. If not then free all allocated buffers. */
        while (num_rb-- > 0) {
                rb = lnet_new_rtrbuf(rbp, cpt);
                        CERROR("Failed to allocate %d route bufs of %d pages\n",
                               nbufs, npages);

                        rbp->rbp_req_nbuffers = old_req_nbufs;
                        lnet_net_unlock(cpt);

                list_add(&rb->rb_list, &rb_list);

        list_splice_tail(&rb_list, &rbp->rbp_bufs);
        rbp->rbp_nbuffers += num_buffers;
        rbp->rbp_credits += num_buffers;
        rbp->rbp_mincredits = rbp->rbp_credits;
        /* We need to schedule blocked msg using the newly allocated buffers */
        while (!list_empty(&rbp->rbp_bufs) &&
               !list_empty(&rbp->rbp_msgs))
                lnet_schedule_blocked_locked(rbp);

        lnet_net_unlock(cpt);

        while (!list_empty(&rb_list)) {
                rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
        INIT_LIST_HEAD(&rbp->rbp_msgs);
        INIT_LIST_HEAD(&rbp->rbp_bufs);

        rbp->rbp_npages = npages;
        rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
lnet_rtrpools_free(int keep_pools)
        struct lnet_rtrbufpool *rtrp;

        if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */

        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
                lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
                lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);

                cfs_percpt_free(the_lnet.ln_rtrpools);
                the_lnet.ln_rtrpools = NULL;
lnet_nrb_tiny_calculate(void)
        int nrbs = LNET_NRB_TINY;

        if (tiny_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c,
                                   "tiny_router_buffers=%d invalid when routing enabled\n",
                                   tiny_router_buffers);

        if (tiny_router_buffers > 0)
                nrbs = tiny_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
        return max(nrbs, LNET_NRB_TINY_MIN);

lnet_nrb_small_calculate(void)
        int nrbs = LNET_NRB_SMALL;

        if (small_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c,
                                   "small_router_buffers=%d invalid when routing enabled\n",
                                   small_router_buffers);

        if (small_router_buffers > 0)
                nrbs = small_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
        return max(nrbs, LNET_NRB_SMALL_MIN);

lnet_nrb_large_calculate(void)
        int nrbs = LNET_NRB_LARGE;

        if (large_router_buffers < 0) {
                LCONSOLE_ERROR_MSG(0x10c,
                                   "large_router_buffers=%d invalid when routing enabled\n",
                                   large_router_buffers);

        if (large_router_buffers > 0)
                nrbs = large_router_buffers;

        nrbs /= LNET_CPT_NUMBER;
        return max(nrbs, LNET_NRB_LARGE_MIN);
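/*
 * Worked example (userspace sketch, not built here): the per-CPT sizing
 * rule shared by the three calculate functions above. With the module
 * parameter left at 0 the compiled-in default is divided across the CPTs
 * and clamped to the per-CPT minimum. The CPT counts below are assumptions
 * made for the example.
 */
#if 0
#include <stdio.h>

static int example_nrb_small(int small_router_buffers, int cpts)
{
        int nrbs = 4096 * 4;                    /* LNET_NRB_SMALL */

        if (small_router_buffers > 0)
                nrbs = small_router_buffers;

        nrbs /= cpts;
        return nrbs > 4096 ? nrbs : 4096;       /* LNET_NRB_SMALL_MIN */
}

int main(void)
{
        /* default, 4 CPTs: 16384 / 4 = 4096 buffers per CPT */
        printf("%d\n", example_nrb_small(0, 4));
        /* a small override spread over 8 CPTs is clamped up to the minimum */
        printf("%d\n", example_nrb_small(8192, 8));
        return 0;
}
#endif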
lnet_rtrpools_alloc(int im_a_router)
        struct lnet_rtrbufpool *rtrp;

        if (!strcmp(forwarding, "")) {
                /* not set either way */

        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */

        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */

                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");

        nrb_tiny = lnet_nrb_tiny_calculate();

        nrb_small = lnet_nrb_small_calculate();

        nrb_large = lnet_nrb_large_calculate();

        the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
                                                LNET_NRBPOOLS *
                                                sizeof(struct lnet_rtrbufpool));
        if (the_lnet.ln_rtrpools == NULL) {
                LCONSOLE_ERROR_MSG(0x10c,
                                   "Failed to initialize router buffer pool\n");

        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
                                              nrb_tiny, i);

                lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
                                  LNET_NRB_SMALL_PAGES);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
                                              nrb_small, i);

                lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
                                  LNET_NRB_LARGE_PAGES);
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
                                              nrb_large, i);

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 1;
        lnet_net_unlock(LNET_LOCK_EX);
        wake_up(&the_lnet.ln_mt_waitq);

        lnet_rtrpools_free(0);
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
        struct lnet_rtrbufpool *rtrp;

        /* If the provided values for each buffer pool are different from
         * the configured values, we need to take action. */

        tiny_router_buffers = tiny;
        nrb = lnet_nrb_tiny_calculate();
        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
                                              nrb, i);

        small_router_buffers = small;
        nrb = lnet_nrb_small_calculate();
        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
                                              nrb, i);

        large_router_buffers = large;
        nrb = lnet_nrb_large_calculate();
        cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
                rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
                                              nrb, i);

lnet_rtrpools_adjust(int tiny, int small, int large)
        /* this function doesn't revert the changes if adding new buffers
         * failed. It's up to the user space caller to revert the
         * changes. */

        if (!the_lnet.ln_routing)

        return lnet_rtrpools_adjust_helper(tiny, small, large);
lnet_rtrpools_enable(void)
        if (the_lnet.ln_routing)

        if (the_lnet.ln_rtrpools == NULL)
                /* If routing is turned off, and we have never
                 * initialized the pools before, just call the
                 * standard buffer pool allocation routine as
                 * if we are just configuring this for the first
                 * time. */
                rc = lnet_rtrpools_alloc(1);
        else
                rc = lnet_rtrpools_adjust_helper(0, 0, 0);

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 1;

        the_lnet.ln_ping_target->pb_info.pi_features &=
                ~LNET_PING_FEAT_RTE_DISABLED;
        lnet_net_unlock(LNET_LOCK_EX);

        if (lnet_peer_discovery_disabled)
                CWARN("Consider turning discovery on to enable full Multi-Rail routing functionality\n");
lnet_rtrpools_disable(void)
        if (!the_lnet.ln_routing)

        lnet_net_lock(LNET_LOCK_EX);
        the_lnet.ln_routing = 0;
        the_lnet.ln_ping_target->pb_info.pi_features |=
                LNET_PING_FEAT_RTE_DISABLED;

        tiny_router_buffers = 0;
        small_router_buffers = 0;
        large_router_buffers = 0;
        lnet_net_unlock(LNET_LOCK_EX);
        lnet_rtrpools_free(1);
lnet_notify_peer_down(struct lnet_ni *ni, lnet_nid_t nid)
        if (ni->ni_net->net_lnd->lnd_notify_peer_down != NULL)
                (ni->ni_net->net_lnd->lnd_notify_peer_down)(nid);
 * ni: local NI used to communicate with the peer
 * alive: true if peer is alive, false otherwise
 * reset: reset health value. This is requested by the LND.
 * when: notification time.
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, bool alive, bool reset,
            time64_t when)
        struct lnet_peer_ni *lpni = NULL;
        time64_t now = ktime_get_seconds();

        LASSERT(!in_interrupt());

        CDEBUG(D_NET, "%s notifying %s: %s\n",
               (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
               libcfs_nid2str(nid),
               alive ? "up" : "down");

            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN("Ignoring notification of %s %s by %s (different net)\n",
                      libcfs_nid2str(nid), alive ? "birth" : "death",
                      libcfs_nid2str(ni->ni_nid));

        /* can't do predictions... */
                CWARN("Ignoring prediction from %s of %s %s %lld seconds in the future\n",
                      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                      libcfs_nid2str(nid), alive ? "up" : "down", when - now);

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");

        /* must lock 0 since this is used for synchronization */

        if (the_lnet.ln_state != LNET_STATE_RUNNING) {

        lpni = lnet_find_peer_ni_locked(nid);

                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));

                        lnet_set_healthv(&lpni->lpni_healthv,
                                         LNET_MAX_HEALTH_VALUE);

                        lnet_inc_healthv(&lpni->lpni_healthv);

                lnet_handle_remote_failure_locked(lpni);

        /* recalculate aliveness */
        alive = lnet_is_peer_ni_alive(lpni);

        if (ni != NULL && !alive)
                lnet_notify_peer_down(ni, lpni->lpni_nid);

        cpt = lpni->lpni_cpt;

        lnet_peer_ni_decref_locked(lpni);
        lnet_net_unlock(cpt);

EXPORT_SYMBOL(lnet_notify);
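/*
 * Usage sketch (illustrative, not part of any LND): an LND that loses its
 * connection to a peer reports the failure so the peer NI's health can be
 * decremented and, with auto_down enabled, the peer marked down. The helper
 * name below is hypothetical.
 */
#if 0
static void example_lnd_conn_failed(struct lnet_ni *ni, lnet_nid_t peer_nid)
{
        /* alive = false, reset = false: decrement health rather than reset */
        lnet_notify(ni, peer_nid, false, false, ktime_get_seconds());
}
#endif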