2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, 2017, Intel Corporation.
6 * This file is part of Lustre, https://wiki.whamcloud.com/
8 * Portals is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Portals is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Portals; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_LNET
25 #include <linux/random.h>
26 #include <lnet/lib-lnet.h>
28 #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
29 #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
30 #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
31 #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
32 #define LNET_NRB_SMALL_PAGES 1
33 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
34 #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
35 #define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> PAGE_SHIFT)
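/*
 * Illustrative arithmetic (not part of the original file): assuming the
 * typical values LNET_MTU = 1 MiB and PAGE_SIZE = 4 KiB (PAGE_SHIFT = 12),
 * the large-buffer page count above works out to
 *
 *   LNET_NRB_LARGE_PAGES = (1048576 + 4096 - 1) >> 12 = 256 pages
 *
 * i.e. a large router buffer is sized to hold one full LNET_MTU message,
 * while a small buffer uses a single page and a tiny buffer carries no
 * payload pages at all.
 */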
38 extern unsigned int lnet_current_net_count;
40 static char *forwarding = "";
41 module_param(forwarding, charp, 0444);
42 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
44 static int tiny_router_buffers;
45 module_param(tiny_router_buffers, int, 0444);
46 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
47 static int small_router_buffers;
48 module_param(small_router_buffers, int, 0444);
49 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
50 static int large_router_buffers;
51 module_param(large_router_buffers, int, 0444);
52 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
53 static int peer_buffer_credits;
54 module_param(peer_buffer_credits, int, 0444);
55 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
57 static int auto_down = 1;
58 module_param(auto_down, int, 0444);
59 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
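/*
 * Usage sketch (illustrative, not from the original file): the parameters
 * declared above are normally set at module load time, for example via a
 * modprobe options line. The values below are examples only:
 *
 *   options lnet forwarding="enabled" tiny_router_buffers=2048 \
 *                small_router_buffers=16384 large_router_buffers=1024 \
 *                auto_down=1
 */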
62 lnet_peer_buffer_credits(struct lnet_net *net)
64 /* NI option overrides LNet default */
65 if (net->net_tunables.lct_peer_rtr_credits > 0)
66 return net->net_tunables.lct_peer_rtr_credits;
67 if (peer_buffer_credits > 0)
68 return peer_buffer_credits;
70 /* As an approximation, allow this peer the same number of router
71 * buffers as it is allowed outstanding sends */
72 return net->net_tunables.lct_peer_tx_credits;
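/*
 * Example (illustrative): with lct_peer_rtr_credits == 0 and the module
 * parameter peer_buffer_credits = 32, the function above grants a peer 32
 * router buffer credits; if both are zero, the peer simply inherits
 * lct_peer_tx_credits, i.e. as many router buffers as it may have sends
 * outstanding.
 */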
75 static int check_routers_before_use;
76 module_param(check_routers_before_use, int, 0444);
77 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
79 int avoid_asym_router_failure = 1;
80 module_param(avoid_asym_router_failure, int, 0644);
81 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
83 int alive_router_check_interval = 60;
84 module_param(alive_router_check_interval, int, 0644);
85 MODULE_PARM_DESC(alive_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
87 static int router_ping_timeout = 50;
88 module_param(router_ping_timeout, int, 0644);
89 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
92 * A value between 0 and 100, where 0 means the gateway is still considered
93 * usable even if its interfaces are in the worst possible health, and
94 * 100 means at least one interface on the route's remote net must be
95 * fully healthy for the route to be considered alive.
96 * The default is 100 to preserve the original behavior.
98 unsigned int router_sensitivity_percentage = 100;
99 static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
100 static struct kernel_param_ops param_ops_rtr_sensitivity = {
101 .set = rtr_sensitivity_set,
102 .get = param_get_int,
104 #define param_check_rtr_sensitivity(name, p) \
105 __param_check(name, p, int)
106 #ifdef HAVE_KERNEL_PARAM_OPS
107 module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
109 module_param_call(router_sensitivity_percentage, rtr_sensitivity_set, param_get_int,
110 &router_sensitivity_percentage, S_IRUGO|S_IWUSR);
112 MODULE_PARM_DESC(router_sensitivity_percentage,
113 "How healthy a gateway should be to be used in percent");
116 rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
119 unsigned *sen = (unsigned *)kp->arg;
122 rc = kstrtoul(val, 0, &value);
124 CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");
128 if (value > 100) {
129 CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);
134 * The purpose of locking the api_mutex here is to ensure that
135 * the correct value ends up stored properly.
137 mutex_lock(&the_lnet.ln_api_mutex);
141 mutex_unlock(&the_lnet.ln_api_mutex);
147 lnet_rtr_transfer_to_peer(struct lnet_peer *src, struct lnet_peer *target)
149 struct lnet_route *route;
151 lnet_net_lock(LNET_LOCK_EX);
152 target->lp_rtr_refcount += src->lp_rtr_refcount;
153 /* move the list of queued messages to the new peer */
154 list_splice_init(&src->lp_rtrq, &target->lp_rtrq);
155 /* move all the routes that reference the peer */
156 list_splice_init(&src->lp_routes, &target->lp_routes);
157 /* update all the routes to point to the new peer */
158 list_for_each_entry(route, &target->lp_routes, lr_gwlist)
159 route->lr_gateway = target;
160 /* remove the old peer from the ln_routers list */
161 list_del_init(&src->lp_rtr_list);
162 /* add the new peer to the ln_routers list */
163 if (list_empty(&target->lp_rtr_list)) {
164 lnet_peer_addref_locked(target);
165 list_add_tail(&target->lp_rtr_list, &the_lnet.ln_routers);
167 /* reset the ref count on the old peer and decrement its ref count */
168 src->lp_rtr_refcount = 0;
169 lnet_peer_decref_locked(src);
170 /* update the router version */
171 the_lnet.ln_routers_version++;
172 lnet_net_unlock(LNET_LOCK_EX);
176 lnet_peers_start_down(void)
178 return check_routers_before_use;
182 * A net is alive if at least one gateway NI on the network is alive.
185 lnet_is_gateway_net_alive(struct lnet_peer_net *lpn)
187 struct lnet_peer_ni *lpni;
189 list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
190 if (lnet_is_peer_ni_alive(lpni))
198 * a gateway is alive only if all its nets are alive
199 * called with cpt lock held
201 bool lnet_is_gateway_alive(struct lnet_peer *gw)
203 struct lnet_peer_net *lpn;
205 list_for_each_entry(lpn, &gw->lp_peer_nets, lpn_peer_nets) {
206 if (!lnet_is_gateway_net_alive(lpn))
214 * lnet_is_route_alive() needs to be called with cpt lock held
215 * A route is alive if the gateway can route between the local network and
216 * the remote network of the route.
217 * This means at least one NI is alive on each of the local and remote
218 * networks of the gateway.
220 bool lnet_is_route_alive(struct lnet_route *route)
222 struct lnet_peer *gw = route->lr_gateway;
223 struct lnet_peer_net *llpn;
224 struct lnet_peer_net *rlpn;
228 * check the gateway's interfaces on the local net of the route to make
229 * sure that the gateway is viable.
231 llpn = lnet_peer_get_net_locked(gw, route->lr_lnet);
235 route_alive = lnet_is_gateway_net_alive(llpn);
237 if (avoid_asym_router_failure) {
238 rlpn = lnet_peer_get_net_locked(gw, route->lr_net);
241 route_alive = route_alive &&
242 lnet_is_gateway_net_alive(rlpn);
248 spin_lock(&gw->lp_lock);
249 if (!(gw->lp_state & LNET_PEER_ROUTER_ENABLED)) {
250 if (gw->lp_rtr_refcount > 0)
251 CERROR("peer %s is being used as a gateway but routing feature is not turned on\n",
252 libcfs_nid2str(gw->lp_primary_nid));
255 spin_unlock(&gw->lp_lock);
261 lnet_consolidate_routes_locked(struct lnet_peer *orig_lp,
262 struct lnet_peer *new_lp)
264 struct lnet_peer_ni *lpni;
265 struct lnet_route *route;
268 * A route is correlated with a peer, but when it's added
269 * a specific NID is used. That NID refers to a peer_ni within
270 * a peer. There could be other peer_nis on the same net, which
271 * can be used to send to that gateway. However when we are
272 * consolidating gateways because of discovery, the nid used to
273 * add the route might've moved between gateway peers. In this
274 * case we want to move the route to the new gateway as well. The
275 * intent here is not to confuse the user who added the route.
277 list_for_each_entry(route, &orig_lp->lp_routes, lr_gwlist) {
278 lpni = lnet_peer_get_ni_locked(orig_lp, route->lr_nid);
280 lnet_net_lock(LNET_LOCK_EX);
281 list_move(&route->lr_gwlist, &new_lp->lp_routes);
282 lnet_net_unlock(LNET_LOCK_EX);
289 lnet_router_discovery_complete(struct lnet_peer *lp)
291 struct lnet_peer_ni *lpni = NULL;
293 spin_lock(&lp->lp_lock);
294 lp->lp_state &= ~LNET_PEER_RTR_DISCOVERY;
295 spin_unlock(&lp->lp_lock);
298 * Router discovery successful? All peer information would've been
299 * updated already. No need to do any more processing
301 if (!lp->lp_dc_error)
304 * discovery failed? then we need to set the status of each lpni
305 * to DOWN. It will be updated the next time we discover the
306 * router. For router peer NIs not on local networks, we never send
307 * messages directly to them, so their health will always remain
308 * at maximum. We can only tell if they are up or down from the
309 * status returned in the PING response. If we fail to get that
310 * status in our scheduled router discovery, then we'll assume
311 * it's down until we're told otherwise.
313 CDEBUG(D_NET, "%s: Router discovery failed %d\n",
314 libcfs_nid2str(lp->lp_primary_nid), lp->lp_dc_error);
315 while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
316 lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
320 lnet_rtr_addref_locked(struct lnet_peer *lp)
322 LASSERT(lp->lp_rtr_refcount >= 0);
324 /* lnet_net_lock must be exclusively locked */
325 lp->lp_rtr_refcount++;
326 if (lp->lp_rtr_refcount == 1) {
327 list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
328 /* addref for the_lnet.ln_routers */
329 lnet_peer_addref_locked(lp);
330 the_lnet.ln_routers_version++;
335 lnet_rtr_decref_locked(struct lnet_peer *lp)
337 LASSERT(atomic_read(&lp->lp_refcount) > 0);
338 LASSERT(lp->lp_rtr_refcount > 0);
340 /* lnet_net_lock must be exclusively locked */
341 lp->lp_rtr_refcount--;
342 if (lp->lp_rtr_refcount == 0) {
343 LASSERT(list_empty(&lp->lp_routes));
345 list_del(&lp->lp_rtr_list);
346 /* decref for the_lnet.ln_routers */
347 lnet_peer_decref_locked(lp);
348 the_lnet.ln_routers_version++;
352 struct lnet_remotenet *
353 lnet_find_rnet_locked(__u32 net)
355 struct lnet_remotenet *rnet;
356 struct list_head *tmp;
357 struct list_head *rn_list;
359 LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);
361 rn_list = lnet_net2rnethash(net);
362 list_for_each(tmp, rn_list) {
363 rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);
365 if (rnet->lrn_net == net)
371 static void lnet_shuffle_seed(void)
374 struct lnet_ni *ni = NULL;
379 /* Nodes with small feet have little entropy
380 * the NID for this node gives the most entropy in the low bits */
381 while ((ni = lnet_get_next_ni_locked(NULL, ni)))
382 add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));
388 /* NB expects LNET_LOCK held */
390 lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
392 struct lnet_peer_net *lpn;
393 unsigned int offset = 0;
394 unsigned int len = 0;
399 list_for_each(e, &rnet->lrn_routes)
400 len++;
403 * Randomly adding routes to the list is done to ensure that when
404 * different nodes are using the same list of routers, they end up
405 * preferring different routers.
407 offset = cfs_rand() % (len + 1);
408 list_for_each(e, &rnet->lrn_routes) {
409 if (offset == 0)
410 break;
411 offset--;
412 }
413 list_add(&route->lr_list, e);
415 * force a router check on the gateway to make sure the route is alive
418 list_for_each_entry(lpn, &route->lr_gateway->lp_peer_nets,
420 lpn->lpn_rtrcheck_timestamp = 0;
423 the_lnet.ln_remote_nets_version++;
425 /* add the route on the gateway list */
426 list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
428 /* take a router reference count on the gateway */
429 lnet_rtr_addref_locked(route->lr_gateway);
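/*
 * Worked example (illustrative): if rnet->lrn_routes already holds three
 * routes, len is 3 above and cfs_rand() % 4 yields an offset in [0, 3], so
 * the new route is equally likely to end up in any of the four possible
 * list positions. Different nodes therefore tend to order the same set of
 * gateways differently, which spreads traffic across them.
 */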
433 lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
434 __u32 priority, __u32 sensitivity)
436 struct list_head *route_entry;
437 struct lnet_remotenet *rnet;
438 struct lnet_remotenet *rnet2;
439 struct lnet_route *route;
440 struct lnet_peer_ni *lpni;
441 struct lnet_peer *gw;
445 CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
446 libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
448 if (gateway == LNET_NID_ANY ||
449 LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
450 net == LNET_NIDNET(LNET_NID_ANY) ||
451 LNET_NETTYP(net) == LOLND ||
452 LNET_NIDNET(gateway) == net ||
453 (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
456 /* it's a local network */
457 if (lnet_islocalnet(net))
460 /* Assume net, route, all new */
461 LIBCFS_ALLOC(route, sizeof(*route));
462 LIBCFS_ALLOC(rnet, sizeof(*rnet));
463 if (route == NULL || rnet == NULL) {
464 CERROR("Out of memory creating route %s %d %s\n",
465 libcfs_net2str(net), hops, libcfs_nid2str(gateway));
467 LIBCFS_FREE(route, sizeof(*route));
469 LIBCFS_FREE(rnet, sizeof(*rnet));
473 INIT_LIST_HEAD(&rnet->lrn_routes);
475 /* store the local and remote net that the route represents */
476 route->lr_lnet = LNET_NIDNET(gateway);
478 route->lr_nid = gateway;
479 route->lr_priority = priority;
480 route->lr_hops = hops;
482 lnet_net_lock(LNET_LOCK_EX);
485 * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
486 * lose that once we're done
488 lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
490 lnet_net_unlock(LNET_LOCK_EX);
492 LIBCFS_FREE(route, sizeof(*route));
493 LIBCFS_FREE(rnet, sizeof(*rnet));
496 CERROR("Error %d creating route %s %d %s\n", rc,
497 libcfs_net2str(net), hops,
498 libcfs_nid2str(gateway));
502 LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
503 gw = lpni->lpni_peer_net->lpn_peer;
505 route->lr_gateway = gw;
507 rnet2 = lnet_find_rnet_locked(net);
510 list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
514 /* Search for a duplicate route (adding a duplicate is a no-op) */
516 list_for_each(route_entry, &rnet2->lrn_routes) {
517 struct lnet_route *route2;
519 route2 = list_entry(route_entry, struct lnet_route, lr_list);
520 if (route2->lr_gateway == route->lr_gateway) {
525 /* our lookups must be true */
526 LASSERT(route2->lr_gateway->lp_primary_nid != gateway);
530 * It is possible to add multiple routes through the same peer,
531 * but it'll be using a different NID of that peer. When the
532 * gateway is discovered, discovery will consolidate the different
533 * peers into one peer. In this case the discovery code will have
534 * to move the routes from the peer that's being deleted to the
535 * consolidated peer lp_routes list
538 gw->lp_health_sensitivity = sensitivity;
539 lnet_add_route_to_rnet(rnet2, route);
543 * get rid of the reference on the lpni.
545 lnet_peer_ni_decref_locked(lpni);
546 lnet_net_unlock(LNET_LOCK_EX);
552 LIBCFS_FREE(route, sizeof(*route));
556 LIBCFS_FREE(rnet, sizeof(*rnet));
558 /* kick start the monitor thread to handle the added route */
559 wake_up(&the_lnet.ln_mt_waitq);
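/*
 * Caller sketch (illustrative, not part of the original file): configuring a
 * route to the o2ib1 network through gateway 192.168.1.1@tcp could reduce to
 * a call like the following from the config/ioctl layer, assuming the usual
 * libcfs string helpers; hop count (1), priority (0) and sensitivity (100)
 * are example values:
 *
 *   lnet_nid_t gw = libcfs_str2nid("192.168.1.1@tcp");
 *   __u32 rnet_id = libcfs_str2net("o2ib1");
 *   int rc = lnet_add_route(rnet_id, 1, gw, 0, 100);
 *   if (rc < 0)
 *           CERROR("failed to add route: %d\n", rc);
 */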
565 lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
566 struct list_head *zombies)
568 struct lnet_peer *gateway;
569 struct lnet_route *route;
570 struct lnet_route *tmp;
572 list_for_each_entry_safe(route, tmp, route_list, lr_list) {
573 gateway = route->lr_gateway;
574 if (gw_nid != LNET_NID_ANY &&
575 gw_nid != gateway->lp_primary_nid)
579 * move to zombie to delete outside the lock
580 * Note that this function is called with the
581 * ln_api_mutex held as well as the exclusive net
582 * lock. Adding to the remote net list happens
583 * under the same conditions. Same goes for the
584 * gateway router list
586 list_move(&route->lr_list, zombies);
587 the_lnet.ln_remote_nets_version++;
589 list_del(&route->lr_gwlist);
590 lnet_rtr_decref_locked(gateway);
595 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
597 struct list_head rnet_zombies;
598 struct lnet_remotenet *rnet;
599 struct lnet_remotenet *tmp;
600 struct list_head *rn_list;
601 struct lnet_peer_ni *lpni;
602 struct lnet_route *route;
603 struct list_head zombies;
604 struct lnet_peer *lp;
607 INIT_LIST_HEAD(&rnet_zombies);
608 INIT_LIST_HEAD(&zombies);
610 CDEBUG(D_NET, "Del route: net %s : gw %s\n",
611 libcfs_net2str(net), libcfs_nid2str(gw_nid));
613 /* NB Caller may specify either all routes via the given gateway
614 * or a specific route entry (actual NIDs) */
616 lnet_net_lock(LNET_LOCK_EX);
618 lpni = lnet_find_peer_ni_locked(gw_nid);
620 lp = lpni->lpni_peer_net->lpn_peer;
622 gw_nid = lp->lp_primary_nid;
623 lnet_peer_ni_decref_locked(lpni);
626 if (net != LNET_NIDNET(LNET_NID_ANY)) {
627 rnet = lnet_find_rnet_locked(net);
629 lnet_net_unlock(LNET_LOCK_EX);
632 lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
634 if (list_empty(&rnet->lrn_routes))
635 list_move(&rnet->lrn_list, &rnet_zombies);
639 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
640 rn_list = &the_lnet.ln_remote_nets_hash[i];
642 list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
643 lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
645 if (list_empty(&rnet->lrn_routes))
646 list_move(&rnet->lrn_list, &rnet_zombies);
652 * check if there are any routes remaining on the gateway.
653 * If there are no more routes, make sure to set the peer's
654 * lp_disc_net_id to 0 (invalid) so that, if more routes are added on
655 * that gateway in the future, the discovery process starts from scratch.
659 if (list_empty(&lp->lp_routes))
660 lp->lp_disc_net_id = 0;
663 lnet_net_unlock(LNET_LOCK_EX);
665 while (!list_empty(&zombies)) {
666 route = list_first_entry(&zombies, struct lnet_route, lr_list);
667 list_del(&route->lr_list);
668 LIBCFS_FREE(route, sizeof(*route));
671 while (!list_empty(&rnet_zombies)) {
672 rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
674 list_del(&rnet->lrn_list);
675 LIBCFS_FREE(rnet, sizeof(*rnet));
682 lnet_destroy_routes (void)
684 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
687 int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
689 struct lnet_rtrbufpool *rbp;
690 int i, rc = -ENOENT, j;
692 if (the_lnet.ln_rtrpools == NULL)
696 cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
701 for (j = 0; j < LNET_NRBPOOLS; j++) {
702 pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
703 pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
704 pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
705 pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
712 lnet_net_lock(LNET_LOCK_EX);
713 pool_cfg->pl_routing = the_lnet.ln_routing;
714 lnet_net_unlock(LNET_LOCK_EX);
720 lnet_get_route(int idx, __u32 *net, __u32 *hops,
721 lnet_nid_t *gateway, __u32 *alive, __u32 *priority, __u32 *sensitivity)
723 struct lnet_remotenet *rnet;
724 struct list_head *rn_list;
725 struct lnet_route *route;
726 struct list_head *e1;
727 struct list_head *e2;
731 cpt = lnet_net_lock_current();
733 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
734 rn_list = &the_lnet.ln_remote_nets_hash[i];
735 list_for_each(e1, rn_list) {
736 rnet = list_entry(e1, struct lnet_remotenet, lrn_list);
738 list_for_each(e2, &rnet->lrn_routes) {
739 route = list_entry(e2, struct lnet_route,
743 *net = rnet->lrn_net;
744 *gateway = route->lr_nid;
745 *hops = route->lr_hops;
746 *priority = route->lr_priority;
747 *sensitivity = route->lr_gateway->
748 lp_health_sensitivity;
749 *alive = lnet_is_route_alive(route);
750 lnet_net_unlock(cpt);
757 lnet_net_unlock(cpt);
762 lnet_wait_known_routerstate(void)
764 struct lnet_peer *rtr;
765 struct list_head *entry;
768 LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
771 int cpt = lnet_net_lock_current();
774 list_for_each(entry, &the_lnet.ln_routers) {
775 rtr = list_entry(entry, struct lnet_peer,
778 spin_lock(&rtr->lp_lock);
780 if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
782 spin_unlock(&rtr->lp_lock);
785 spin_unlock(&rtr->lp_lock);
788 lnet_net_unlock(cpt);
793 set_current_state(TASK_UNINTERRUPTIBLE);
794 schedule_timeout(cfs_time_seconds(1));
799 lnet_net_set_status_locked(struct lnet_net *net, __u32 status)
804 list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
807 ni->ni_status->ns_status != status) {
808 ni->ni_status->ns_status = status;
818 lnet_update_ni_status_locked(void)
820 struct lnet_net *net;
825 LASSERT(the_lnet.ln_routing);
827 timeout = router_ping_timeout + alive_router_check_interval;
829 now = ktime_get_real_seconds();
830 list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
831 if (net->net_lnd->lnd_type == LOLND)
834 if (now < net->net_last_alive + timeout)
837 spin_lock(&net->net_lock);
838 /* re-check with lock */
839 if (now < net->net_last_alive + timeout) {
840 spin_unlock(&net->net_lock);
843 spin_unlock(&net->net_lock);
846 * if the net didn't receive any traffic past the
847 * timeout on any of its constituent NIs, then mark all
848 * of its NIs down.
850 push = lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN);
856 void lnet_wait_router_start(void)
858 if (check_routers_before_use) {
859 /* Note that a helpful side-effect of pinging all known routers
860 * at startup is that it makes them drop stale connections they
861 * may have to a previous instance of me. */
862 lnet_wait_known_routerstate();
867 * This function is called from the monitor thread to check if there are
868 * any active routers that need to be checked.
871 lnet_router_checker_active(void)
873 /* Router Checker thread needs to run when routing is enabled in
874 * order to call lnet_update_ni_status_locked() */
875 if (the_lnet.ln_routing)
878 return !list_empty(&the_lnet.ln_routers) &&
879 alive_router_check_interval > 0;
883 lnet_check_routers(void)
885 struct lnet_peer_net *first_lpn = NULL;
886 struct lnet_peer_net *lpn;
887 struct lnet_peer_ni *lpni;
888 struct list_head *entry;
889 struct lnet_peer *rtr;
898 cpt = lnet_net_lock_current();
900 version = the_lnet.ln_routers_version;
902 list_for_each(entry, &the_lnet.ln_routers) {
903 rtr = list_entry(entry, struct lnet_peer,
906 now = ktime_get_real_seconds();
909 * only discover the router if we've passed
910 * alive_router_check_interval seconds. Some of the router
911 * interfaces could be down and in that case they would be
912 * undergoing recovery separately from this discovery.
914 /* find next peer net which is also local */
915 net_id = rtr->lp_disc_net_id;
917 lpn = lnet_get_next_peer_net_locked(rtr, net_id);
919 CERROR("gateway %s has no networks\n",
920 libcfs_nid2str(rtr->lp_primary_nid));
923 if (first_lpn == lpn)
927 found_lpn = lnet_islocalnet_locked(lpn->lpn_net_id);
928 net_id = lpn->lpn_net_id;
929 } while (!found_lpn);
931 if (!found_lpn || !lpn) {
932 CERROR("no local network found for gateway %s\n",
933 libcfs_nid2str(rtr->lp_primary_nid));
937 if (now - lpn->lpn_rtrcheck_timestamp <
938 alive_router_check_interval / lnet_current_net_count)
942 * If we're currently discovering the peer then don't
943 * issue another discovery
945 spin_lock(&rtr->lp_lock);
946 if (rtr->lp_state & LNET_PEER_RTR_DISCOVERY) {
947 spin_unlock(&rtr->lp_lock);
950 /* make sure we actively discover the router */
951 rtr->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
952 rtr->lp_state |= LNET_PEER_RTR_DISCOVERY;
953 spin_unlock(&rtr->lp_lock);
955 /* find the peer_ni associated with the primary NID */
956 lpni = lnet_peer_get_ni_locked(rtr, rtr->lp_primary_nid);
958 CDEBUG(D_NET, "Expected to find an lpni for %s, but non found\n",
959 libcfs_nid2str(rtr->lp_primary_nid));
962 lnet_peer_ni_addref_locked(lpni);
964 /* specify the net to use */
965 rtr->lp_disc_net_id = lpn->lpn_net_id;
967 /* discover the router */
968 CDEBUG(D_NET, "discover %s, cpt = %d\n",
969 libcfs_nid2str(lpni->lpni_nid), cpt);
970 rc = lnet_discover_peer_locked(lpni, cpt, false);
972 /* decrement ref count acquired by find_peer_ni_locked() */
973 lnet_peer_ni_decref_locked(lpni);
976 lpn->lpn_rtrcheck_timestamp = now;
978 CERROR("Failed to discover router %s\n",
979 libcfs_nid2str(rtr->lp_primary_nid));
981 /* NB dropped lock */
982 if (version != the_lnet.ln_routers_version) {
983 /* the routers list has changed */
988 if (the_lnet.ln_routing)
989 push = lnet_update_ni_status_locked();
991 lnet_net_unlock(cpt);
993 /* if the status of the ni changed update the peers */
995 lnet_push_update_to_peers(1);
999 lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
1001 int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
1003 while (--npages >= 0)
1004 __free_page(rb->rb_kiov[npages].kiov_page);
1006 LIBCFS_FREE(rb, sz);
1009 static struct lnet_rtrbuf *
1010 lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
1012 int npages = rbp->rbp_npages;
1013 int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
1015 struct lnet_rtrbuf *rb;
1018 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1024 for (i = 0; i < npages; i++) {
1025 page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
1026 GFP_KERNEL | __GFP_ZERO);
1029 __free_page(rb->rb_kiov[i].kiov_page);
1031 LIBCFS_FREE(rb, sz);
1035 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1036 rb->rb_kiov[i].kiov_offset = 0;
1037 rb->rb_kiov[i].kiov_page = page;
1044 lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
1046 int npages = rbp->rbp_npages;
1047 struct lnet_rtrbuf *rb;
1048 struct list_head tmp;
1050 if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1053 INIT_LIST_HEAD(&tmp);
1056 list_splice_init(&rbp->rbp_msgs, &tmp);
1057 lnet_drop_routed_msgs_locked(&tmp, cpt);
1058 list_splice_init(&rbp->rbp_bufs, &tmp);
1059 rbp->rbp_req_nbuffers = 0;
1060 rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1061 rbp->rbp_mincredits = 0;
1062 lnet_net_unlock(cpt);
1064 /* Free buffers on the free list. */
1065 while (!list_empty(&tmp)) {
1066 rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
1067 list_del(&rb->rb_list);
1068 lnet_destroy_rtrbuf(rb, npages);
1073 lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
1075 struct list_head rb_list;
1076 struct lnet_rtrbuf *rb;
1078 int num_buffers = 0;
1080 int npages = rbp->rbp_npages;
1083 /* If we are called for less buffers than already in the pool, we
1084 * just lower the req_nbuffers number and excess buffers will be
1085 * thrown away as they are returned to the free list. Credits
1086 * then get adjusted as well.
1087 * If we already have enough buffers allocated to serve the
1088 * increase requested, then we can treat that the same way as we
1089 * do the decrease. */
1090 num_rb = nbufs - rbp->rbp_nbuffers;
1091 if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
1092 rbp->rbp_req_nbuffers = nbufs;
1093 lnet_net_unlock(cpt);
1096 /* store the older value of rbp_req_nbuffers and then set it to
1097 * the new request to prevent lnet_return_rx_credits_locked() from
1098 * freeing buffers that we need to keep around */
1099 old_req_nbufs = rbp->rbp_req_nbuffers;
1100 rbp->rbp_req_nbuffers = nbufs;
1101 lnet_net_unlock(cpt);
1103 INIT_LIST_HEAD(&rb_list);
1105 /* allocate the buffers on a local list first. If all buffers are
1106 * allocated successfully then join this list to the rbp buffer
1107 * list. If not then free all allocated buffers. */
1108 while (num_rb-- > 0) {
1109 rb = lnet_new_rtrbuf(rbp, cpt);
1111 CERROR("Failed to allocate %d route bufs of %d pages\n",
1115 rbp->rbp_req_nbuffers = old_req_nbufs;
1116 lnet_net_unlock(cpt);
1121 list_add(&rb->rb_list, &rb_list);
1127 list_splice_tail(&rb_list, &rbp->rbp_bufs);
1128 rbp->rbp_nbuffers += num_buffers;
1129 rbp->rbp_credits += num_buffers;
1130 rbp->rbp_mincredits = rbp->rbp_credits;
1131 /* We need to schedule blocked msgs using the newly allocated buffers. */
1133 while (!list_empty(&rbp->rbp_bufs) &&
1134 !list_empty(&rbp->rbp_msgs))
1135 lnet_schedule_blocked_locked(rbp);
1137 lnet_net_unlock(cpt);
1142 while (!list_empty(&rb_list)) {
1143 rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
1144 list_del(&rb->rb_list);
1145 lnet_destroy_rtrbuf(rb, npages);
1152 lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
1154 INIT_LIST_HEAD(&rbp->rbp_msgs);
1155 INIT_LIST_HEAD(&rbp->rbp_bufs);
1157 rbp->rbp_npages = npages;
1158 rbp->rbp_credits = 0;
1159 rbp->rbp_mincredits = 0;
1163 lnet_rtrpools_free(int keep_pools)
1165 struct lnet_rtrbufpool *rtrp;
1168 if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1171 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1172 lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
1173 lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
1174 lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
1178 cfs_percpt_free(the_lnet.ln_rtrpools);
1179 the_lnet.ln_rtrpools = NULL;
1184 lnet_nrb_tiny_calculate(void)
1186 int nrbs = LNET_NRB_TINY;
1188 if (tiny_router_buffers < 0) {
1189 LCONSOLE_ERROR_MSG(0x10c,
1190 "tiny_router_buffers=%d invalid when "
1191 "routing enabled\n", tiny_router_buffers);
1195 if (tiny_router_buffers > 0)
1196 nrbs = tiny_router_buffers;
1198 nrbs /= LNET_CPT_NUMBER;
1199 return max(nrbs, LNET_NRB_TINY_MIN);
1203 lnet_nrb_small_calculate(void)
1205 int nrbs = LNET_NRB_SMALL;
1207 if (small_router_buffers < 0) {
1208 LCONSOLE_ERROR_MSG(0x10c,
1209 "small_router_buffers=%d invalid when "
1210 "routing enabled\n", small_router_buffers);
1214 if (small_router_buffers > 0)
1215 nrbs = small_router_buffers;
1217 nrbs /= LNET_CPT_NUMBER;
1218 return max(nrbs, LNET_NRB_SMALL_MIN);
1222 lnet_nrb_large_calculate(void)
1224 int nrbs = LNET_NRB_LARGE;
1226 if (large_router_buffers < 0) {
1227 LCONSOLE_ERROR_MSG(0x10c,
1228 "large_router_buffers=%d invalid when "
1229 "routing enabled\n", large_router_buffers);
1233 if (large_router_buffers > 0)
1234 nrbs = large_router_buffers;
1236 nrbs /= LNET_CPT_NUMBER;
1237 return max(nrbs, LNET_NRB_LARGE_MIN);
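/*
 * Worked example (illustrative): with the defaults above and, say, four
 * CPTs, lnet_nrb_large_calculate() starts from LNET_NRB_LARGE = 256 * 4 =
 * 1024 buffers, divides by LNET_CPT_NUMBER to get 256 per CPT, and the
 * max() against LNET_NRB_LARGE_MIN keeps the result at no less than 256
 * even on systems with many more CPTs. Setting large_router_buffers=2048
 * would instead yield 512 buffers per CPT in this configuration.
 */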
1241 lnet_rtrpools_alloc(int im_a_router)
1243 struct lnet_rtrbufpool *rtrp;
1250 if (!strcmp(forwarding, "")) {
1251 /* not set either way */
1254 } else if (!strcmp(forwarding, "disabled")) {
1255 /* explicitly disabled */
1257 } else if (!strcmp(forwarding, "enabled")) {
1258 /* explicitly enabled */
1260 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1261 "'enabled' or 'disabled'\n");
1265 nrb_tiny = lnet_nrb_tiny_calculate();
1269 nrb_small = lnet_nrb_small_calculate();
1273 nrb_large = lnet_nrb_large_calculate();
1277 the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1279 sizeof(struct lnet_rtrbufpool));
1280 if (the_lnet.ln_rtrpools == NULL) {
1281 LCONSOLE_ERROR_MSG(0x10c,
1282 "Failed to initialize router buffe pool\n");
1286 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1287 lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
1288 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1293 lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
1294 LNET_NRB_SMALL_PAGES);
1295 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1300 lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
1301 LNET_NRB_LARGE_PAGES);
1302 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1308 lnet_net_lock(LNET_LOCK_EX);
1309 the_lnet.ln_routing = 1;
1310 lnet_net_unlock(LNET_LOCK_EX);
1311 wake_up(&the_lnet.ln_mt_waitq);
1315 lnet_rtrpools_free(0);
1320 lnet_rtrpools_adjust_helper(int tiny, int small, int large)
1325 struct lnet_rtrbufpool *rtrp;
1327 /* If the provided values for each buffer pool are different than the
1328 * configured values, we need to take action. */
1330 tiny_router_buffers = tiny;
1331 nrb = lnet_nrb_tiny_calculate();
1332 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1333 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1340 small_router_buffers = small;
1341 nrb = lnet_nrb_small_calculate();
1342 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1343 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1350 large_router_buffers = large;
1351 nrb = lnet_nrb_large_calculate();
1352 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1353 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1364 lnet_rtrpools_adjust(int tiny, int small, int large)
1366 /* this function doesn't revert the changes if adding new buffers
1367 * failed. It's up to the user space caller to revert the
1368 * changes in case of failure. */
1370 if (!the_lnet.ln_routing)
1373 return lnet_rtrpools_adjust_helper(tiny, small, large);
1377 lnet_rtrpools_enable(void)
1381 if (the_lnet.ln_routing)
1384 if (the_lnet.ln_rtrpools == NULL)
1385 /* If routing is turned off, and we have never
1386 * initialized the pools before, just call the
1387 * standard buffer pool allocation routine as
1388 * if we are just configuring this for the first time. */
1390 rc = lnet_rtrpools_alloc(1);
1392 rc = lnet_rtrpools_adjust_helper(0, 0, 0);
1396 lnet_net_lock(LNET_LOCK_EX);
1397 the_lnet.ln_routing = 1;
1399 the_lnet.ln_ping_target->pb_info.pi_features &=
1400 ~LNET_PING_FEAT_RTE_DISABLED;
1401 lnet_net_unlock(LNET_LOCK_EX);
1407 lnet_rtrpools_disable(void)
1409 if (!the_lnet.ln_routing)
1412 lnet_net_lock(LNET_LOCK_EX);
1413 the_lnet.ln_routing = 0;
1414 the_lnet.ln_ping_target->pb_info.pi_features |=
1415 LNET_PING_FEAT_RTE_DISABLED;
1417 tiny_router_buffers = 0;
1418 small_router_buffers = 0;
1419 large_router_buffers = 0;
1420 lnet_net_unlock(LNET_LOCK_EX);
1421 lnet_rtrpools_free(1);
1425 lnet_notify_peer_down(struct lnet_ni *ni, lnet_nid_t nid)
1427 if (ni->ni_net->net_lnd->lnd_notify_peer_down != NULL)
1428 (ni->ni_net->net_lnd->lnd_notify_peer_down)(nid);
1432 * ni: local NI used to communicate with the peer
1433 * nid: NID of the peer being reported on
1434 * alive: true if peer is alive, false otherwise
1435 * reset: reset health value. This is requested by the LND.
1436 * when: notification time.
1439 lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, bool alive, bool reset,
1442 struct lnet_peer_ni *lpni = NULL;
1443 time64_t now = ktime_get_seconds();
1446 LASSERT (!in_interrupt ());
1448 CDEBUG (D_NET, "%s notifying %s: %s\n",
1449 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1450 libcfs_nid2str(nid),
1451 alive ? "up" : "down");
1454 LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1455 CWARN("Ignoring notification of %s %s by %s (different net)\n",
1456 libcfs_nid2str(nid), alive ? "birth" : "death",
1457 libcfs_nid2str(ni->ni_nid));
1461 /* can't do predictions... */
1463 CWARN("Ignoring prediction from %s of %s %s "
1464 "%lld seconds in the future\n",
1465 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1466 libcfs_nid2str(nid), alive ? "up" : "down", when - now);
1470 if (ni != NULL && !alive && /* LND telling me she's down */
1471 !auto_down) { /* auto-down disabled */
1472 CDEBUG(D_NET, "Auto-down disabled\n");
1476 /* must lock 0 since this is used for synchronization */
1479 if (the_lnet.ln_state != LNET_STATE_RUNNING) {
1484 lpni = lnet_find_peer_ni_locked(nid);
1488 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1494 lnet_set_healthv(&lpni->lpni_healthv,
1495 LNET_MAX_HEALTH_VALUE);
1497 lnet_inc_healthv(&lpni->lpni_healthv);
1499 lnet_handle_remote_failure_locked(lpni);
1502 /* recalculate aliveness */
1503 alive = lnet_is_peer_ni_alive(lpni);
1506 if (ni != NULL && !alive)
1507 lnet_notify_peer_down(ni, lpni->lpni_nid);
1509 cpt = lpni->lpni_cpt;
1511 lnet_peer_ni_decref_locked(lpni);
1512 lnet_net_unlock(cpt);
1516 EXPORT_SYMBOL(lnet_notify);
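/*
 * Caller sketch (illustrative, not part of the original file): an LND that
 * detects a failed connection to a peer would typically report it with
 * something like
 *
 *   lnet_notify(ni, peer_nid, false, false, ktime_get_seconds());
 *
 * i.e. "this peer looks dead as of now"; passing alive = true instead bumps
 * the peer_ni health back up and may mark the peer alive again.
 */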