/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>
#define LNET_NRB_TINY_MIN	512	/* min value for each CPT */
#define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
#define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES	1
#define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
#define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_SIZE - 1) >> \
				 PAGE_SHIFT)
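/*
 * A quick worked example of the defaults above (a sketch, assuming a
 * 4 KiB PAGE_SIZE and the usual 1 MiB LNET_MTU): LNET_NRB_TINY is
 * 512 * 4 = 2048 buffers in total, split across CPTs but never below
 * LNET_NRB_TINY_MIN per CPT, and LNET_NRB_LARGE_PAGES rounds the MTU
 * up to whole pages: (1048576 + 4096 - 1) >> 12 = 256 pages per buffer.
 */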
static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
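/*
 * Illustrative only (a sketch of typical usage, not tied to any one
 * release): the parameters above are registered 0444, so they must be
 * set at module load time, e.g.
 *
 *	modprobe lnet forwarding=enabled tiny_router_buffers=2048 \
 *		small_router_buffers=8192 large_router_buffers=512
 */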
static int
lnet_peer_buffer_credits(struct lnet_net *net)
{
	/* NI option overrides LNet default */
	if (net->net_tunables.lct_peer_rtr_credits > 0)
		return net->net_tunables.lct_peer_rtr_credits;
	if (peer_buffer_credits > 0)
		return peer_buffer_credits;

	/* As an approximation, allow this peer the same number of router
	 * buffers as it is allowed outstanding sends */
	return net->net_tunables.lct_peer_tx_credits;
}
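/*
 * For example (illustrative numbers): with lct_peer_rtr_credits unset
 * and peer_buffer_credits=16 on the module line, every peer is granted
 * 16 router buffer credits; with both unset, a peer allowed 8
 * outstanding sends also gets 8 router buffer credits.
 */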
static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

int alive_router_check_interval = 60;
module_param(alive_router_check_interval, int, 0644);
MODULE_PARM_DESC(alive_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
/*
 * A value between 0 and 100: 0 means that even if the router's interfaces
 * are in the worst health the gateway is still considered usable; 100
 * means that at least one interface on the route's remote net must be
 * fully healthy for the route to be considered alive.
 * The default is 100 to preserve the original behavior.
 */
unsigned int router_sensitivity_percentage = 100;
static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
static struct kernel_param_ops param_ops_rtr_sensitivity = {
	.set = rtr_sensitivity_set,
	.get = param_get_int,
};
#define param_check_rtr_sensitivity(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(router_sensitivity_percentage, rtr_sensitivity_set, param_get_int,
		  &router_sensitivity_percentage, S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(router_sensitivity_percentage,
		 "How healthy a gateway should be, in percent, to be used");
static int
rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	unsigned *sen = (unsigned *)kp->arg;
	unsigned long value;
	int rc;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");
		return rc;
	}

	/* "value" is unsigned, so only the upper bound needs checking */
	if (value > 100) {
		CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);
		return -EINVAL;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);
	*sen = value;
	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}
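/*
 * Because the parameter is registered S_IRUGO|S_IWUSR, it can also be
 * tuned at runtime through sysfs (illustrative path, assuming the
 * standard module parameter layout):
 *
 *	echo 90 > /sys/module/lnet/parameters/router_sensitivity_percentage
 */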
void
lnet_rtr_transfer_to_peer(struct lnet_peer *src, struct lnet_peer *target)
{
	struct lnet_route *route;

	lnet_net_lock(LNET_LOCK_EX);
	target->lp_rtr_refcount += src->lp_rtr_refcount;
	/* move the list of queued messages to the new peer */
	list_splice_init(&src->lp_rtrq, &target->lp_rtrq);
	/* move all the routes that reference the peer */
	list_splice_init(&src->lp_routes, &target->lp_routes);
	/* update all the routes to point to the new peer */
	list_for_each_entry(route, &target->lp_routes, lr_gwlist)
		route->lr_gateway = target;
	/* remove the old peer from the ln_routers list */
	list_del_init(&src->lp_rtr_list);
	/* add the new peer to the ln_routers list */
	if (list_empty(&target->lp_rtr_list)) {
		lnet_peer_addref_locked(target);
		list_add_tail(&target->lp_rtr_list, &the_lnet.ln_routers);
	}
	/* reset the ref count on the old peer and decrement its ref count */
	src->lp_rtr_refcount = 0;
	lnet_peer_decref_locked(src);
	/* update the router version */
	the_lnet.ln_routers_version++;
	lnet_net_unlock(LNET_LOCK_EX);
}
int
lnet_peers_start_down(void)
{
	return check_routers_before_use;
}
/*
 * A net is alive if at least one gateway NI on the network is alive.
 */
static bool
lnet_is_gateway_net_alive(struct lnet_peer_net *lpn)
{
	struct lnet_peer_ni *lpni;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lnet_is_peer_ni_alive(lpni))
			return true;
	}

	return false;
}
/*
 * A gateway is alive only if all its nets are alive.
 * Called with cpt lock held.
 */
bool lnet_is_gateway_alive(struct lnet_peer *gw)
{
	struct lnet_peer_net *lpn;

	list_for_each_entry(lpn, &gw->lp_peer_nets, lpn_peer_nets) {
		if (!lnet_is_gateway_net_alive(lpn))
			return false;
	}

	return true;
}
/*
 * lnet_is_route_alive() needs to be called with cpt lock held.
 * A route is alive if the gateway can route between the local network and
 * the remote network of the route.
 * This means at least one NI is alive on each of the local and remote
 * networks of the gateway.
 */
bool lnet_is_route_alive(struct lnet_route *route)
{
	struct lnet_peer *gw = route->lr_gateway;
	struct lnet_peer_net *llpn;
	struct lnet_peer_net *rlpn;
	bool route_alive;

	/*
	 * check the gateway's interfaces on the local net to make sure
	 * that the gateway is viable.
	 */
	llpn = lnet_peer_get_net_locked(gw, route->lr_lnet);
	if (!llpn)
		return false;

	route_alive = lnet_is_gateway_net_alive(llpn);

	if (avoid_asym_router_failure) {
		rlpn = lnet_peer_get_net_locked(gw, route->lr_net);
		if (!rlpn)
			return false;
		route_alive = route_alive &&
			      lnet_is_gateway_net_alive(rlpn);
	}

	if (!route_alive)
		return false;

	spin_lock(&gw->lp_lock);
	if (!(gw->lp_state & LNET_PEER_ROUTER_ENABLED)) {
		if (gw->lp_rtr_refcount > 0)
			CERROR("peer %s is being used as a gateway but routing feature is not turned on\n",
			       libcfs_nid2str(gw->lp_primary_nid));
		route_alive = false;
	}
	spin_unlock(&gw->lp_lock);

	return route_alive;
}
static void
lnet_consolidate_routes_locked(struct lnet_peer *orig_lp,
			       struct lnet_peer *new_lp)
{
	struct lnet_peer_ni *lpni;
	struct lnet_route *route;

	/*
	 * Although a route is correlated with a peer, when it's added
	 * a specific NID is used. That NID refers to a peer_ni within
	 * a peer. There could be other peer_nis on the same net, which
	 * can be used to send to that gateway. However when we are
	 * consolidating gateways because of discovery, the nid used to
	 * add the route might've moved between gateway peers. In this
	 * case we want to move the route to the new gateway as well. The
	 * intent here is not to confuse the user who added the route.
	 */
	list_for_each_entry(route, &orig_lp->lp_routes, lr_gwlist) {
		lpni = lnet_peer_get_ni_locked(orig_lp, route->lr_nid);
		if (!lpni) {
			lnet_net_lock(LNET_LOCK_EX);
			list_move(&route->lr_gwlist, &new_lp->lp_routes);
			lnet_net_unlock(LNET_LOCK_EX);
		}
	}
}
static void
lnet_router_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;

	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_RTR_DISCOVERY;
	spin_unlock(&lp->lp_lock);

	/*
	 * Router discovery successful? All peer information would've been
	 * updated already. No need to do any more processing.
	 */
	if (!lp->lp_dc_error)
		return;
	/*
	 * Discovery failed? Then we need to set the status of each lpni
	 * to DOWN. It will be updated the next time we discover the
	 * router. For router peer NIs not on local networks, we never send
	 * messages directly to them, so their health will always remain
	 * at maximum. We can only tell if they are up or down from the
	 * status returned in the PING response. If we fail to get that
	 * status in our scheduled router discovery, then we'll assume
	 * it's down until we're told otherwise.
	 */
	CDEBUG(D_NET, "%s: Router discovery failed %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), lp->lp_dc_error);
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;
}
void
lnet_rtr_addref_locked(struct lnet_peer *lp)
{
	LASSERT(lp->lp_rtr_refcount >= 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount++;
	if (lp->lp_rtr_refcount == 1) {
		list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
		/* addref for the_lnet.ln_routers */
		lnet_peer_addref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}
void
lnet_rtr_decref_locked(struct lnet_peer *lp)
{
	LASSERT(atomic_read(&lp->lp_refcount) > 0);
	LASSERT(lp->lp_rtr_refcount > 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount--;
	if (lp->lp_rtr_refcount == 0) {
		LASSERT(list_empty(&lp->lp_routes));

		list_del(&lp->lp_rtr_list);
		/* decref for the_lnet.ln_routers */
		lnet_peer_decref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}
struct lnet_remotenet *
lnet_find_rnet_locked(__u32 net)
{
	struct lnet_remotenet *rnet;
	struct list_head *tmp;
	struct list_head *rn_list;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	rn_list = lnet_net2rnethash(net);
	list_for_each(tmp, rn_list) {
		rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);

		if (rnet->lrn_net == net)
			return rnet;
	}

	return NULL;
}
static void lnet_shuffle_seed(void)
{
	static int seeded;
	struct lnet_ni *ni = NULL;

	if (seeded)
		return;

	/* Nodes with small feet have little entropy;
	 * the NID for this node gives the most entropy in the low bits */
	while ((ni = lnet_get_next_ni_locked(NULL, ni)))
		add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));

	seeded = 1;
}
/* NB expects LNET_LOCK held */
static void
lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
{
	struct list_head *e;
	unsigned int len = 0;
	unsigned int offset = 0;

	lnet_shuffle_seed();

	list_for_each(e, &rnet->lrn_routes)
		len++;

	/*
	 * Randomly adding routes to the list is done to ensure that when
	 * different nodes are using the same list of routers, they end up
	 * preferring different routers.
	 */
	offset = cfs_rand() % (len + 1);
	list_for_each(e, &rnet->lrn_routes) {
		if (offset == 0)
			break;
		offset--;
	}
	list_add(&route->lr_list, e);

	/*
	 * force a router check on the gateway to make sure the route is
	 * alive
	 */
	route->lr_gateway->lp_rtrcheck_timestamp = 0;

	the_lnet.ln_remote_nets_version++;

	/* add the route on the gateway list */
	list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);

	/* take a router reference count on the gateway */
	lnet_rtr_addref_locked(route->lr_gateway);
}
int
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
	       unsigned int priority)
{
	struct list_head *route_entry;
	struct lnet_remotenet *rnet;
	struct lnet_remotenet *rnet2;
	struct lnet_route *route;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *gw;
	int add_route;
	int rc;

	CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));

	if (gateway == LNET_NID_ANY ||
	    LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
	    net == LNET_NIDNET(LNET_NID_ANY) ||
	    LNET_NETTYP(net) == LOLND ||
	    LNET_NIDNET(gateway) == net ||
	    (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
		return -EINVAL;
	/* it's a local network */
	if (lnet_islocalnet(net))
		return -EEXIST;

	/* Assume net, route, all new */
	LIBCFS_ALLOC(route, sizeof(*route));
	LIBCFS_ALLOC(rnet, sizeof(*rnet));
	if (route == NULL || rnet == NULL) {
		CERROR("Out of memory creating route %s %d %s\n",
		       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
		if (route != NULL)
			LIBCFS_FREE(route, sizeof(*route));
		if (rnet != NULL)
			LIBCFS_FREE(rnet, sizeof(*rnet));
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&rnet->lrn_routes);
	rnet->lrn_net = net;
	/* store the local and remote net that the route represents */
	route->lr_lnet = LNET_NIDNET(gateway);
	route->lr_net = net;
	route->lr_nid = gateway;
	route->lr_priority = priority;
	route->lr_hops = hops;
	lnet_net_lock(LNET_LOCK_EX);

	/*
	 * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
	 * lose that once we're done
	 */
	lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
	if (IS_ERR(lpni)) {
		lnet_net_unlock(LNET_LOCK_EX);

		LIBCFS_FREE(route, sizeof(*route));
		LIBCFS_FREE(rnet, sizeof(*rnet));

		rc = PTR_ERR(lpni);
		CERROR("Error %d creating route %s %d %s\n", rc,
		       libcfs_net2str(net), hops,
		       libcfs_nid2str(gateway));
		return rc;
	}

	LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
	gw = lpni->lpni_peer_net->lpn_peer;

	route->lr_gateway = gw;
	rnet2 = lnet_find_rnet_locked(net);
	if (rnet2 == NULL) {
		/* new network */
		list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
		rnet2 = rnet;
	}

	/* Search for a duplicate route (adding one again is a NOOP) */
	add_route = 1;
	list_for_each(route_entry, &rnet2->lrn_routes) {
		struct lnet_route *route2;

		route2 = list_entry(route_entry, struct lnet_route, lr_list);
		if (route2->lr_gateway == route->lr_gateway) {
			add_route = 0;
			break;
		}

		/* our lookups must be true */
		LASSERT(route2->lr_gateway->lp_primary_nid != gateway);
	}
	/*
	 * It is possible to add multiple routes through the same peer,
	 * but each will use a different NID of that peer. When the
	 * gateway is discovered, discovery will consolidate the different
	 * peers into one peer. In this case the discovery code will have
	 * to move the routes from the peer that's being deleted to the
	 * consolidated peer's lp_routes list.
	 */
	if (add_route)
		lnet_add_route_to_rnet(rnet2, route);

	/*
	 * get rid of the reference on the lpni.
	 */
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	rc = 0;

	if (!add_route) {
		rc = -EEXIST;
		LIBCFS_FREE(route, sizeof(*route));
	}

	if (rnet != rnet2)
		LIBCFS_FREE(rnet, sizeof(*rnet));

	/* kick start the monitor thread to handle the added route */
	wake_up(&the_lnet.ln_mt_waitq);

	return rc;
}
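/*
 * For context (an assumed-typical invocation, not part of this file):
 * lnet_add_route() is ultimately driven from userspace, e.g. via
 *
 *	lnetctl route add --net tcp1 --gateway 192.168.1.1@tcp
 *
 * which reaches this function through the LNet ioctl handler.
 */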
static void
lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
			 struct list_head *zombies)
{
	struct lnet_peer *gateway;
	struct lnet_route *route;
	struct lnet_route *tmp;

	list_for_each_entry_safe(route, tmp, route_list, lr_list) {
		gateway = route->lr_gateway;
		if (gw_nid != LNET_NID_ANY &&
		    gw_nid != gateway->lp_primary_nid)
			continue;

		/*
		 * move to zombie to delete outside the lock
		 * Note that this function is called with the
		 * ln_api_mutex held as well as the exclusive net
		 * lock. Adding to the remote net list happens
		 * under the same conditions. Same goes for the
		 * gateway router list
		 */
		list_move(&route->lr_list, zombies);
		the_lnet.ln_remote_nets_version++;

		list_del(&route->lr_gwlist);
		lnet_rtr_decref_locked(gateway);
	}
}
int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
	struct list_head rnet_zombies;
	struct lnet_remotenet *rnet;
	struct lnet_remotenet *tmp;
	struct list_head *rn_list;
	struct lnet_peer_ni *lpni;
	struct lnet_route *route;
	struct list_head zombies;
	struct lnet_peer *lp;
	int i;

	INIT_LIST_HEAD(&rnet_zombies);
	INIT_LIST_HEAD(&zombies);

	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
	       libcfs_net2str(net), libcfs_nid2str(gw_nid));

	/* NB: the caller may specify either all routes via the given
	 * gateway (gw_nid == LNET_NID_ANY) or a specific route entry
	 * (actual NIDs) */
	lnet_net_lock(LNET_LOCK_EX);

	lpni = lnet_find_peer_ni_locked(gw_nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		LASSERT(lp);
		gw_nid = lp->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}
	if (net != LNET_NIDNET(LNET_NID_ANY)) {
		rnet = lnet_find_rnet_locked(net);
		if (!rnet) {
			lnet_net_unlock(LNET_LOCK_EX);
			return -ENOENT;
		}
		lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
					 &zombies);
		if (list_empty(&rnet->lrn_routes))
			list_move(&rnet->lrn_list, &rnet_zombies);
		goto delete_zombies;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];

		list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
			lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
						 &zombies);
			if (list_empty(&rnet->lrn_routes))
				list_move(&rnet->lrn_list, &rnet_zombies);
		}
	}
delete_zombies:
	lnet_net_unlock(LNET_LOCK_EX);

	while (!list_empty(&zombies)) {
		route = list_first_entry(&zombies, struct lnet_route, lr_list);
		list_del(&route->lr_list);
		LIBCFS_FREE(route, sizeof(*route));
	}

	while (!list_empty(&rnet_zombies)) {
		rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
					lrn_list);
		list_del(&rnet->lrn_list);
		LIBCFS_FREE(rnet, sizeof(*rnet));
	}

	return 0;
}
void
lnet_destroy_routes(void)
{
	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}
int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
{
	struct lnet_rtrbufpool *rbp;
	int i, rc = -ENOENT, j;

	if (the_lnet.ln_rtrpools == NULL)
		return rc;

	cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
		if (i != cpt)
			continue;

		lnet_net_lock(i);
		for (j = 0; j < LNET_NRBPOOLS; j++) {
			pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
			pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
			pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
			pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
		}
		lnet_net_unlock(i);
		rc = 0;
		break;
	}

	lnet_net_lock(LNET_LOCK_EX);
	pool_cfg->pl_routing = the_lnet.ln_routing;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}
int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
{
	struct list_head *e1;
	struct list_head *e2;
	struct lnet_remotenet *rnet;
	struct lnet_route *route;
	int cpt;
	int i;
	struct list_head *rn_list;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each(e1, rn_list) {
			rnet = list_entry(e1, struct lnet_remotenet, lrn_list);

			list_for_each(e2, &rnet->lrn_routes) {
				route = list_entry(e2, struct lnet_route,
						   lr_list);

				if (idx-- == 0) {
					*net = rnet->lrn_net;
					*gateway = route->lr_nid;
					*hops = route->lr_hops;
					*priority = route->lr_priority;
					*alive = lnet_is_route_alive(route);
					lnet_net_unlock(cpt);
					return 0;
				}
			}
		}
	}

	lnet_net_unlock(cpt);
	return -ENOENT;
}
static void
lnet_wait_known_routerstate(void)
{
	struct lnet_peer *rtr;
	struct list_head *entry;
	int all_known;

	LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);

	for (;;) {
		int cpt = lnet_net_lock_current();

		all_known = 1;
		list_for_each(entry, &the_lnet.ln_routers) {
			rtr = list_entry(entry, struct lnet_peer,
					 lp_rtr_list);

			spin_lock(&rtr->lp_lock);

			if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
				all_known = 0;
				spin_unlock(&rtr->lp_lock);
				break;
			}
			spin_unlock(&rtr->lp_lock);
		}

		lnet_net_unlock(cpt);

		if (all_known)
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
}
static void
lnet_update_ni_status_locked(void)
{
	struct lnet_ni *ni = NULL;
	time64_t now;
	time64_t timeout;

	LASSERT(the_lnet.ln_routing);

	timeout = router_ping_timeout + alive_router_check_interval;

	now = ktime_get_real_seconds();
	while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
		if (ni->ni_net->net_lnd->lnd_type == LOLND)
			continue;

		if (now < ni->ni_last_alive + timeout)
			continue;

		lnet_ni_lock(ni);
		/* re-check with lock */
		if (now < ni->ni_last_alive + timeout) {
			lnet_ni_unlock(ni);
			continue;
		}

		LASSERT(ni->ni_status != NULL);

		if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
			CDEBUG(D_NET, "NI(%s:%lld) status changed to down\n",
			       libcfs_nid2str(ni->ni_nid), timeout);
			/* NB: so far, this is the only place to set
			 * NI status to "down" */
			ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
		}
		lnet_ni_unlock(ni);
	}
}
void lnet_wait_router_start(void)
{
	if (check_routers_before_use) {
		/* Note that a helpful side-effect of pinging all known routers
		 * at startup is that it makes them drop stale connections they
		 * may have to a previous instance of me. */
		lnet_wait_known_routerstate();
	}
}
/*
 * This function is called from the monitor thread to check if there are
 * any active routers that need to be checked.
 */
inline bool
lnet_router_checker_active(void)
{
	/* Router Checker thread needs to run when routing is enabled in
	 * order to call lnet_update_ni_status_locked() */
	if (the_lnet.ln_routing)
		return true;

	return !list_empty(&the_lnet.ln_routers) &&
	       alive_router_check_interval > 0;
}
void
lnet_check_routers(void)
{
	struct lnet_peer_ni *lpni;
	struct list_head *entry;
	struct lnet_peer *rtr;
	__u64 version;
	time64_t now;
	int cpt;
	int rc;

	cpt = lnet_net_lock_current();
rescan:
	version = the_lnet.ln_routers_version;

	list_for_each(entry, &the_lnet.ln_routers) {
		rtr = list_entry(entry, struct lnet_peer,
				 lp_rtr_list);

		now = ktime_get_real_seconds();

		/*
		 * only discover the router if we've passed
		 * alive_router_check_interval seconds. Some of the router
		 * interfaces could be down and in that case they would be
		 * undergoing recovery separately from this discovery.
		 */
		if (now - rtr->lp_rtrcheck_timestamp <
		    alive_router_check_interval)
			continue;

		/*
		 * If we're currently discovering the peer then don't
		 * issue another discovery
		 */
		spin_lock(&rtr->lp_lock);
		if (rtr->lp_state & LNET_PEER_RTR_DISCOVERY) {
			spin_unlock(&rtr->lp_lock);
			continue;
		}
		/* make sure we actively discover the router */
		rtr->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		rtr->lp_state |= LNET_PEER_RTR_DISCOVERY;
		spin_unlock(&rtr->lp_lock);

		/* find the peer_ni associated with the primary NID */
		lpni = lnet_peer_get_ni_locked(rtr, rtr->lp_primary_nid);
		if (!lpni) {
			CDEBUG(D_NET, "Expected to find an lpni for %s, but none found\n",
			       libcfs_nid2str(rtr->lp_primary_nid));
			continue;
		}
		lnet_peer_ni_addref_locked(lpni);

		/* discover the router */
		CDEBUG(D_NET, "discover %s, cpt = %d\n",
		       libcfs_nid2str(lpni->lpni_nid), cpt);
		rc = lnet_discover_peer_locked(lpni, cpt, false);

		/* drop the ref taken above */
		lnet_peer_ni_decref_locked(lpni);

		if (!rc)
			rtr->lp_rtrcheck_timestamp = now;
		else
			CERROR("Failed to discover router %s\n",
			       libcfs_nid2str(rtr->lp_primary_nid));

		/* NB: the lock was dropped in lnet_discover_peer_locked() */
		if (version != the_lnet.ln_routers_version) {
			/* the routers list has changed */
			goto rescan;
		}
	}

	if (the_lnet.ln_routing)
		lnet_update_ni_status_locked();

	lnet_net_unlock(cpt);
}
static void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);

	while (--npages >= 0)
		__free_page(rb->rb_kiov[npages].kiov_page);

	LIBCFS_FREE(rb, sz);
}
static struct lnet_rtrbuf *
lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
	struct page *page;
	struct lnet_rtrbuf *rb;
	int i;

	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
	if (rb == NULL)
		return NULL;

	rb->rb_pool = rbp;

	for (i = 0; i < npages; i++) {
		page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
					  GFP_KERNEL | __GFP_ZERO);
		if (page == NULL) {
			while (--i >= 0)
				__free_page(rb->rb_kiov[i].kiov_page);

			LIBCFS_FREE(rb, sz);
			return NULL;
		}

		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
		rb->rb_kiov[i].kiov_offset = 0;
		rb->rb_kiov[i].kiov_page = page;
	}

	return rb;
}
static void
lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	struct lnet_rtrbuf *rb;
	struct list_head tmp;

	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
		return;

	INIT_LIST_HEAD(&tmp);

	lnet_net_lock(cpt);
	list_splice_init(&rbp->rbp_msgs, &tmp);
	lnet_drop_routed_msgs_locked(&tmp, cpt);
	list_splice_init(&rbp->rbp_bufs, &tmp);
	rbp->rbp_req_nbuffers = 0;
	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
	lnet_net_unlock(cpt);

	/* Free buffers on the free list. */
	while (!list_empty(&tmp)) {
		rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}
}
static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
	struct list_head rb_list;
	struct lnet_rtrbuf *rb;
	int num_rb;
	int num_buffers = 0;
	int old_req_nbufs;
	int npages = rbp->rbp_npages;

	lnet_net_lock(cpt);
	/* If we are called for less buffers than already in the pool, we
	 * just lower the req_nbuffers number and excess buffers will be
	 * thrown away as they are returned to the free list. Credits
	 * then get adjusted as well.
	 * If we already have enough buffers allocated to serve the
	 * increase requested, then we can treat that the same way as we
	 * do the decrease. */
	num_rb = nbufs - rbp->rbp_nbuffers;
	if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
		rbp->rbp_req_nbuffers = nbufs;
		lnet_net_unlock(cpt);
		return 0;
	}
	/* store the older value of rbp_req_nbuffers and then set it to
	 * the new request to prevent lnet_return_rx_credits_locked() from
	 * freeing buffers that we need to keep around */
	old_req_nbufs = rbp->rbp_req_nbuffers;
	rbp->rbp_req_nbuffers = nbufs;
	lnet_net_unlock(cpt);

	INIT_LIST_HEAD(&rb_list);

	/* allocate the buffers on a local list first. If all buffers are
	 * allocated successfully then join this list to the rbp buffer
	 * list. If not then free all allocated buffers. */
	while (num_rb-- > 0) {
		rb = lnet_new_rtrbuf(rbp, cpt);
		if (rb == NULL) {
			CERROR("Failed to allocate %d route bufs of %d pages\n",
			       nbufs, npages);

			lnet_net_lock(cpt);
			rbp->rbp_req_nbuffers = old_req_nbufs;
			lnet_net_unlock(cpt);

			goto failed;
		}

		list_add(&rb->rb_list, &rb_list);
		num_buffers++;
	}

	lnet_net_lock(cpt);

	list_splice_tail(&rb_list, &rbp->rbp_bufs);
	rbp->rbp_nbuffers += num_buffers;
	rbp->rbp_credits += num_buffers;
	rbp->rbp_mincredits = rbp->rbp_credits;
	/* We need to schedule blocked msg using the newly
	 * added buffers. */
	while (!list_empty(&rbp->rbp_bufs) &&
	       !list_empty(&rbp->rbp_msgs))
		lnet_schedule_blocked_locked(rbp);

	lnet_net_unlock(cpt);

	return 0;

failed:
	while (!list_empty(&rb_list)) {
		rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}

	return -ENOMEM;
}
static void
lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
{
	INIT_LIST_HEAD(&rbp->rbp_msgs);
	INIT_LIST_HEAD(&rbp->rbp_bufs);

	rbp->rbp_npages = npages;
	rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
}
void
lnet_rtrpools_free(int keep_pools)
{
	struct lnet_rtrbufpool *rtrp;
	int i;

	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
		return;

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
	}

	if (!keep_pools) {
		cfs_percpt_free(the_lnet.ln_rtrpools);
		the_lnet.ln_rtrpools = NULL;
	}
}
static int
lnet_nrb_tiny_calculate(void)
{
	int nrbs = LNET_NRB_TINY;

	if (tiny_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "tiny_router_buffers=%d invalid when "
				   "routing enabled\n", tiny_router_buffers);
		return -EINVAL;
	}

	if (tiny_router_buffers > 0)
		nrbs = tiny_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_TINY_MIN);
}
static int
lnet_nrb_small_calculate(void)
{
	int nrbs = LNET_NRB_SMALL;

	if (small_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "small_router_buffers=%d invalid when "
				   "routing enabled\n", small_router_buffers);
		return -EINVAL;
	}

	if (small_router_buffers > 0)
		nrbs = small_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_SMALL_MIN);
}
static int
lnet_nrb_large_calculate(void)
{
	int nrbs = LNET_NRB_LARGE;

	if (large_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "large_router_buffers=%d invalid when "
				   "routing enabled\n", large_router_buffers);
		return -EINVAL;
	}

	if (large_router_buffers > 0)
		nrbs = large_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_LARGE_MIN);
}
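/*
 * A worked example of the sizing math above (illustrative numbers): on
 * a node with LNET_CPT_NUMBER == 4 and small_router_buffers=8192, each
 * CPT pool gets 8192 / 4 = 2048 small buffers; with the parameter unset
 * the default LNET_NRB_SMALL (4096 * 4) divides out to 4096 per CPT.
 * Either way, the result is never allowed below the *_MIN floor for
 * that pool type.
 */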
int
lnet_rtrpools_alloc(int im_a_router)
{
	struct lnet_rtrbufpool *rtrp;
	int nrb_tiny;
	int nrb_small;
	int nrb_large;
	int rc;
	int i;

	if (!strcmp(forwarding, "")) {
		/* not set either way */
		if (!im_a_router)
			return 0;
	} else if (!strcmp(forwarding, "disabled")) {
		/* explicitly disabled */
		return 0;
	} else if (!strcmp(forwarding, "enabled")) {
		/* explicitly enabled */
	} else {
		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
				   "'enabled' or 'disabled'\n");
		return -EINVAL;
	}

	nrb_tiny = lnet_nrb_tiny_calculate();
	if (nrb_tiny < 0)
		return -EINVAL;
	nrb_small = lnet_nrb_small_calculate();
	if (nrb_small < 0)
		return -EINVAL;
	nrb_large = lnet_nrb_large_calculate();
	if (nrb_large < 0)
		return -EINVAL;

	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
						LNET_NRBPOOLS *
						sizeof(struct lnet_rtrbufpool));
	if (the_lnet.ln_rtrpools == NULL) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "Failed to initialize router buffer pool\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
					      nrb_tiny, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
				  LNET_NRB_SMALL_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
					      nrb_small, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
				  LNET_NRB_LARGE_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
					      nrb_large, i);
		if (rc != 0)
			goto failed;
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;
	lnet_net_unlock(LNET_LOCK_EX);
	wake_up(&the_lnet.ln_mt_waitq);
	return 0;

failed:
	lnet_rtrpools_free(0);
	return rc;
}
static int
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
{
	int nrb = 0;
	int rc = 0;
	int i;
	struct lnet_rtrbufpool *rtrp;

	/* If the provided values for each buffer pool are different than the
	 * configured values, we need to take action. */
	if (tiny >= 0) {
		tiny_router_buffers = tiny;
		nrb = lnet_nrb_tiny_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	if (small >= 0) {
		small_router_buffers = small;
		nrb = lnet_nrb_small_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	if (large >= 0) {
		large_router_buffers = large;
		nrb = lnet_nrb_large_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

int
lnet_rtrpools_adjust(int tiny, int small, int large)
{
	/* this function doesn't revert the changes if adding new buffers
	 * failed. It's up to the user space caller to revert the
	 * changes. */
	if (!the_lnet.ln_routing)
		return 0;

	return lnet_rtrpools_adjust_helper(tiny, small, large);
}
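/*
 * Sketch of the runtime path (an assumed-typical userspace trigger, not
 * defined in this file): when routing is enabled, a command such as
 *
 *	lnetctl set tiny_buffers 4096
 *
 * lands in lnet_rtrpools_adjust() via the ioctl layer, with a negative
 * value passed for the pools that should keep their current size.
 */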
int
lnet_rtrpools_enable(void)
{
	int rc = 0;

	if (the_lnet.ln_routing)
		return 0;

	if (the_lnet.ln_rtrpools == NULL)
		/* If routing is turned off, and we have never
		 * initialized the pools before, just call the
		 * standard buffer pool allocation routine as
		 * if we are just configuring this for the first
		 * time. */
		rc = lnet_rtrpools_alloc(1);
	else
		rc = lnet_rtrpools_adjust_helper(0, 0, 0);
	if (rc != 0)
		return rc;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;

	the_lnet.ln_ping_target->pb_info.pi_features &=
		~LNET_PING_FEAT_RTE_DISABLED;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}
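/*
 * Runtime enable/disable is symmetric (illustrative usage, assuming the
 * standard lnetctl interface): "lnetctl set routing 1" reaches
 * lnet_rtrpools_enable() above, and "lnetctl set routing 0" reaches
 * lnet_rtrpools_disable() below.
 */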
void
lnet_rtrpools_disable(void)
{
	if (!the_lnet.ln_routing)
		return;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 0;
	the_lnet.ln_ping_target->pb_info.pi_features |=
		LNET_PING_FEAT_RTE_DISABLED;

	tiny_router_buffers = 0;
	small_router_buffers = 0;
	large_router_buffers = 0;
	lnet_net_unlock(LNET_LOCK_EX);
	lnet_rtrpools_free(1);
}
static void
lnet_notify_peer_down(struct lnet_ni *ni, lnet_nid_t nid)
{
	if (ni->ni_net->net_lnd->lnd_notify_peer_down != NULL)
		(ni->ni_net->net_lnd->lnd_notify_peer_down)(nid);
}
/*
 * ni: local NI used to communicate with the peer
 * nid: peer NID
 * alive: true if peer is alive, false otherwise
 * reset: reset health value. This is requested by the LND.
 * when: notification time.
 */
int
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, bool alive, bool reset,
	    time64_t when)
{
	struct lnet_peer_ni *lpni = NULL;
	time64_t now = ktime_get_seconds();
	int cpt;

	LASSERT(!in_interrupt());

	CDEBUG(D_NET, "%s notifying %s: %s\n",
	       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
	       libcfs_nid2str(nid),
	       alive ? "up" : "down");

	if (ni != NULL &&
	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
		CWARN("Ignoring notification of %s %s by %s (different net)\n",
		      libcfs_nid2str(nid), alive ? "birth" : "death",
		      libcfs_nid2str(ni->ni_nid));
		return -EINVAL;
	}

	/* can't do predictions... */
	if (when > now) {
		CWARN("Ignoring prediction from %s of %s %s "
		      "%lld seconds in the future\n",
		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
		      libcfs_nid2str(nid), alive ? "up" : "down", when - now);
		return -EINVAL;
	}

	if (ni != NULL && !alive &&	/* LND telling me she's down */
	    !auto_down) {		/* auto-down disabled */
		CDEBUG(D_NET, "Auto-down disabled\n");
		return 0;
	}

	/* must lock 0 since this is used for synchronization */
	lnet_net_lock(0);

	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lnet_net_unlock(0);
		return -ESHUTDOWN;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni == NULL) {
		/* nid not found */
		lnet_net_unlock(0);
		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
		return 0;
	}

	if (alive) {
		if (reset)
			lnet_set_healthv(&lpni->lpni_healthv,
					 LNET_MAX_HEALTH_VALUE);
		else
			lnet_inc_healthv(&lpni->lpni_healthv);
	} else {
		lnet_handle_remote_failure_locked(lpni);
	}

	/* recalculate aliveness */
	alive = lnet_is_peer_ni_alive(lpni);
	lnet_net_unlock(0);

	if (ni != NULL && !alive)
		lnet_notify_peer_down(ni, lpni->lpni_nid);

	cpt = lpni->lpni_cpt;
	lnet_net_lock(cpt);
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(cpt);

	return 0;
}
EXPORT_SYMBOL(lnet_notify);
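/*
 * A minimal sketch of how an LND might report a dead peer through this
 * exported hook (hypothetical caller, for illustration only): report
 * "down" as of now and do not reset the health value:
 *
 *	static void my_lnd_conn_failed(struct lnet_ni *ni, lnet_nid_t nid)
 *	{
 *		lnet_notify(ni, nid, false, false, ktime_get_seconds());
 *	}
 */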