/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, https://wiki.whamcloud.com/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/random.h>
#include <lnet/lib-lnet.h>

#define LNET_NRB_TINY_MIN	512	/* min value for each CPT */
#define LNET_NRB_TINY		(LNET_NRB_TINY_MIN * 4)
#define LNET_NRB_SMALL_MIN	4096	/* min value for each CPT */
#define LNET_NRB_SMALL		(LNET_NRB_SMALL_MIN * 4)
#define LNET_NRB_SMALL_PAGES	1
#define LNET_NRB_LARGE_MIN	256	/* min value for each CPT */
#define LNET_NRB_LARGE		(LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES	((LNET_MTU + PAGE_SIZE - 1) >> \
				 PAGE_SHIFT)

extern unsigned int lnet_current_net_count;

static char *forwarding = "";
module_param(forwarding, charp, 0444);
MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers;
module_param(tiny_router_buffers, int, 0444);
MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
static int small_router_buffers;
module_param(small_router_buffers, int, 0444);
MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
static int large_router_buffers;
module_param(large_router_buffers, int, 0444);
MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
static int peer_buffer_credits;
module_param(peer_buffer_credits, int, 0444);
MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");

static int auto_down = 1;
module_param(auto_down, int, 0444);
MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");

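/*
 * Example (illustrative only, not part of the original source): the
 * module parameters above are normally set at load time via modprobe
 * configuration, e.g. in /etc/modprobe.d/lnet.conf:
 *
 *   options lnet forwarding="enabled" tiny_router_buffers=2048
 *   options lnet small_router_buffers=16384 large_router_buffers=1024
 *
 * The numbers shown are hypothetical; appropriate values depend on the
 * router's memory and expected traffic.
 */
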
int
lnet_peer_buffer_credits(struct lnet_net *net)
{
	/* NI option overrides LNet default */
	if (net->net_tunables.lct_peer_rtr_credits > 0)
		return net->net_tunables.lct_peer_rtr_credits;
	if (peer_buffer_credits > 0)
		return peer_buffer_credits;

	/* As an approximation, allow this peer the same number of router
	 * buffers as it is allowed outstanding sends */
	return net->net_tunables.lct_peer_tx_credits;
}

static int check_routers_before_use;
module_param(check_routers_before_use, int, 0444);
MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");

int avoid_asym_router_failure = 1;
module_param(avoid_asym_router_failure, int, 0644);
MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");

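/*
 * An "asymmetrical" router failure is one where the gateway is still
 * reachable on the local network while one of its interfaces on the
 * remote network is down. With avoid_asym_router_failure enabled, a
 * route is only considered alive if the gateway also has a healthy NI
 * on the route's remote net (see lnet_is_route_alive() below).
 */
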
int dead_router_check_interval = INT_MIN;
module_param(dead_router_check_interval, int, 0444);
MODULE_PARM_DESC(dead_router_check_interval, "(DEPRECATED - Use alive_router_check_interval)");

int live_router_check_interval = INT_MIN;
module_param(live_router_check_interval, int, 0444);
MODULE_PARM_DESC(live_router_check_interval, "(DEPRECATED - Use alive_router_check_interval)");

int alive_router_check_interval = 60;
module_param(alive_router_check_interval, int, 0644);
MODULE_PARM_DESC(alive_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
module_param(router_ping_timeout, int, 0644);
MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");

/*
 * A value between 0 and 100. 0 means that even if the router's
 * interfaces have the worst health, the gateway is still considered
 * usable. 100 means that at least one interface on the route's remote
 * net must be 100% healthy for the route to be considered alive.
 * The default is set to 100 to ensure we maintain the original behavior.
 */
unsigned int router_sensitivity_percentage = 100;
static int rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp);
static struct kernel_param_ops param_ops_rtr_sensitivity = {
	.set = rtr_sensitivity_set,
	.get = param_get_int,
};
#define param_check_rtr_sensitivity(name, p) \
		__param_check(name, p, int)
#ifdef HAVE_KERNEL_PARAM_OPS
module_param(router_sensitivity_percentage, rtr_sensitivity, S_IRUGO|S_IWUSR);
#else
module_param_call(router_sensitivity_percentage, rtr_sensitivity_set,
		  param_get_int, &router_sensitivity_percentage,
		  S_IRUGO|S_IWUSR);
#endif
MODULE_PARM_DESC(router_sensitivity_percentage,
		 "How healthy a gateway should be to be used in percent");

static int
rtr_sensitivity_set(const char *val, cfs_kernel_param_arg_t *kp)
{
	int rc;
	unsigned *sen = (unsigned *)kp->arg;
	unsigned long value;

	rc = kstrtoul(val, 0, &value);
	if (rc) {
		CERROR("Invalid module parameter value for 'router_sensitivity_percentage'\n");
		return rc;
	}

	/* value is unsigned, so only the upper bound needs checking */
	if (value > 100) {
		CERROR("Invalid value: %lu for 'router_sensitivity_percentage'\n", value);
		return -EINVAL;
	}

	/*
	 * The purpose of locking the api_mutex here is to ensure that
	 * the correct value ends up stored properly.
	 */
	mutex_lock(&the_lnet.ln_api_mutex);

	*sen = value;

	mutex_unlock(&the_lnet.ln_api_mutex);

	return 0;
}

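/*
 * Hand all router state owned by @src over to @target: the router
 * refcount, any queued routed messages, and the routes themselves.
 * Used when discovery consolidates two gateway peers into one.
 */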
void
lnet_rtr_transfer_to_peer(struct lnet_peer *src, struct lnet_peer *target)
{
	struct lnet_route *route;

	lnet_net_lock(LNET_LOCK_EX);
	target->lp_rtr_refcount += src->lp_rtr_refcount;
	/* move the list of queued messages to the new peer */
	list_splice_init(&src->lp_rtrq, &target->lp_rtrq);
	/* move all the routes that reference the peer */
	list_splice_init(&src->lp_routes, &target->lp_routes);
	/* update all the routes to point to the new peer */
	list_for_each_entry(route, &target->lp_routes, lr_gwlist)
		route->lr_gateway = target;
	/* remove the old peer from the ln_routers list */
	list_del_init(&src->lp_rtr_list);
	/* add the new peer to the ln_routers list */
	if (list_empty(&target->lp_rtr_list)) {
		lnet_peer_addref_locked(target);
		list_add_tail(&target->lp_rtr_list, &the_lnet.ln_routers);
	}
	/* reset the ref count on the old peer and decrement its ref count */
	src->lp_rtr_refcount = 0;
	lnet_peer_decref_locked(src);
	/* update the router version */
	the_lnet.ln_routers_version++;
	lnet_net_unlock(LNET_LOCK_EX);
}

int
lnet_peers_start_down(void)
{
	return check_routers_before_use;
}

/*
 * A net is alive if at least one gateway NI on the network is alive.
 */
static bool
lnet_is_gateway_net_alive(struct lnet_peer_net *lpn)
{
	struct lnet_peer_ni *lpni;

	list_for_each_entry(lpni, &lpn->lpn_peer_nis, lpni_peer_nis) {
		if (lnet_is_peer_ni_alive(lpni))
			return true;
	}

	return false;
}

/*
 * a gateway is alive only if all its nets are alive
 * called with cpt lock held
 */
bool lnet_is_gateway_alive(struct lnet_peer *gw)
{
	struct lnet_peer_net *lpn;

	list_for_each_entry(lpn, &gw->lp_peer_nets, lpn_peer_nets) {
		if (!lnet_is_gateway_net_alive(lpn))
			return false;
	}

	return true;
}

/*
 * lnet_is_route_alive() needs to be called with cpt lock held
 * A route is alive if the gateway can route between the local network and
 * the remote network of the route.
 * This means at least one NI is alive on each of the local and remote
 * networks of the gateway.
 */
bool lnet_is_route_alive(struct lnet_route *route)
{
	struct lnet_peer *gw = route->lr_gateway;
	struct lnet_peer_net *llpn;
	struct lnet_peer_net *rlpn;
	bool route_alive;

	/*
	 * if discovery is disabled then rely on the cached aliveness
	 * information. This information is recorded when we receive the
	 * discovery ping response and may be stale. The most up-to-date
	 * aliveness information can only be obtained when discovery is
	 * enabled.
	 */
	if (lnet_peer_discovery_disabled)
		return route->lr_alive;

	/*
	 * check the gateway's interfaces on the route's local net to
	 * make sure that the gateway is viable.
	 */
	llpn = lnet_peer_get_net_locked(gw, route->lr_lnet);
	if (!llpn)
		return false;

	route_alive = lnet_is_gateway_net_alive(llpn);

	if (avoid_asym_router_failure) {
		rlpn = lnet_peer_get_net_locked(gw, route->lr_net);
		if (!rlpn)
			return false;
		route_alive = route_alive &&
			      lnet_is_gateway_net_alive(rlpn);
	}

	if (!route_alive)
		return route_alive;

	spin_lock(&gw->lp_lock);
	if (!(gw->lp_state & LNET_PEER_ROUTER_ENABLED)) {
		if (gw->lp_rtr_refcount > 0)
			CERROR("peer %s is being used as a gateway but routing feature is not turned on\n",
			       libcfs_nid2str(gw->lp_primary_nid));
		route_alive = false;
	}
	spin_unlock(&gw->lp_lock);

	return route_alive;
}

void
lnet_consolidate_routes_locked(struct lnet_peer *orig_lp,
			       struct lnet_peer *new_lp)
{
	struct lnet_peer_ni *lpni;
	struct lnet_route *route;

	/*
	 * Although a route is correlated with a peer, when it is added
	 * a specific NID is used. That NID refers to a peer_ni within
	 * a peer. There could be other peer_nis on the same net, which
	 * can be used to send to that gateway. However when we are
	 * consolidating gateways because of discovery, the nid used to
	 * add the route might've moved between gateway peers. In this
	 * case we want to move the route to the new gateway as well. The
	 * intent here is not to confuse the user who added the route.
	 */
	list_for_each_entry(route, &orig_lp->lp_routes, lr_gwlist) {
		lpni = lnet_peer_get_ni_locked(orig_lp, route->lr_nid);
		if (!lpni) {
			lnet_net_lock(LNET_LOCK_EX);
			list_move(&route->lr_gwlist, &new_lp->lp_routes);
			lnet_net_unlock(LNET_LOCK_EX);
		}
	}
}

static void
lnet_set_route_aliveness(struct lnet_route *route, bool alive)
{
	/* Log when there's a state change */
	if (route->lr_alive != alive) {
		CERROR("route to %s through %s has gone from %s to %s\n",
		       libcfs_net2str(route->lr_net),
		       libcfs_nid2str(route->lr_gateway->lp_primary_nid),
		       (route->lr_alive) ? "up" : "down",
		       alive ? "up" : "down");
		route->lr_alive = alive;
	}
}

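/*
 * Examine the ping buffer returned by a gateway and update the
 * aliveness of every route it serves. Only relevant when discovery is
 * disabled; with discovery enabled the peer records are kept up to
 * date by the discovery state machine instead.
 */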
void
lnet_router_discovery_ping_reply(struct lnet_peer *lp)
{
	struct lnet_ping_buffer *pbuf = lp->lp_data;
	struct lnet_remotenet *rnet;
	struct lnet_peer_net *llpn;
	struct lnet_route *route;
	bool net_up = false;
	unsigned lp_state;
	__u32 net, net2;
	int i, j;

	spin_lock(&lp->lp_lock);
	lp_state = lp->lp_state;
	spin_unlock(&lp->lp_lock);

	/* only handle replies if discovery is disabled. */
	if (!lnet_peer_discovery_disabled)
		return;

	if (lp_state & LNET_PEER_PING_FAILED) {
		CDEBUG(D_NET,
		       "Ping failed with %d. Set routes down for gw %s\n",
		       lp->lp_ping_error, libcfs_nid2str(lp->lp_primary_nid));
		/* If the ping failed then mark the routes served by this
		 * peer down */
		list_for_each_entry(route, &lp->lp_routes, lr_gwlist)
			lnet_set_route_aliveness(route, false);
		return;
	}

	CDEBUG(D_NET, "Discovery is disabled. Processing reply for gw: %s\n",
	       libcfs_nid2str(lp->lp_primary_nid));

	/*
	 * examine the ping response:
	 * For each NID in the ping response, extract the net
	 * if the net exists on our remote net list then
	 * iterate over the routes on the rnet and if:
	 *	The route's local net is healthy and
	 *	The remote net status is UP, then mark the route up
	 * otherwise mark the route down
	 */
	for (i = 1; i < pbuf->pb_info.pi_nnis; i++) {
		net = LNET_NIDNET(pbuf->pb_info.pi_ni[i].ns_nid);
		rnet = lnet_find_rnet_locked(net);
		if (!rnet)
			continue;
		list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
			/* check if this is the route's gateway */
			if (lp->lp_primary_nid !=
			    route->lr_gateway->lp_primary_nid)
				continue;

			/* gateway has the routing feature disabled */
			if (pbuf->pb_info.pi_features &
			    LNET_PING_FEAT_RTE_DISABLED) {
				lnet_set_route_aliveness(route, false);
				continue;
			}

			llpn = lnet_peer_get_net_locked(lp, route->lr_lnet);
			if (!llpn) {
				lnet_set_route_aliveness(route, false);
				continue;
			}

			if (!lnet_is_gateway_net_alive(llpn)) {
				lnet_set_route_aliveness(route, false);
				continue;
			}

			if (avoid_asym_router_failure &&
			    pbuf->pb_info.pi_ni[i].ns_status !=
			    LNET_NI_STATUS_UP) {
				net_up = false;

				/*
				 * revisit all previous NIDs and check if
				 * any on the network we're examining is
				 * up. If at least one is up then we consider
				 * the route to be alive.
				 */
				for (j = 1; j < i; j++) {
					net2 = LNET_NIDNET(pbuf->pb_info.
							   pi_ni[j].ns_nid);
					if (net2 == net &&
					    pbuf->pb_info.pi_ni[j].ns_status ==
					    LNET_NI_STATUS_UP)
						net_up = true;
				}

				if (!net_up) {
					lnet_set_route_aliveness(route, false);
					continue;
				}
			}

			lnet_set_route_aliveness(route, true);
		}
	}
}

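/*
 * Called when a discovery pass on a gateway finishes. On failure the
 * status of each of the gateway's peer NIs is set to DOWN and all
 * routes through it are marked dead until a later discovery succeeds.
 */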
void
lnet_router_discovery_complete(struct lnet_peer *lp)
{
	struct lnet_peer_ni *lpni = NULL;
	struct lnet_route *route;

	spin_lock(&lp->lp_lock);
	lp->lp_state &= ~LNET_PEER_RTR_DISCOVERY;
	spin_unlock(&lp->lp_lock);

	/*
	 * Router discovery successful? All peer information would've been
	 * updated already. No need to do any more processing
	 */
	if (!lp->lp_dc_error)
		return;
	/*
	 * discovery failed? then we need to set the status of each lpni
	 * to DOWN. It will be updated the next time we discover the
	 * router. For router peer NIs not on local networks, we never send
	 * messages directly to them, so their health will always remain
	 * at maximum. We can only tell if they are up or down from the
	 * status returned in the PING response. If we fail to get that
	 * status in our scheduled router discovery, then we'll assume
	 * it's down until we're told otherwise.
	 */
	CDEBUG(D_NET, "%s: Router discovery failed %d\n",
	       libcfs_nid2str(lp->lp_primary_nid), lp->lp_dc_error);
	while ((lpni = lnet_get_next_peer_ni_locked(lp, NULL, lpni)) != NULL)
		lpni->lpni_ns_status = LNET_NI_STATUS_DOWN;

	list_for_each_entry(route, &lp->lp_routes, lr_gwlist)
		lnet_set_route_aliveness(route, false);
}

static void
lnet_rtr_addref_locked(struct lnet_peer *lp)
{
	LASSERT(lp->lp_rtr_refcount >= 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount++;
	if (lp->lp_rtr_refcount == 1) {
		list_add_tail(&lp->lp_rtr_list, &the_lnet.ln_routers);
		/* addref for the_lnet.ln_routers */
		lnet_peer_addref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}

static void
lnet_rtr_decref_locked(struct lnet_peer *lp)
{
	LASSERT(atomic_read(&lp->lp_refcount) > 0);
	LASSERT(lp->lp_rtr_refcount > 0);

	/* lnet_net_lock must be exclusively locked */
	lp->lp_rtr_refcount--;
	if (lp->lp_rtr_refcount == 0) {
		LASSERT(list_empty(&lp->lp_routes));

		list_del(&lp->lp_rtr_list);
		/* decref for the_lnet.ln_routers */
		lnet_peer_decref_locked(lp);
		the_lnet.ln_routers_version++;
	}
}

struct lnet_remotenet *
lnet_find_rnet_locked(__u32 net)
{
	struct lnet_remotenet *rnet;
	struct list_head *tmp;
	struct list_head *rn_list;

	LASSERT(the_lnet.ln_state == LNET_STATE_RUNNING);

	rn_list = lnet_net2rnethash(net);
	list_for_each(tmp, rn_list) {
		rnet = list_entry(tmp, struct lnet_remotenet, lrn_list);

		if (rnet->lrn_net == net)
			return rnet;
	}

	return NULL;
}

static void lnet_shuffle_seed(void)
{
	static int seeded;
	struct lnet_ni *ni = NULL;

	if (seeded)
		return;

	/* Nodes with small feet have little entropy
	 * the NID for this node gives the most entropy in the low bits */
	while ((ni = lnet_get_next_ni_locked(NULL, ni)))
		add_device_randomness(&ni->ni_nid, sizeof(ni->ni_nid));

	seeded = 1;
}

/* NB expects LNET_LOCK held */
static void
lnet_add_route_to_rnet(struct lnet_remotenet *rnet, struct lnet_route *route)
{
	struct lnet_peer_net *lpn;
	unsigned int offset = 0;
	unsigned int len = 0;
	struct list_head *e;

	lnet_shuffle_seed();

	list_for_each(e, &rnet->lrn_routes)
		len++;

	/*
	 * Randomly adding routes to the list is done to ensure that when
	 * different nodes are using the same list of routers, they end up
	 * preferring different routers.
	 */
	offset = prandom_u32_max(len + 1);
	list_for_each(e, &rnet->lrn_routes) {
		if (offset == 0)
			break;
		offset--;
	}
	list_add(&route->lr_list, e);
	/*
	 * force a router check on the gateway to make sure the route is
	 * alive
	 */
	list_for_each_entry(lpn, &route->lr_gateway->lp_peer_nets,
			    lpn_peer_nets)
		lpn->lpn_rtrcheck_timestamp = 0;

	the_lnet.ln_remote_nets_version++;

	/* add the route on the gateway list */
	list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);

	/* take a router reference count on the gateway */
	lnet_rtr_addref_locked(route->lr_gateway);
}

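/*
 * lnet_add_route() is driven from userspace configuration. As an
 * illustration (assumed utility syntax, not defined in this file), a
 * route is typically added with something like:
 *
 *   lnetctl route add --net o2ib --gateway 192.168.0.1@tcp
 *
 * which maps to net = o2ib, gateway = 192.168.0.1@tcp, and the default
 * hop count, priority and sensitivity.
 */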
int
lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
	       __u32 priority, __u32 sensitivity)
{
	struct list_head *route_entry;
	struct lnet_remotenet *rnet;
	struct lnet_remotenet *rnet2;
	struct lnet_route *route;
	struct lnet_peer_ni *lpni;
	struct lnet_peer *gw;
	int add_route;
	int rc;

	CDEBUG(D_NET, "Add route: remote net %s hops %d priority %u gw %s\n",
	       libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));

	if (gateway == LNET_NID_ANY ||
	    LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
	    net == LNET_NIDNET(LNET_NID_ANY) ||
	    LNET_NETTYP(net) == LOLND ||
	    LNET_NIDNET(gateway) == net ||
	    (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
		return -EINVAL;

	/* it's a local network */
	if (lnet_islocalnet(net))
		return -EEXIST;

	if (!lnet_islocalnet(LNET_NIDNET(gateway))) {
		CERROR("Cannot add route with gateway %s. There is no local interface configured on LNet %s\n",
		       libcfs_nid2str(gateway),
		       libcfs_net2str(LNET_NIDNET(gateway)));
		return -EHOSTUNREACH;
	}

	/* Assume net, route, all new */
	LIBCFS_ALLOC(route, sizeof(*route));
	LIBCFS_ALLOC(rnet, sizeof(*rnet));
	if (route == NULL || rnet == NULL) {
		CERROR("Out of memory creating route %s %d %s\n",
		       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
		if (route != NULL)
			LIBCFS_FREE(route, sizeof(*route));
		if (rnet != NULL)
			LIBCFS_FREE(rnet, sizeof(*rnet));
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&rnet->lrn_routes);
	rnet->lrn_net = net;
	/* store the local and remote net that the route represents */
	route->lr_lnet = LNET_NIDNET(gateway);
	route->lr_net = net;
	route->lr_nid = gateway;
	route->lr_priority = priority;
	route->lr_hops = hops;

	lnet_net_lock(LNET_LOCK_EX);

	/*
	 * lnet_nid2peerni_ex() grabs a ref on the lpni. We will need to
	 * lose that once we're done
	 */
	lpni = lnet_nid2peerni_ex(gateway, LNET_LOCK_EX);
	if (IS_ERR(lpni)) {
		lnet_net_unlock(LNET_LOCK_EX);

		LIBCFS_FREE(route, sizeof(*route));
		LIBCFS_FREE(rnet, sizeof(*rnet));

		rc = PTR_ERR(lpni);
		CERROR("Error %d creating route %s %d %s\n", rc,
		       libcfs_net2str(net), hops,
		       libcfs_nid2str(gateway));
		return rc;
	}

	LASSERT(lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
	gw = lpni->lpni_peer_net->lpn_peer;

	route->lr_gateway = gw;

	rnet2 = lnet_find_rnet_locked(net);
	if (rnet2 == NULL) {
		/* new network */
		list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
		rnet2 = rnet;
	}

	/* Search for a duplicate route (it's a NOOP if it is) */
	add_route = 1;
	list_for_each(route_entry, &rnet2->lrn_routes) {
		struct lnet_route *route2;

		route2 = list_entry(route_entry, struct lnet_route, lr_list);
		if (route2->lr_gateway == route->lr_gateway) {
			add_route = 0;
			break;
		}

		/* our lookups must be true */
		LASSERT(route2->lr_gateway->lp_primary_nid != gateway);
	}

	/*
	 * It is possible to add multiple routes through the same peer,
	 * but it'll be using a different NID of that peer. When the
	 * gateway is discovered, discovery will consolidate the different
	 * peers into one peer. In this case the discovery code will have
	 * to move the routes from the peer that's being deleted to the
	 * consolidated peer lp_routes list
	 */
	if (add_route) {
		gw->lp_health_sensitivity = sensitivity;
		lnet_add_route_to_rnet(rnet2, route);
		if (lnet_peer_discovery_disabled)
			CWARN("Consider turning discovery on to enable full Multi-Rail routing functionality\n");
	}

	/*
	 * get rid of the reference on the lpni.
	 */
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(LNET_LOCK_EX);

	rc = 0;

	if (!add_route) {
		rc = -EEXIST;
		LIBCFS_FREE(route, sizeof(*route));
	}

	if (rnet != rnet2)
		LIBCFS_FREE(rnet, sizeof(*rnet));

	/* kick start the monitor thread to handle the added route */
	complete(&the_lnet.ln_mt_wait_complete);

	return rc;
}

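/*
 * Detach every route in @route_list that goes through gateway @gw_nid
 * (or every route, if @gw_nid is LNET_NID_ANY) and collect the victims
 * on @zombies so the caller can free them outside the net lock.
 */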
static void
lnet_del_route_from_rnet(lnet_nid_t gw_nid, struct list_head *route_list,
			 struct list_head *zombies)
{
	struct lnet_peer *gateway;
	struct lnet_route *route;
	struct lnet_route *tmp;

	list_for_each_entry_safe(route, tmp, route_list, lr_list) {
		gateway = route->lr_gateway;
		if (gw_nid != LNET_NID_ANY &&
		    gw_nid != gateway->lp_primary_nid)
			continue;

		/*
		 * move to zombie to delete outside the lock
		 * Note that this function is called with the
		 * ln_api_mutex held as well as the exclusive net
		 * lock. Adding to the remote net list happens
		 * under the same conditions. Same goes for the
		 * gateway router list
		 */
		list_move(&route->lr_list, zombies);
		the_lnet.ln_remote_nets_version++;

		list_del(&route->lr_gwlist);
		lnet_rtr_decref_locked(gateway);
	}
}

int
lnet_del_route(__u32 net, lnet_nid_t gw_nid)
{
	struct list_head rnet_zombies;
	struct lnet_remotenet *rnet;
	struct lnet_remotenet *tmp;
	struct list_head *rn_list;
	struct lnet_peer_ni *lpni;
	struct lnet_route *route;
	struct list_head zombies;
	struct lnet_peer *lp = NULL;
	int i;

	INIT_LIST_HEAD(&rnet_zombies);
	INIT_LIST_HEAD(&zombies);

	CDEBUG(D_NET, "Del route: net %s : gw %s\n",
	       libcfs_net2str(net), libcfs_nid2str(gw_nid));

	/* NB Caller may specify either all routes via the given gateway
	 * (gw_nid == LNET_NID_ANY) or a specific route entry (actual NIDs) */

	lnet_net_lock(LNET_LOCK_EX);

	lpni = lnet_find_peer_ni_locked(gw_nid);
	if (lpni) {
		lp = lpni->lpni_peer_net->lpn_peer;
		LASSERT(lp);
		gw_nid = lp->lp_primary_nid;
		lnet_peer_ni_decref_locked(lpni);
	}

	if (net != LNET_NIDNET(LNET_NID_ANY)) {
		rnet = lnet_find_rnet_locked(net);
		if (!rnet) {
			lnet_net_unlock(LNET_LOCK_EX);
			return -ENOENT;
		}

		lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
					 &zombies);
		if (list_empty(&rnet->lrn_routes))
			list_move(&rnet->lrn_list, &rnet_zombies);

		goto delete_zombies;
	}

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];

		list_for_each_entry_safe(rnet, tmp, rn_list, lrn_list) {
			lnet_del_route_from_rnet(gw_nid, &rnet->lrn_routes,
						 &zombies);
			if (list_empty(&rnet->lrn_routes))
				list_move(&rnet->lrn_list, &rnet_zombies);
		}
	}

delete_zombies:
	/*
	 * check if there are any routes remaining on the gateway
	 * If there are no more routes make sure to set the peer's
	 * lp_disc_net_id to 0 (invalid), in case we add more routes in
	 * the future on that gateway, then we start our discovery process
	 * from scratch
	 */
	if (lpni) {
		if (list_empty(&lp->lp_routes))
			lp->lp_disc_net_id = 0;
	}

	lnet_net_unlock(LNET_LOCK_EX);

	while (!list_empty(&zombies)) {
		route = list_first_entry(&zombies, struct lnet_route, lr_list);
		list_del(&route->lr_list);
		LIBCFS_FREE(route, sizeof(*route));
	}

	while (!list_empty(&rnet_zombies)) {
		rnet = list_first_entry(&rnet_zombies, struct lnet_remotenet,
					lrn_list);
		list_del(&rnet->lrn_list);
		LIBCFS_FREE(rnet, sizeof(*rnet));
	}

	return 0;
}

void
lnet_destroy_routes(void)
{
	lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}

int lnet_get_rtr_pool_cfg(int cpt, struct lnet_ioctl_pool_cfg *pool_cfg)
{
	struct lnet_rtrbufpool *rbp;
	int i, rc = -ENOENT, j;

	if (the_lnet.ln_rtrpools == NULL)
		return rc;

	cfs_percpt_for_each(rbp, i, the_lnet.ln_rtrpools) {
		if (i != cpt)
			continue;

		lnet_net_lock(i);
		for (j = 0; j < LNET_NRBPOOLS; j++) {
			pool_cfg->pl_pools[j].pl_npages = rbp[j].rbp_npages;
			pool_cfg->pl_pools[j].pl_nbuffers = rbp[j].rbp_nbuffers;
			pool_cfg->pl_pools[j].pl_credits = rbp[j].rbp_credits;
			pool_cfg->pl_pools[j].pl_mincredits = rbp[j].rbp_mincredits;
		}
		lnet_net_unlock(i);
		rc = 0;
		break;
	}

	lnet_net_lock(LNET_LOCK_EX);
	pool_cfg->pl_routing = the_lnet.ln_routing;
	lnet_net_unlock(LNET_LOCK_EX);

	return rc;
}

int
lnet_get_route(int idx, __u32 *net, __u32 *hops,
	       lnet_nid_t *gateway, __u32 *alive, __u32 *priority,
	       __u32 *sensitivity)
{
	struct lnet_remotenet *rnet;
	struct list_head *rn_list;
	struct lnet_route *route;
	struct list_head *e1;
	struct list_head *e2;
	int cpt;
	int i;

	cpt = lnet_net_lock_current();

	for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
		rn_list = &the_lnet.ln_remote_nets_hash[i];
		list_for_each(e1, rn_list) {
			rnet = list_entry(e1, struct lnet_remotenet, lrn_list);

			list_for_each(e2, &rnet->lrn_routes) {
				route = list_entry(e2, struct lnet_route,
						   lr_list);

				if (idx-- == 0) {
					*net	  = rnet->lrn_net;
					*gateway  = route->lr_nid;
					*hops	  = route->lr_hops;
					*priority = route->lr_priority;
					*sensitivity = route->lr_gateway->
						lp_health_sensitivity;
					*alive	  = lnet_is_route_alive(route);
					lnet_net_unlock(cpt);
					return 0;
				}
			}
		}
	}

	lnet_net_unlock(cpt);
	return -ENOENT;
}

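/*
 * Poll (sleeping one second at a time) until every configured router
 * has been discovered at least once. Only used when
 * check_routers_before_use is set.
 */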
static void
lnet_wait_known_routerstate(void)
{
	struct lnet_peer *rtr;
	struct list_head *entry;
	int all_known;

	LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);

	for (;;) {
		int cpt = lnet_net_lock_current();

		all_known = 1;
		list_for_each(entry, &the_lnet.ln_routers) {
			rtr = list_entry(entry, struct lnet_peer,
					 lp_rtr_list);

			spin_lock(&rtr->lp_lock);

			if ((rtr->lp_state & LNET_PEER_DISCOVERED) == 0) {
				all_known = 0;
				spin_unlock(&rtr->lp_lock);
				break;
			}
			spin_unlock(&rtr->lp_lock);
		}

		lnet_net_unlock(cpt);

		if (all_known)
			return;

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1));
	}
}

static bool
lnet_net_set_status_locked(struct lnet_net *net, __u32 status)
{
	struct lnet_ni *ni;
	bool update = false;

	list_for_each_entry(ni, &net->net_ni_list, ni_netlist) {
		lnet_ni_lock(ni);
		if (ni->ni_status != NULL &&
		    ni->ni_status->ns_status != status) {
			ni->ni_status->ns_status = status;
			update = true;
		}
		lnet_ni_unlock(ni);
	}

	return update;
}

static bool
lnet_update_ni_status_locked(void)
{
	struct lnet_net *net;
	bool push = false;
	time64_t now;
	time64_t timeout;

	LASSERT(the_lnet.ln_routing);

	timeout = router_ping_timeout + alive_router_check_interval;

	now = ktime_get_real_seconds();
	list_for_each_entry(net, &the_lnet.ln_nets, net_list) {
		if (net->net_lnd->lnd_type == LOLND)
			continue;

		if (now < net->net_last_alive + timeout)
			continue;

		spin_lock(&net->net_lock);
		/* re-check with lock */
		if (now < net->net_last_alive + timeout) {
			spin_unlock(&net->net_lock);
			continue;
		}
		spin_unlock(&net->net_lock);

		/*
		 * if the net didn't receive any traffic within the
		 * timeout on any of its constituent NIs, then mark all
		 * the NIs down.
		 */
		push = lnet_net_set_status_locked(net, LNET_NI_STATUS_DOWN);
	}

	return push;
}

void lnet_wait_router_start(void)
{
	if (check_routers_before_use) {
		/* Note that a helpful side-effect of pinging all known routers
		 * at startup is that it makes them drop stale connections they
		 * may have to a previous instance of me. */
		lnet_wait_known_routerstate();
	}
}

/*
 * This function is called from the monitor thread to check if there are
 * any active routers that need to be checked.
 */
bool
lnet_router_checker_active(void)
{
	/* Router Checker thread needs to run when routing is enabled in
	 * order to call lnet_update_ni_status_locked() */
	if (the_lnet.ln_routing)
		return true;

	return !list_empty(&the_lnet.ln_routers) &&
		alive_router_check_interval > 0;
}

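/*
 * Run one pass over the router list: find the next local peer net to
 * discover the gateway on, rate-limit the check to
 * alive_router_check_interval / lnet_current_net_count seconds, and
 * kick discovery for any gateway that is due. The pass restarts from
 * the top if the router list changes while the lock is dropped.
 */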
void
lnet_check_routers(void)
{
	struct lnet_peer_net *first_lpn = NULL;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	struct list_head *entry;
	struct lnet_peer *rtr;
	bool push = false;
	bool found_lpn;
	__u64 version;
	__u32 net_id;
	time64_t now;
	int cpt;
	int rc;

	cpt = lnet_net_lock_current();
rescan:
	version = the_lnet.ln_routers_version;

	list_for_each(entry, &the_lnet.ln_routers) {
		rtr = list_entry(entry, struct lnet_peer,
				 lp_rtr_list);

		now = ktime_get_real_seconds();

		/*
		 * only discover the router if we've passed
		 * alive_router_check_interval seconds. Some of the router
		 * interfaces could be down and in that case they would be
		 * undergoing recovery separately from this discovery.
		 */
		/* find next peer net which is also local */
		found_lpn = false;
		net_id = rtr->lp_disc_net_id;
		do {
			lpn = lnet_get_next_peer_net_locked(rtr, net_id);
			if (!lpn) {
				CERROR("gateway %s has no networks\n",
				       libcfs_nid2str(rtr->lp_primary_nid));
				break;
			}

			/* We looped back to the first peer net */
			if (first_lpn == lpn)
				break;
			if (!first_lpn)
				first_lpn = lpn;

			found_lpn = lnet_islocalnet_locked(lpn->lpn_net_id);
			net_id = lpn->lpn_net_id;
		} while (!found_lpn);

		if (!found_lpn || !lpn) {
			CERROR("no local network found for gateway %s\n",
			       libcfs_nid2str(rtr->lp_primary_nid));
			continue;
		}

		if (now - lpn->lpn_rtrcheck_timestamp <
		    alive_router_check_interval / lnet_current_net_count)
			continue;

		/*
		 * If we're currently discovering the peer then don't
		 * issue another discovery
		 */
		spin_lock(&rtr->lp_lock);
		if (rtr->lp_state & LNET_PEER_RTR_DISCOVERY) {
			spin_unlock(&rtr->lp_lock);
			continue;
		}
		/* make sure we actively discover the router */
		rtr->lp_state &= ~LNET_PEER_NIDS_UPTODATE;
		rtr->lp_state |= LNET_PEER_RTR_DISCOVERY;
		spin_unlock(&rtr->lp_lock);

		/* find the peer_ni associated with the primary NID */
		lpni = lnet_peer_get_ni_locked(rtr, rtr->lp_primary_nid);
		if (!lpni) {
			CDEBUG(D_NET, "Expected to find an lpni for %s, but none found\n",
			       libcfs_nid2str(rtr->lp_primary_nid));
			continue;
		}
		lnet_peer_ni_addref_locked(lpni);

		/* specify the net to use */
		rtr->lp_disc_net_id = lpn->lpn_net_id;

		/* discover the router */
		CDEBUG(D_NET, "discover %s, cpt = %d\n",
		       libcfs_nid2str(lpni->lpni_nid), cpt);
		rc = lnet_discover_peer_locked(lpni, cpt, false);

		/* decrement ref count acquired by find_peer_ni_locked() */
		lnet_peer_ni_decref_locked(lpni);

		if (!rc)
			lpn->lpn_rtrcheck_timestamp = now;
		else
			CERROR("Failed to discover router %s\n",
			       libcfs_nid2str(rtr->lp_primary_nid));

		/* NB dropped lock */
		if (version != the_lnet.ln_routers_version) {
			/* the routers list has changed */
			goto rescan;
		}
	}

	if (the_lnet.ln_routing)
		push = lnet_update_ni_status_locked();

	lnet_net_unlock(cpt);

	/* if the status of the ni changed update the peers */
	if (push)
		lnet_push_update_to_peers(1);
}

static void
lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages)
{
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);

	while (--npages >= 0)
		__free_page(rb->rb_kiov[npages].kiov_page);

	LIBCFS_FREE(rb, sz);
}

static struct lnet_rtrbuf *
lnet_new_rtrbuf(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
	struct page *page;
	struct lnet_rtrbuf *rb;
	int i;

	LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
	if (rb == NULL)
		return NULL;

	rb->rb_pool = rbp;

	for (i = 0; i < npages; i++) {
		page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
					  GFP_KERNEL | __GFP_ZERO);
		if (page == NULL) {
			while (--i >= 0)
				__free_page(rb->rb_kiov[i].kiov_page);

			LIBCFS_FREE(rb, sz);
			return NULL;
		}

		rb->rb_kiov[i].kiov_len = PAGE_SIZE;
		rb->rb_kiov[i].kiov_offset = 0;
		rb->rb_kiov[i].kiov_page = page;
	}

	return rb;
}

static void
lnet_rtrpool_free_bufs(struct lnet_rtrbufpool *rbp, int cpt)
{
	int npages = rbp->rbp_npages;
	struct lnet_rtrbuf *rb;
	struct list_head tmp;

	if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
		return;

	INIT_LIST_HEAD(&tmp);

	lnet_net_lock(cpt);
	list_splice_init(&rbp->rbp_msgs, &tmp);
	lnet_drop_routed_msgs_locked(&tmp, cpt);
	list_splice_init(&rbp->rbp_bufs, &tmp);
	rbp->rbp_req_nbuffers = 0;
	rbp->rbp_nbuffers = rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
	lnet_net_unlock(cpt);

	/* Free buffers on the free list. */
	while (!list_empty(&tmp)) {
		rb = list_entry(tmp.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}
}

static int
lnet_rtrpool_adjust_bufs(struct lnet_rtrbufpool *rbp, int nbufs, int cpt)
{
	struct list_head rb_list;
	struct lnet_rtrbuf *rb;
	int num_rb;
	int num_buffers = 0;
	int old_req_nbufs;
	int npages = rbp->rbp_npages;

	lnet_net_lock(cpt);
	/* If we are called for less buffers than already in the pool, we
	 * just lower the req_nbuffers number and excess buffers will be
	 * thrown away as they are returned to the free list. Credits
	 * then get adjusted as well.
	 * If we already have enough buffers allocated to serve the
	 * increase requested, then we can treat that the same way as we
	 * do the decrease. */
	num_rb = nbufs - rbp->rbp_nbuffers;
	if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
		rbp->rbp_req_nbuffers = nbufs;
		lnet_net_unlock(cpt);
		return 0;
	}
	/* store the older value of rbp_req_nbuffers and then set it to
	 * the new request to prevent lnet_return_rx_credits_locked() from
	 * freeing buffers that we need to keep around */
	old_req_nbufs = rbp->rbp_req_nbuffers;
	rbp->rbp_req_nbuffers = nbufs;
	lnet_net_unlock(cpt);

	INIT_LIST_HEAD(&rb_list);

	/* allocate the buffers on a local list first. If all buffers are
	 * allocated successfully then join this list to the rbp buffer
	 * list. If not then free all allocated buffers. */
	while (num_rb-- > 0) {
		rb = lnet_new_rtrbuf(rbp, cpt);
		if (rb == NULL) {
			CERROR("Failed to allocate %d route bufs of %d pages\n",
			       nbufs, npages);

			lnet_net_lock(cpt);
			rbp->rbp_req_nbuffers = old_req_nbufs;
			lnet_net_unlock(cpt);

			goto failed;
		}

		list_add(&rb->rb_list, &rb_list);
		num_buffers++;
	}

	lnet_net_lock(cpt);

	list_splice_tail(&rb_list, &rbp->rbp_bufs);
	rbp->rbp_nbuffers += num_buffers;
	rbp->rbp_credits += num_buffers;
	rbp->rbp_mincredits = rbp->rbp_credits;
	/* We need to schedule blocked msg using the newly
	 * added buffers. */
	while (!list_empty(&rbp->rbp_bufs) &&
	       !list_empty(&rbp->rbp_msgs))
		lnet_schedule_blocked_locked(rbp);

	lnet_net_unlock(cpt);

	return 0;

failed:
	while (!list_empty(&rb_list)) {
		rb = list_entry(rb_list.next, struct lnet_rtrbuf, rb_list);
		list_del(&rb->rb_list);
		lnet_destroy_rtrbuf(rb, npages);
	}

	return -ENOMEM;
}

static void
lnet_rtrpool_init(struct lnet_rtrbufpool *rbp, int npages)
{
	INIT_LIST_HEAD(&rbp->rbp_msgs);
	INIT_LIST_HEAD(&rbp->rbp_bufs);

	rbp->rbp_npages = npages;
	rbp->rbp_credits = 0;
	rbp->rbp_mincredits = 0;
}

void
lnet_rtrpools_free(int keep_pools)
{
	struct lnet_rtrbufpool *rtrp;
	int i;

	if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
		return;

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
		lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
	}

	if (!keep_pools) {
		cfs_percpt_free(the_lnet.ln_rtrpools);
		the_lnet.ln_rtrpools = NULL;
	}
}

static int
lnet_nrb_tiny_calculate(void)
{
	int nrbs = LNET_NRB_TINY;

	if (tiny_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "tiny_router_buffers=%d invalid when routing enabled\n",
				   tiny_router_buffers);
		return -EINVAL;
	}

	if (tiny_router_buffers > 0)
		nrbs = tiny_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_TINY_MIN);
}

static int
lnet_nrb_small_calculate(void)
{
	int nrbs = LNET_NRB_SMALL;

	if (small_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "small_router_buffers=%d invalid when routing enabled\n",
				   small_router_buffers);
		return -EINVAL;
	}

	if (small_router_buffers > 0)
		nrbs = small_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_SMALL_MIN);
}

static int
lnet_nrb_large_calculate(void)
{
	int nrbs = LNET_NRB_LARGE;

	if (large_router_buffers < 0) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "large_router_buffers=%d invalid when routing enabled\n",
				   large_router_buffers);
		return -EINVAL;
	}

	if (large_router_buffers > 0)
		nrbs = large_router_buffers;

	nrbs /= LNET_CPT_NUMBER;
	return max(nrbs, LNET_NRB_LARGE_MIN);
}

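/*
 * The router keeps three buffer pools per CPT: tiny (zero payload
 * pages), small (LNET_NRB_SMALL_PAGES) and large (LNET_NRB_LARGE_PAGES,
 * sized to hold a full LNET_MTU payload). The *_calculate() helpers
 * above spread the node-wide totals across CPTs, clamped to the
 * per-CPT minimums.
 */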
int
lnet_rtrpools_alloc(int im_a_router)
{
	struct lnet_rtrbufpool *rtrp;
	int nrb_tiny;
	int nrb_small;
	int nrb_large;
	int rc;
	int i;

	if (!strcmp(forwarding, "")) {
		/* not set either way */
		if (!im_a_router)
			return 0;
	} else if (!strcmp(forwarding, "disabled")) {
		/* explicitly disabled */
		return 0;
	} else if (!strcmp(forwarding, "enabled")) {
		/* explicitly enabled */
	} else {
		LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either 'enabled' or 'disabled'\n");
		return -EINVAL;
	}

	nrb_tiny = lnet_nrb_tiny_calculate();
	if (nrb_tiny < 0)
		return -EINVAL;

	nrb_small = lnet_nrb_small_calculate();
	if (nrb_small < 0)
		return -EINVAL;

	nrb_large = lnet_nrb_large_calculate();
	if (nrb_large < 0)
		return -EINVAL;

	the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
						LNET_NRBPOOLS *
						sizeof(struct lnet_rtrbufpool));
	if (the_lnet.ln_rtrpools == NULL) {
		LCONSOLE_ERROR_MSG(0x10c,
				   "Failed to initialize router buffer pool\n");
		return -ENOMEM;
	}

	cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
		lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
					      nrb_tiny, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
				  LNET_NRB_SMALL_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
					      nrb_small, i);
		if (rc != 0)
			goto failed;

		lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
				  LNET_NRB_LARGE_PAGES);
		rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
					      nrb_large, i);
		if (rc != 0)
			goto failed;
	}

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;
	lnet_net_unlock(LNET_LOCK_EX);
	complete(&the_lnet.ln_mt_wait_complete);
	return 0;

failed:
	lnet_rtrpools_free(0);
	return rc;
}

static int
lnet_rtrpools_adjust_helper(int tiny, int small, int large)
{
	int nrb = 0;
	int rc = 0;
	int i;
	struct lnet_rtrbufpool *rtrp;

	/* If the provided values for each buffer pool are different than the
	 * configured values, we need to take action. */

	if (tiny >= 0) {
		tiny_router_buffers = tiny;
		nrb = lnet_nrb_tiny_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	if (small >= 0) {
		small_router_buffers = small;
		nrb = lnet_nrb_small_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	if (large >= 0) {
		large_router_buffers = large;
		nrb = lnet_nrb_large_calculate();
		cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
			rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
						      nrb, i);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

int
lnet_rtrpools_adjust(int tiny, int small, int large)
{
	/* this function doesn't revert the changes if adding new buffers
	 * failed. It's up to the user space caller to revert the
	 * changes. */

	if (!the_lnet.ln_routing)
		return 0;

	return lnet_rtrpools_adjust_helper(tiny, small, large);
}

int
lnet_rtrpools_enable(void)
{
	int rc = 0;

	if (the_lnet.ln_routing)
		return 0;

	if (the_lnet.ln_rtrpools == NULL)
		/* If routing is turned off, and we have never
		 * initialized the pools before, just call the
		 * standard buffer pool allocation routine as
		 * if we are just configuring this for the first
		 * time. */
		rc = lnet_rtrpools_alloc(1);
	else
		rc = lnet_rtrpools_adjust_helper(0, 0, 0);
	if (rc != 0)
		return rc;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 1;

	the_lnet.ln_ping_target->pb_info.pi_features &=
		~LNET_PING_FEAT_RTE_DISABLED;
	lnet_net_unlock(LNET_LOCK_EX);

	if (lnet_peer_discovery_disabled)
		CWARN("Consider turning discovery on to enable full Multi-Rail routing functionality\n");

	return rc;
}

void
lnet_rtrpools_disable(void)
{
	if (!the_lnet.ln_routing)
		return;

	lnet_net_lock(LNET_LOCK_EX);
	the_lnet.ln_routing = 0;
	the_lnet.ln_ping_target->pb_info.pi_features |=
		LNET_PING_FEAT_RTE_DISABLED;

	tiny_router_buffers = 0;
	small_router_buffers = 0;
	large_router_buffers = 0;
	lnet_net_unlock(LNET_LOCK_EX);
	lnet_rtrpools_free(1);
}

static inline void
lnet_notify_peer_down(struct lnet_ni *ni, lnet_nid_t nid)
{
	if (ni->ni_net->net_lnd->lnd_notify_peer_down != NULL)
		(ni->ni_net->net_lnd->lnd_notify_peer_down)(nid);
}

/*
 * ni: local NI used to communicate with the peer
 * nid: peer NID
 * alive: true if peer is alive, false otherwise
 * reset: reset health value. This is requested by the LND.
 * when: notification time.
 */
int
lnet_notify(struct lnet_ni *ni, lnet_nid_t nid, bool alive, bool reset,
	    time64_t when)
{
	struct lnet_peer_ni *lpni = NULL;
	struct lnet_route *route;
	struct lnet_peer *lp;
	time64_t now = ktime_get_seconds();
	int cpt;

	LASSERT(!in_interrupt());

	CDEBUG(D_NET, "%s notifying %s: %s\n",
	       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
	       libcfs_nid2str(nid),
	       alive ? "up" : "down");

	if (ni != NULL &&
	    LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
		CWARN("Ignoring notification of %s %s by %s (different net)\n",
		      libcfs_nid2str(nid), alive ? "birth" : "death",
		      libcfs_nid2str(ni->ni_nid));
		return -EINVAL;
	}

	/* can't do predictions... */
	if (when > now) {
		CWARN("Ignoring prediction from %s of %s %s %lld seconds in the future\n",
		      (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
		      libcfs_nid2str(nid), alive ? "up" : "down", when - now);
		return -EINVAL;
	}

	if (ni != NULL && !alive &&		/* LND telling me she's down */
	    !auto_down) {			/* auto-down disabled */
		CDEBUG(D_NET, "Auto-down disabled\n");
		return 0;
	}

	/* must lock 0 since this is used for synchronization */
	lnet_net_lock(0);

	if (the_lnet.ln_state != LNET_STATE_RUNNING) {
		lnet_net_unlock(0);
		return -ESHUTDOWN;
	}

	lpni = lnet_find_peer_ni_locked(nid);
	if (lpni == NULL) {
		/* nid not found */
		lnet_net_unlock(0);
		CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
		return 0;
	}

	if (alive) {
		if (reset)
			lnet_set_healthv(&lpni->lpni_healthv,
					 LNET_MAX_HEALTH_VALUE);
		else
			lnet_inc_healthv(&lpni->lpni_healthv);
	} else {
		lnet_handle_remote_failure_locked(lpni);
	}

	/* recalculate aliveness */
	alive = lnet_is_peer_ni_alive(lpni);
	lnet_net_unlock(0);

	if (ni != NULL && !alive)
		lnet_notify_peer_down(ni, lpni->lpni_nid);

	cpt = lpni->lpni_cpt;
	lnet_net_lock(cpt);
	if (lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer) {
		lp = lpni->lpni_peer_net->lpn_peer;
		list_for_each_entry(route, &lp->lp_routes, lr_gwlist)
			lnet_set_route_aliveness(route, alive);
	}
	/* drop the ref taken by lnet_find_peer_ni_locked() only after we
	 * are done dereferencing lpni */
	lnet_peer_ni_decref_locked(lpni);
	lnet_net_unlock(cpt);

	return 0;
}
EXPORT_SYMBOL(lnet_notify);