2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, 2016, Intel Corporation.
6 * This file is part of Lustre, https://wiki.hpdd.intel.com/
8 * Portals is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Portals is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Portals; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #define DEBUG_SUBSYSTEM S_LNET
24 #include <lnet/lib-lnet.h>
26 #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
27 #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
28 #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
29 #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
30 #define LNET_NRB_SMALL_PAGES 1
31 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
32 #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
33 #define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> PAGE_SHIFT)
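/*
 * Worked example (illustrative; assumes LNET_MTU is 1 MiB and 4 KiB pages,
 * i.e. PAGE_SHIFT == 12): a "large" router buffer must hold a full MTU, so
 * LNET_NRB_LARGE_PAGES = (1048576 + 4095) >> 12 = 256 pages per buffer,
 * while a "small" buffer (LNET_NRB_SMALL_PAGES) is a single page.
 */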
36 static char *forwarding = "";
37 module_param(forwarding, charp, 0444);
38 MODULE_PARM_DESC(forwarding, "Explicitly enable/disable forwarding between networks");
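/*
 * Usage sketch (assumed typical configuration, not part of this file): like
 * the other parameters below, "forwarding" is normally set as a modprobe
 * option, e.g. in /etc/modprobe.d/lustre.conf:
 *
 *   options lnet forwarding="enabled"
 *
 * The empty-string default means forwarding is decided by whether the node
 * is configured as a router (see lnet_rtrpools_alloc()).
 */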
40 static int tiny_router_buffers;
41 module_param(tiny_router_buffers, int, 0444);
42 MODULE_PARM_DESC(tiny_router_buffers, "# of 0 payload messages to buffer in the router");
43 static int small_router_buffers;
44 module_param(small_router_buffers, int, 0444);
45 MODULE_PARM_DESC(small_router_buffers, "# of small (1 page) messages to buffer in the router");
46 static int large_router_buffers;
47 module_param(large_router_buffers, int, 0444);
48 MODULE_PARM_DESC(large_router_buffers, "# of large messages to buffer in the router");
49 static int peer_buffer_credits;
50 module_param(peer_buffer_credits, int, 0444);
51 MODULE_PARM_DESC(peer_buffer_credits, "# router buffer credits per peer");
53 static int auto_down = 1;
54 module_param(auto_down, int, 0444);
55 MODULE_PARM_DESC(auto_down, "Automatically mark peers down on comms error");
58 lnet_peer_buffer_credits(struct lnet_net *net)
60 /* NI option overrides LNet default */
61 if (net->net_tunables.lct_peer_rtr_credits > 0)
62 return net->net_tunables.lct_peer_rtr_credits;
63 if (peer_buffer_credits > 0)
64 return peer_buffer_credits;
66 /* As an approximation, allow this peer the same number of router
67 * buffers as it is allowed outstanding sends */
68 return net->net_tunables.lct_peer_tx_credits;
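/*
 * Example of the fallback above (values assumed for illustration): if a
 * network is brought up with a peer tx credit tunable of 8 and neither
 * lct_peer_rtr_credits nor the peer_buffer_credits module parameter is set,
 * a peer on that network is granted 8 router buffer credits.
 */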
72 static int lnet_router_checker(void *);
74 static int check_routers_before_use;
75 module_param(check_routers_before_use, int, 0444);
76 MODULE_PARM_DESC(check_routers_before_use, "Assume routers are down and ping them before use");
78 int avoid_asym_router_failure = 1;
79 module_param(avoid_asym_router_failure, int, 0644);
80 MODULE_PARM_DESC(avoid_asym_router_failure, "Avoid asymmetrical router failures (0 to disable)");
82 static int dead_router_check_interval = 60;
83 module_param(dead_router_check_interval, int, 0644);
84 MODULE_PARM_DESC(dead_router_check_interval, "Seconds between dead router health checks (<= 0 to disable)");
86 static int live_router_check_interval = 60;
87 module_param(live_router_check_interval, int, 0644);
88 MODULE_PARM_DESC(live_router_check_interval, "Seconds between live router health checks (<= 0 to disable)");
90 static int router_ping_timeout = 50;
91 module_param(router_ping_timeout, int, 0644);
92 MODULE_PARM_DESC(router_ping_timeout, "Seconds to wait for the reply to a router health query");
95 lnet_peers_start_down(void)
97 return check_routers_before_use;
101 lnet_notify_locked(struct lnet_peer_ni *lp, int notifylnd, int alive,
104 if (cfs_time_before(when, lp->lpni_timestamp)) { /* out of date information */
105 CDEBUG(D_NET, "Out of date\n");
109 lp->lpni_timestamp = when; /* update timestamp */
110 lp->lpni_ping_deadline = 0; /* disable ping timeout */
112 if (lp->lpni_alive_count != 0 && /* got old news */
113 (!lp->lpni_alive) == (!alive)) { /* new date for old news */
114 CDEBUG(D_NET, "Old news\n");
118 /* Flag that notification is outstanding */
120 lp->lpni_alive_count++;
121 lp->lpni_alive = !(!alive); /* 1 bit! */
123 lp->lpni_notifylnd |= notifylnd;
125 lp->lpni_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
127 CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lpni_nid), alive);
131 lnet_ni_notify_locked(lnet_ni_t *ni, struct lnet_peer_ni *lp)
136 /* Notify only in 1 thread at any time to ensure ordered notification.
137 * NB individual events can be missed; the only guarantee is that you
138 * always get the most recent news */
140 if (lp->lpni_notifying || ni == NULL)
143 lp->lpni_notifying = 1;
145 while (lp->lpni_notify) {
146 alive = lp->lpni_alive;
147 notifylnd = lp->lpni_notifylnd;
149 lp->lpni_notifylnd = 0;
152 if (notifylnd && ni->ni_net->net_lnd->lnd_notify != NULL) {
153 lnet_net_unlock(lp->lpni_cpt);
155 /* A new notification could happen now; I'll handle it
156 * when control returns to me */
158 (ni->ni_net->net_lnd->lnd_notify)(ni, lp->lpni_nid,
161 lnet_net_lock(lp->lpni_cpt);
165 lp->lpni_notifying = 0;
170 lnet_rtr_addref_locked(struct lnet_peer_ni *lp)
172 LASSERT(atomic_read(&lp->lpni_refcount) > 0);
173 LASSERT(lp->lpni_rtr_refcount >= 0);
175 /* lnet_net_lock must be exclusively locked */
176 lp->lpni_rtr_refcount++;
177 if (lp->lpni_rtr_refcount == 1) {
178 struct list_head *pos;
180 /* a simple insertion sort */
181 list_for_each_prev(pos, &the_lnet.ln_routers) {
182 struct lnet_peer_ni *rtr =
183 list_entry(pos, struct lnet_peer_ni,
186 if (rtr->lpni_nid < lp->lpni_nid)
190 list_add(&lp->lpni_rtr_list, pos);
191 /* addref for the_lnet.ln_routers */
192 lnet_peer_ni_addref_locked(lp);
193 the_lnet.ln_routers_version++;
198 lnet_rtr_decref_locked(struct lnet_peer_ni *lp)
200 LASSERT(atomic_read(&lp->lpni_refcount) > 0);
201 LASSERT(lp->lpni_rtr_refcount > 0);
203 /* lnet_net_lock must be exclusively locked */
204 lp->lpni_rtr_refcount--;
205 if (lp->lpni_rtr_refcount == 0) {
206 LASSERT(list_empty(&lp->lpni_routes));
208 if (lp->lpni_rcd != NULL) {
209 list_add(&lp->lpni_rcd->rcd_list,
210 &the_lnet.ln_rcd_deathrow);
214 list_del(&lp->lpni_rtr_list);
215 /* decref for the_lnet.ln_routers */
216 lnet_peer_ni_decref_locked(lp);
217 the_lnet.ln_routers_version++;
222 lnet_find_rnet_locked(__u32 net)
224 lnet_remotenet_t *rnet;
225 struct list_head *tmp;
226 struct list_head *rn_list;
228 LASSERT(!the_lnet.ln_shutdown);
230 rn_list = lnet_net2rnethash(net);
231 list_for_each(tmp, rn_list) {
232 rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
234 if (rnet->lrn_net == net)
240 static void lnet_shuffle_seed(void)
245 struct timespec64 ts;
246 lnet_ni_t *ni = NULL;
251 cfs_get_random_bytes(seed, sizeof(seed));
253 /* Nodes with small feet have little entropy;
254 * the NID for this node gives the most entropy in the low bits */
255 while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
256 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
258 if (lnd_type != LOLND)
259 seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
263 cfs_srand(ts.tv_sec ^ seed[0], ts.tv_nsec ^ seed[1]);
268 /* NB expects LNET_LOCK held */
270 lnet_add_route_to_rnet(lnet_remotenet_t *rnet, lnet_route_t *route)
272 unsigned int len = 0;
273 unsigned int offset = 0;
278 list_for_each(e, &rnet->lrn_routes) {
282 /* len+1 positions to add a new entry, also prevents division by 0 */
283 offset = cfs_rand() % (len + 1);
284 list_for_each(e, &rnet->lrn_routes) {
289 list_add(&route->lr_list, e);
290 list_add(&route->lr_gwlist, &route->lr_gateway->lpni_routes);
292 the_lnet.ln_remote_nets_version++;
293 lnet_rtr_addref_locked(route->lr_gateway);
297 lnet_add_route(__u32 net, __u32 hops, lnet_nid_t gateway,
298 unsigned int priority)
301 lnet_remotenet_t *rnet;
302 lnet_remotenet_t *rnet2;
308 CDEBUG(D_NET, "Add route: net %s hops %d priority %u gw %s\n",
309 libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
311 if (gateway == LNET_NID_ANY ||
312 LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
313 net == LNET_NIDNET(LNET_NID_ANY) ||
314 LNET_NETTYP(net) == LOLND ||
315 LNET_NIDNET(gateway) == net ||
316 (hops != LNET_UNDEFINED_HOPS && (hops < 1 || hops > 255)))
319 if (lnet_islocalnet(net)) /* it's a local network */
322 /* Assume net, route, all new */
323 LIBCFS_ALLOC(route, sizeof(*route));
324 LIBCFS_ALLOC(rnet, sizeof(*rnet));
325 if (route == NULL || rnet == NULL) {
326 CERROR("Out of memory creating route %s %d %s\n",
327 libcfs_net2str(net), hops, libcfs_nid2str(gateway));
329 LIBCFS_FREE(route, sizeof(*route));
331 LIBCFS_FREE(rnet, sizeof(*rnet));
335 INIT_LIST_HEAD(&rnet->lrn_routes);
337 route->lr_hops = hops;
339 route->lr_priority = priority;
341 lnet_net_lock(LNET_LOCK_EX);
343 rc = lnet_nid2peerni_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
345 lnet_net_unlock(LNET_LOCK_EX);
347 LIBCFS_FREE(route, sizeof(*route));
348 LIBCFS_FREE(rnet, sizeof(*rnet));
350 if (rc == -EHOSTUNREACH) /* gateway is not on a local net. */
351 return rc; /* ignore the route entry */
352 CERROR("Error %d creating route %s %d %s\n", rc,
353 libcfs_net2str(net), hops,
354 libcfs_nid2str(gateway));
358 LASSERT(!the_lnet.ln_shutdown);
360 rnet2 = lnet_find_rnet_locked(net);
363 list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
367 /* Search for a duplicate route (adding a duplicate is a NOOP) */
369 list_for_each(e, &rnet2->lrn_routes) {
370 lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
372 if (route2->lr_gateway == route->lr_gateway) {
377 /* our lookups must be true */
378 LASSERT(route2->lr_gateway->lpni_nid != gateway);
382 lnet_peer_ni_addref_locked(route->lr_gateway); /* +1 for notify */
383 lnet_add_route_to_rnet(rnet2, route);
385 ni = lnet_get_next_ni_locked(route->lr_gateway->lpni_net, NULL);
386 lnet_net_unlock(LNET_LOCK_EX);
388 /* XXX Assume alive */
389 if (ni->ni_net->net_lnd->lnd_notify != NULL)
390 (ni->ni_net->net_lnd->lnd_notify)(ni, gateway, 1);
392 lnet_net_lock(LNET_LOCK_EX);
395 /* -1 for notify or !add_route */
396 lnet_peer_ni_decref_locked(route->lr_gateway);
397 lnet_net_unlock(LNET_LOCK_EX);
403 LIBCFS_FREE(route, sizeof(*route));
407 LIBCFS_FREE(rnet, sizeof(*rnet));
409 /* tell the router checker to start up, if it is configured */
410 wake_up(&the_lnet.ln_rc_waitq);
416 lnet_check_routes(void)
418 lnet_remotenet_t *rnet;
420 lnet_route_t *route2;
421 struct list_head *e1;
422 struct list_head *e2;
424 struct list_head *rn_list;
427 cpt = lnet_net_lock_current();
429 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
430 rn_list = &the_lnet.ln_remote_nets_hash[i];
431 list_for_each(e1, rn_list) {
432 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
435 list_for_each(e2, &rnet->lrn_routes) {
440 route = list_entry(e2, lnet_route_t,
443 if (route2 == NULL) {
448 if (route->lr_gateway->lpni_net ==
449 route2->lr_gateway->lpni_net)
452 nid1 = route->lr_gateway->lpni_nid;
453 nid2 = route2->lr_gateway->lpni_nid;
456 lnet_net_unlock(cpt);
458 CERROR("Routes to %s via %s and %s not "
461 libcfs_nid2str(nid1),
462 libcfs_nid2str(nid2));
468 lnet_net_unlock(cpt);
473 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
475 struct lnet_peer_ni *gateway;
476 lnet_remotenet_t *rnet;
478 struct list_head *e1;
479 struct list_head *e2;
481 struct list_head *rn_list;
484 CDEBUG(D_NET, "Del route: net %s : gw %s\n",
485 libcfs_net2str(net), libcfs_nid2str(gw_nid));
487 /* NB Caller may specify either all routes via the given gateway
488 * or a specific route entry (actual NIDs) */
490 lnet_net_lock(LNET_LOCK_EX);
491 if (net == LNET_NIDNET(LNET_NID_ANY))
492 rn_list = &the_lnet.ln_remote_nets_hash[0];
494 rn_list = lnet_net2rnethash(net);
497 list_for_each(e1, rn_list) {
498 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
500 if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
501 net == rnet->lrn_net))
504 list_for_each(e2, &rnet->lrn_routes) {
505 route = list_entry(e2, lnet_route_t, lr_list);
507 gateway = route->lr_gateway;
508 if (!(gw_nid == LNET_NID_ANY ||
509 gw_nid == gateway->lpni_nid))
512 list_del(&route->lr_list);
513 list_del(&route->lr_gwlist);
514 the_lnet.ln_remote_nets_version++;
516 if (list_empty(&rnet->lrn_routes))
517 list_del(&rnet->lrn_list);
521 lnet_rtr_decref_locked(gateway);
522 lnet_peer_ni_decref_locked(gateway);
524 lnet_net_unlock(LNET_LOCK_EX);
526 LIBCFS_FREE(route, sizeof(*route));
529 LIBCFS_FREE(rnet, sizeof(*rnet));
532 lnet_net_lock(LNET_LOCK_EX);
537 if (net == LNET_NIDNET(LNET_NID_ANY) &&
538 ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
539 rn_list = &the_lnet.ln_remote_nets_hash[idx];
542 lnet_net_unlock(LNET_LOCK_EX);
548 lnet_destroy_routes (void)
550 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
553 int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
555 int i, rc = -ENOENT, j;
557 if (the_lnet.ln_rtrpools == NULL)
560 for (i = 0; i < LNET_NRBPOOLS; i++) {
561 lnet_rtrbufpool_t *rbp;
563 lnet_net_lock(LNET_LOCK_EX);
564 cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
568 pool_cfg->pl_pools[i].pl_npages = rbp[i].rbp_npages;
569 pool_cfg->pl_pools[i].pl_nbuffers = rbp[i].rbp_nbuffers;
570 pool_cfg->pl_pools[i].pl_credits = rbp[i].rbp_credits;
571 pool_cfg->pl_pools[i].pl_mincredits = rbp[i].rbp_mincredits;
575 lnet_net_unlock(LNET_LOCK_EX);
578 lnet_net_lock(LNET_LOCK_EX);
579 pool_cfg->pl_routing = the_lnet.ln_routing;
580 lnet_net_unlock(LNET_LOCK_EX);
586 lnet_get_route(int idx, __u32 *net, __u32 *hops,
587 lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
589 struct list_head *e1;
590 struct list_head *e2;
591 lnet_remotenet_t *rnet;
595 struct list_head *rn_list;
597 cpt = lnet_net_lock_current();
599 for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
600 rn_list = &the_lnet.ln_remote_nets_hash[i];
601 list_for_each(e1, rn_list) {
602 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
604 list_for_each(e2, &rnet->lrn_routes) {
605 route = list_entry(e2, lnet_route_t,
609 *net = rnet->lrn_net;
610 *hops = route->lr_hops;
611 *priority = route->lr_priority;
612 *gateway = route->lr_gateway->lpni_nid;
613 *alive = lnet_is_route_alive(route);
614 lnet_net_unlock(cpt);
621 lnet_net_unlock(cpt);
626 lnet_swap_pinginfo(struct lnet_ping_info *info)
629 struct lnet_ni_status *stat;
631 __swab32s(&info->pi_magic);
632 __swab32s(&info->pi_features);
633 __swab32s(&info->pi_pid);
634 __swab32s(&info->pi_nnis);
635 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
636 stat = &info->pi_ni[i];
637 __swab64s(&stat->ns_nid);
638 __swab32s(&stat->ns_status);
644 * parse router-checker pinginfo, record number of down NIs for remote
645 * networks on that router.
648 lnet_parse_rc_info(lnet_rc_data_t *rcd)
650 struct lnet_ping_info *info = rcd->rcd_pinginfo;
651 struct lnet_peer_ni *gw = rcd->rcd_gateway;
657 if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
658 lnet_swap_pinginfo(info);
660 /* NB always racing with network! */
661 if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
662 CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
663 libcfs_nid2str(gw->lpni_nid), info->pi_magic);
664 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
668 gw->lpni_ping_feats = info->pi_features;
669 if ((gw->lpni_ping_feats & LNET_PING_FEAT_MASK) == 0) {
670 CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
671 libcfs_nid2str(gw->lpni_nid), gw->lpni_ping_feats);
672 return; /* nothing I can understand */
675 if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
676 return; /* can't carry NI status info */
678 list_for_each_entry(rte, &gw->lpni_routes, lr_gwlist) {
683 if ((gw->lpni_ping_feats & LNET_PING_FEAT_RTE_DISABLED) != 0) {
688 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
689 struct lnet_ni_status *stat = &info->pi_ni[i];
690 lnet_nid_t nid = stat->ns_nid;
692 if (nid == LNET_NID_ANY) {
693 CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
694 libcfs_nid2str(gw->lpni_nid));
695 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
699 if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
702 if (stat->ns_status == LNET_NI_STATUS_DOWN) {
707 if (stat->ns_status == LNET_NI_STATUS_UP) {
708 if (LNET_NIDNET(nid) == rte->lr_net) {
715 CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
716 libcfs_nid2str(gw->lpni_nid), stat->ns_status);
717 gw->lpni_ping_feats = LNET_PING_FEAT_INVAL;
721 if (up) { /* ignore downed NIs if NI for dest network is up */
725 /* if @down is zero and this route is single-hop, it means
726 * we can't find an NI for the target network */
727 if (down == 0 && rte->lr_hops == 1)
730 rte->lr_downis = down;
735 lnet_router_checker_event(lnet_event_t *event)
737 lnet_rc_data_t *rcd = event->md.user_ptr;
738 struct lnet_peer_ni *lp;
740 LASSERT(rcd != NULL);
742 if (event->unlinked) {
743 LNetInvalidateHandle(&rcd->rcd_mdh);
747 LASSERT(event->type == LNET_EVENT_SEND ||
748 event->type == LNET_EVENT_REPLY);
750 lp = rcd->rcd_gateway;
753 /* NB: this is called while holding lnet_res_lock; a few places
754 * need to hold both locks at the same time, so please take
755 * care of lock ordering */
756 lnet_net_lock(lp->lpni_cpt);
757 if (!lnet_isrouter(lp) || lp->lpni_rcd != rcd) {
758 /* ignore if no longer a router or rcd is replaced */
762 if (event->type == LNET_EVENT_SEND) {
763 lp->lpni_ping_notsent = 0;
764 if (event->status == 0)
768 /* LNET_EVENT_REPLY */
769 /* A successful REPLY means the router is up. If _any_ comms
770 * to the router fail I assume it's down (this will happen if
771 * we ping alive routers to try to detect router death before
772 * apps get burned). */
774 lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
775 /* The router checker will wake up very shortly and do the
776 * actual notification.
777 * XXX If 'lp' stops being a router before then, it will still
778 * have the notification pending!!! */
780 if (avoid_asym_router_failure && event->status == 0)
781 lnet_parse_rc_info(rcd);
784 lnet_net_unlock(lp->lpni_cpt);
788 lnet_wait_known_routerstate(void)
790 struct lnet_peer_ni *rtr;
791 struct list_head *entry;
794 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
797 int cpt = lnet_net_lock_current();
800 list_for_each(entry, &the_lnet.ln_routers) {
801 rtr = list_entry(entry, struct lnet_peer_ni,
804 if (rtr->lpni_alive_count == 0) {
810 lnet_net_unlock(cpt);
815 set_current_state(TASK_UNINTERRUPTIBLE);
816 schedule_timeout(cfs_time_seconds(1));
821 lnet_router_ni_update_locked(struct lnet_peer_ni *gw, __u32 net)
825 if ((gw->lpni_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
826 list_for_each_entry(rte, &gw->lpni_routes, lr_gwlist) {
827 if (rte->lr_net == net) {
836 lnet_update_ni_status_locked(void)
838 lnet_ni_t *ni = NULL;
842 LASSERT(the_lnet.ln_routing);
844 timeout = router_ping_timeout +
845 MAX(live_router_check_interval, dead_router_check_interval);
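/*
 * Worked example with the module defaults above (illustrative):
 * router_ping_timeout=50 and live/dead router check intervals of 60
 * give timeout = 50 + max(60, 60) = 110 seconds, so a local NI that
 * has reported no activity for 110s is marked down below.
 */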
847 now = ktime_get_real_seconds();
848 while ((ni = lnet_get_next_ni_locked(NULL, ni))) {
849 if (ni->ni_net->net_lnd->lnd_type == LOLND)
852 if (now < ni->ni_last_alive + timeout)
856 /* re-check with lock */
857 if (now < ni->ni_last_alive + timeout) {
862 LASSERT(ni->ni_status != NULL);
864 if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
865 CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
866 libcfs_nid2str(ni->ni_nid), timeout);
867 /* NB: so far, this is the only place to set
868 * NI status to "down" */
869 ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
876 lnet_destroy_rc_data(lnet_rc_data_t *rcd)
878 LASSERT(list_empty(&rcd->rcd_list));
879 /* detached from network */
880 LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
882 if (rcd->rcd_gateway != NULL) {
883 int cpt = rcd->rcd_gateway->lpni_cpt;
886 lnet_peer_ni_decref_locked(rcd->rcd_gateway);
887 lnet_net_unlock(cpt);
890 if (rcd->rcd_pinginfo != NULL)
891 LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
893 LIBCFS_FREE(rcd, sizeof(*rcd));
896 static lnet_rc_data_t *
897 lnet_create_rc_data_locked(struct lnet_peer_ni *gateway)
899 lnet_rc_data_t *rcd = NULL;
900 struct lnet_ping_info *pi;
904 lnet_net_unlock(gateway->lpni_cpt);
906 LIBCFS_ALLOC(rcd, sizeof(*rcd));
910 LNetInvalidateHandle(&rcd->rcd_mdh);
911 INIT_LIST_HEAD(&rcd->rcd_list);
913 LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
917 for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
918 pi->pi_ni[i].ns_nid = LNET_NID_ANY;
919 pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
921 rcd->rcd_pinginfo = pi;
923 LASSERT(!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
924 rc = LNetMDBind((lnet_md_t){.start = pi,
926 .length = LNET_PINGINFO_SIZE,
927 .threshold = LNET_MD_THRESH_INF,
928 .options = LNET_MD_TRUNCATE,
929 .eq_handle = the_lnet.ln_rc_eqh},
933 CERROR("Can't bind MD: %d\n", rc);
938 lnet_net_lock(gateway->lpni_cpt);
939 /* router table changed or someone has created rcd for this gateway */
940 if (!lnet_isrouter(gateway) || gateway->lpni_rcd != NULL) {
941 lnet_net_unlock(gateway->lpni_cpt);
945 lnet_peer_ni_addref_locked(gateway);
946 rcd->rcd_gateway = gateway;
947 gateway->lpni_rcd = rcd;
948 gateway->lpni_ping_notsent = 0;
954 if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
955 rc = LNetMDUnlink(rcd->rcd_mdh);
958 lnet_destroy_rc_data(rcd);
961 lnet_net_lock(gateway->lpni_cpt);
962 return gateway->lpni_rcd;
966 lnet_router_check_interval (struct lnet_peer_ni *rtr)
970 secs = rtr->lpni_alive ? live_router_check_interval :
971 dead_router_check_interval;
979 lnet_ping_router_locked (struct lnet_peer_ni *rtr)
981 lnet_rc_data_t *rcd = NULL;
982 cfs_time_t now = cfs_time_current();
986 lnet_peer_ni_addref_locked(rtr);
988 if (rtr->lpni_ping_deadline != 0 && /* ping timed out? */
989 cfs_time_after(now, rtr->lpni_ping_deadline))
990 lnet_notify_locked(rtr, 1, 0, now);
992 /* Run any outstanding notifications */
993 ni = lnet_get_next_ni_locked(rtr->lpni_net, NULL);
994 lnet_ni_notify_locked(ni, rtr);
996 if (!lnet_isrouter(rtr) ||
997 the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
998 /* router table changed or router checker is shutting down */
999 lnet_peer_ni_decref_locked(rtr);
1003 rcd = rtr->lpni_rcd != NULL ?
1004 rtr->lpni_rcd : lnet_create_rc_data_locked(rtr);
1009 secs = lnet_router_check_interval(rtr);
1012 "rtr %s %d: deadline %lu ping_notsent %d alive %d "
1013 "alive_count %d lpni_ping_timestamp %lu\n",
1014 libcfs_nid2str(rtr->lpni_nid), secs,
1015 rtr->lpni_ping_deadline, rtr->lpni_ping_notsent,
1016 rtr->lpni_alive, rtr->lpni_alive_count, rtr->lpni_ping_timestamp);
1018 if (secs != 0 && !rtr->lpni_ping_notsent &&
1019 cfs_time_after(now, cfs_time_add(rtr->lpni_ping_timestamp,
1020 cfs_time_seconds(secs)))) {
1022 lnet_process_id_t id;
1023 lnet_handle_md_t mdh;
1025 id.nid = rtr->lpni_nid;
1026 id.pid = LNET_PID_LUSTRE;
1027 CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
1029 rtr->lpni_ping_notsent = 1;
1030 rtr->lpni_ping_timestamp = now;
1034 if (rtr->lpni_ping_deadline == 0) {
1035 rtr->lpni_ping_deadline =
1036 cfs_time_shift(router_ping_timeout);
1039 lnet_net_unlock(rtr->lpni_cpt);
1041 rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
1042 LNET_PROTO_PING_MATCHBITS, 0);
1044 lnet_net_lock(rtr->lpni_cpt);
1046 rtr->lpni_ping_notsent = 0; /* no event pending */
1049 lnet_peer_ni_decref_locked(rtr);
1054 lnet_router_checker_start(void)
1058 struct task_struct *task;
1060 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1062 if (check_routers_before_use &&
1063 dead_router_check_interval <= 0) {
1064 LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
1065 " set if 'check_routers_before_use' is set"
1070 sema_init(&the_lnet.ln_rc_signal, 0);
1072 rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
1074 CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
1078 the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
1079 task = kthread_run(lnet_router_checker, NULL, "router_checker");
1082 CERROR("Can't start router checker thread: %d\n", rc);
1083 /* block until event callback signals exit */
1084 down(&the_lnet.ln_rc_signal);
1085 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1087 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1091 if (check_routers_before_use) {
1092 /* Note that a helpful side-effect of pinging all known routers
1093 * at startup is that it makes them drop stale connections they
1094 * may have to a previous instance of me. */
1095 lnet_wait_known_routerstate();
1102 lnet_router_checker_stop (void)
1106 if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
1109 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1110 the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
1111 /* wakeup the RC thread if it's sleeping */
1112 wake_up(&the_lnet.ln_rc_waitq);
1114 /* block until event callback signals exit */
1115 down(&the_lnet.ln_rc_signal);
1116 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1118 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1124 lnet_prune_rc_data(int wait_unlink)
1126 lnet_rc_data_t *rcd;
1127 lnet_rc_data_t *tmp;
1128 struct lnet_peer_ni *lp;
1129 struct list_head head;
1132 if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
1133 list_empty(&the_lnet.ln_rcd_deathrow) &&
1134 list_empty(&the_lnet.ln_rcd_zombie)))
1137 INIT_LIST_HEAD(&head);
1139 lnet_net_lock(LNET_LOCK_EX);
1141 if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1142 /* router checker is stopping, prune all */
1143 list_for_each_entry(lp, &the_lnet.ln_routers,
1145 if (lp->lpni_rcd == NULL)
1148 LASSERT(list_empty(&lp->lpni_rcd->rcd_list));
1149 list_add(&lp->lpni_rcd->rcd_list,
1150 &the_lnet.ln_rcd_deathrow);
1151 lp->lpni_rcd = NULL;
1155 /* unlink all RCDs on deathrow list */
1156 list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
1158 if (!list_empty(&head)) {
1159 lnet_net_unlock(LNET_LOCK_EX);
1161 list_for_each_entry(rcd, &head, rcd_list)
1162 LNetMDUnlink(rcd->rcd_mdh);
1164 lnet_net_lock(LNET_LOCK_EX);
1167 list_splice_init(&head, &the_lnet.ln_rcd_zombie);
1169 /* release all zombie RCDs */
1170 while (!list_empty(&the_lnet.ln_rcd_zombie)) {
1171 list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
1173 if (LNetHandleIsInvalid(rcd->rcd_mdh))
1174 list_move(&rcd->rcd_list, &head);
1177 wait_unlink = wait_unlink &&
1178 !list_empty(&the_lnet.ln_rcd_zombie);
1180 lnet_net_unlock(LNET_LOCK_EX);
1182 while (!list_empty(&head)) {
1183 rcd = list_entry(head.next,
1184 lnet_rc_data_t, rcd_list);
1185 list_del_init(&rcd->rcd_list);
1186 lnet_destroy_rc_data(rcd);
1193 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1194 "Waiting for rc buffers to unlink\n");
1195 set_current_state(TASK_UNINTERRUPTIBLE);
1196 schedule_timeout(cfs_time_seconds(1) / 4);
1198 lnet_net_lock(LNET_LOCK_EX);
1201 lnet_net_unlock(LNET_LOCK_EX);
1205 * This function is called to check if the RC should block indefinitely.
1206 * It's called from lnet_router_checker() as well as being passed to
1207 * wait_event_interruptible() to avoid the lost wake_up problem.
1209 * When it's called from wait_event_interruptible() it must also not
1210 * sleep if the RC state is not running, to avoid a deadlock
1211 * when the system is shutting down.
1214 lnet_router_checker_active(void)
1216 if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING)
1219 /* Router Checker thread needs to run when routing is enabled in
1220 * order to call lnet_update_ni_status_locked() */
1221 if (the_lnet.ln_routing)
1224 return !list_empty(&the_lnet.ln_routers) &&
1225 (live_router_check_interval > 0 ||
1226 dead_router_check_interval > 0);
1230 lnet_router_checker(void *arg)
1232 struct lnet_peer_ni *rtr;
1233 struct list_head *entry;
1235 cfs_block_allsigs();
1237 while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
1242 cpt = lnet_net_lock_current();
1244 version = the_lnet.ln_routers_version;
1246 list_for_each(entry, &the_lnet.ln_routers) {
1247 rtr = list_entry(entry, struct lnet_peer_ni,
1250 cpt2 = rtr->lpni_cpt;
1252 lnet_net_unlock(cpt);
1255 /* the routers list has changed */
1256 if (version != the_lnet.ln_routers_version)
1260 lnet_ping_router_locked(rtr);
1262 /* NB dropped lock */
1263 if (version != the_lnet.ln_routers_version) {
1264 /* the routers list has changed */
1269 if (the_lnet.ln_routing)
1270 lnet_update_ni_status_locked();
1272 lnet_net_unlock(cpt);
1274 lnet_prune_rc_data(0); /* don't wait for UNLINK */
1276 /* Calling schedule_timeout() here always adds 1 to the load average
1277 * because the kernel counts # active tasks as nr_running
1278 * + nr_uninterruptible. */
1279 /* if there are any routes then wake up every second. If
1280 * there are no routes then sleep indefinitely until woken
1281 * up by a user adding a route */
1282 if (!lnet_router_checker_active())
1283 wait_event_interruptible(the_lnet.ln_rc_waitq,
1284 lnet_router_checker_active());
1286 wait_event_interruptible_timeout(the_lnet.ln_rc_waitq,
1288 cfs_time_seconds(1));
1291 lnet_prune_rc_data(1); /* wait for UNLINK */
1293 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1294 up(&the_lnet.ln_rc_signal);
1295 /* The unlink event callback will signal final completion */
1300 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
1302 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1304 while (--npages >= 0)
1305 __free_page(rb->rb_kiov[npages].kiov_page);
1307 LIBCFS_FREE(rb, sz);
1310 static lnet_rtrbuf_t *
1311 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1313 int npages = rbp->rbp_npages;
1314 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1319 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1325 for (i = 0; i < npages; i++) {
1326 page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
1327 GFP_KERNEL | __GFP_ZERO);
1330 __free_page(rb->rb_kiov[i].kiov_page);
1332 LIBCFS_FREE(rb, sz);
1336 rb->rb_kiov[i].kiov_len = PAGE_SIZE;
1337 rb->rb_kiov[i].kiov_offset = 0;
1338 rb->rb_kiov[i].kiov_page = page;
1345 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
1347 int npages = rbp->rbp_npages;
1349 struct list_head tmp;
1351 if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1354 INIT_LIST_HEAD(&tmp);
1357 lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
1358 list_splice_init(&rbp->rbp_bufs, &tmp);
1359 rbp->rbp_req_nbuffers = 0;
1360 rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1361 rbp->rbp_mincredits = 0;
1362 lnet_net_unlock(cpt);
1364 /* Free buffers on the free list. */
1365 while (!list_empty(&tmp)) {
1366 rb = list_entry(tmp.next, lnet_rtrbuf_t, rb_list);
1367 list_del(&rb->rb_list);
1368 lnet_destroy_rtrbuf(rb, npages);
1373 lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
1375 struct list_head rb_list;
1378 int num_buffers = 0;
1380 int npages = rbp->rbp_npages;
1383 /* If we are called for fewer buffers than are already in the pool, we
1384 * just lower the req_nbuffers number and excess buffers will be
1385 * thrown away as they are returned to the free list. Credits
1386 * then get adjusted as well.
1387 * If we already have enough buffers allocated to serve the
1388 * increase requested, then we can treat that the same way as we
1389 * do the decrease. */
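/*
 * Illustrative example (numbers assumed): if the pool currently holds 512
 * buffers and the caller asks for 256, only rbp_req_nbuffers is lowered
 * here; the extra 256 buffers are freed lazily by
 * lnet_return_rx_credits_locked() as they are returned. Asking for 768
 * instead falls through to allocate the missing 256 on a local list first.
 */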
1390 num_rb = nbufs - rbp->rbp_nbuffers;
1391 if (nbufs <= rbp->rbp_req_nbuffers || num_rb <= 0) {
1392 rbp->rbp_req_nbuffers = nbufs;
1393 lnet_net_unlock(cpt);
1396 /* store the older value of rbp_req_nbuffers and then set it to
1397 * the new request to prevent lnet_return_rx_credits_locked() from
1398 * freeing buffers that we need to keep around */
1399 old_req_nbufs = rbp->rbp_req_nbuffers;
1400 rbp->rbp_req_nbuffers = nbufs;
1401 lnet_net_unlock(cpt);
1403 INIT_LIST_HEAD(&rb_list);
1405 /* allocate the buffers on a local list first. If all buffers are
1406 * allocated successfully then join this list to the rbp buffer
1407 * list. If not then free all allocated buffers. */
1408 while (num_rb-- > 0) {
1409 rb = lnet_new_rtrbuf(rbp, cpt);
1411 CERROR("Failed to allocate %d route bufs of %d pages\n",
1415 rbp->rbp_req_nbuffers = old_req_nbufs;
1416 lnet_net_unlock(cpt);
1421 list_add(&rb->rb_list, &rb_list);
1427 list_splice_tail(&rb_list, &rbp->rbp_bufs);
1428 rbp->rbp_nbuffers += num_buffers;
1429 rbp->rbp_credits += num_buffers;
1430 rbp->rbp_mincredits = rbp->rbp_credits;
1431 /* We need to schedule blocked msgs using the newly added buffers. */
1433 while (!list_empty(&rbp->rbp_bufs) &&
1434 !list_empty(&rbp->rbp_msgs))
1435 lnet_schedule_blocked_locked(rbp);
1437 lnet_net_unlock(cpt);
1442 while (!list_empty(&rb_list)) {
1443 rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
1444 list_del(&rb->rb_list);
1445 lnet_destroy_rtrbuf(rb, npages);
1452 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
1454 INIT_LIST_HEAD(&rbp->rbp_msgs);
1455 INIT_LIST_HEAD(&rbp->rbp_bufs);
1457 rbp->rbp_npages = npages;
1458 rbp->rbp_credits = 0;
1459 rbp->rbp_mincredits = 0;
1463 lnet_rtrpools_free(int keep_pools)
1465 lnet_rtrbufpool_t *rtrp;
1468 if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1471 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1472 lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
1473 lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
1474 lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
1478 cfs_percpt_free(the_lnet.ln_rtrpools);
1479 the_lnet.ln_rtrpools = NULL;
1484 lnet_nrb_tiny_calculate(void)
1486 int nrbs = LNET_NRB_TINY;
1488 if (tiny_router_buffers < 0) {
1489 LCONSOLE_ERROR_MSG(0x10c,
1490 "tiny_router_buffers=%d invalid when "
1491 "routing enabled\n", tiny_router_buffers);
1495 if (tiny_router_buffers > 0)
1496 nrbs = tiny_router_buffers;
1498 nrbs /= LNET_CPT_NUMBER;
1499 return max(nrbs, LNET_NRB_TINY_MIN);
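/*
 * Worked example (illustrative, assuming LNET_CPT_NUMBER == 4): with
 * tiny_router_buffers left at 0, nrbs = LNET_NRB_TINY = 512 * 4 = 2048,
 * giving 2048 / 4 = 512 buffers per CPT, which is exactly
 * LNET_NRB_TINY_MIN. The small and large calculations below follow the
 * same pattern with their own minimums.
 */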
1503 lnet_nrb_small_calculate(void)
1505 int nrbs = LNET_NRB_SMALL;
1507 if (small_router_buffers < 0) {
1508 LCONSOLE_ERROR_MSG(0x10c,
1509 "small_router_buffers=%d invalid when "
1510 "routing enabled\n", small_router_buffers);
1514 if (small_router_buffers > 0)
1515 nrbs = small_router_buffers;
1517 nrbs /= LNET_CPT_NUMBER;
1518 return max(nrbs, LNET_NRB_SMALL_MIN);
1522 lnet_nrb_large_calculate(void)
1524 int nrbs = LNET_NRB_LARGE;
1526 if (large_router_buffers < 0) {
1527 LCONSOLE_ERROR_MSG(0x10c,
1528 "large_router_buffers=%d invalid when "
1529 "routing enabled\n", large_router_buffers);
1533 if (large_router_buffers > 0)
1534 nrbs = large_router_buffers;
1536 nrbs /= LNET_CPT_NUMBER;
1537 return max(nrbs, LNET_NRB_LARGE_MIN);
1541 lnet_rtrpools_alloc(int im_a_router)
1543 lnet_rtrbufpool_t *rtrp;
1550 if (!strcmp(forwarding, "")) {
1551 /* not set either way */
1554 } else if (!strcmp(forwarding, "disabled")) {
1555 /* explicitly disabled */
1557 } else if (!strcmp(forwarding, "enabled")) {
1558 /* explicitly enabled */
1560 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1561 "'enabled' or 'disabled'\n");
1565 nrb_tiny = lnet_nrb_tiny_calculate();
1569 nrb_small = lnet_nrb_small_calculate();
1573 nrb_large = lnet_nrb_large_calculate();
1577 the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1579 sizeof(lnet_rtrbufpool_t));
1580 if (the_lnet.ln_rtrpools == NULL) {
1581 LCONSOLE_ERROR_MSG(0x10c,
1582 "Failed to initialize router buffe pool\n");
1586 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1587 lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
1588 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1593 lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
1594 LNET_NRB_SMALL_PAGES);
1595 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1600 lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
1601 LNET_NRB_LARGE_PAGES);
1602 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1608 lnet_net_lock(LNET_LOCK_EX);
1609 the_lnet.ln_routing = 1;
1610 lnet_net_unlock(LNET_LOCK_EX);
1614 lnet_rtrpools_free(0);
1619 lnet_rtrpools_adjust_helper(int tiny, int small, int large)
1624 lnet_rtrbufpool_t *rtrp;
1626 /* If the provided values for each buffer pool are different from the
1627 * configured values, we need to take action. */
1629 tiny_router_buffers = tiny;
1630 nrb = lnet_nrb_tiny_calculate();
1631 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1632 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
1639 small_router_buffers = small;
1640 nrb = lnet_nrb_small_calculate();
1641 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1642 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
1649 large_router_buffers = large;
1650 nrb = lnet_nrb_large_calculate();
1651 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1652 rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
1663 lnet_rtrpools_adjust(int tiny, int small, int large)
1665 /* this function doesn't revert the changes if adding new buffers
1666 * failed. It's up to the user-space caller to revert the changes. */
1669 if (!the_lnet.ln_routing)
1672 return lnet_rtrpools_adjust_helper(tiny, small, large);
1676 lnet_rtrpools_enable(void)
1680 if (the_lnet.ln_routing)
1683 if (the_lnet.ln_rtrpools == NULL)
1684 /* If routing is turned off, and we have never
1685 * initialized the pools before, just call the
1686 * standard buffer pool allocation routine as
1687 * if we are just configuring this for the first time. */
1689 rc = lnet_rtrpools_alloc(1);
1691 rc = lnet_rtrpools_adjust_helper(0, 0, 0);
1695 lnet_net_lock(LNET_LOCK_EX);
1696 the_lnet.ln_routing = 1;
1698 the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
1699 lnet_net_unlock(LNET_LOCK_EX);
1705 lnet_rtrpools_disable(void)
1707 if (!the_lnet.ln_routing)
1710 lnet_net_lock(LNET_LOCK_EX);
1711 the_lnet.ln_routing = 0;
1712 the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
1714 tiny_router_buffers = 0;
1715 small_router_buffers = 0;
1716 large_router_buffers = 0;
1717 lnet_net_unlock(LNET_LOCK_EX);
1718 lnet_rtrpools_free(1);
1722 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
1724 struct lnet_peer_ni *lp = NULL;
1725 cfs_time_t now = cfs_time_current();
1726 int cpt = lnet_cpt_of_nid(nid, ni);
1728 LASSERT (!in_interrupt ());
1730 CDEBUG (D_NET, "%s notifying %s: %s\n",
1731 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1732 libcfs_nid2str(nid),
1733 alive ? "up" : "down");
1736 LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1737 CWARN("Ignoring notification of %s %s by %s (different net)\n",
1738 libcfs_nid2str(nid), alive ? "birth" : "death",
1739 libcfs_nid2str(ni->ni_nid));
1743 /* can't do predictions... */
1744 if (cfs_time_after(when, now)) {
1745 CWARN("Ignoring prediction from %s of %s %s "
1746 "%ld seconds in the future\n",
1747 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1748 libcfs_nid2str(nid), alive ? "up" : "down",
1749 cfs_duration_sec(cfs_time_sub(when, now)));
1753 if (ni != NULL && !alive && /* LND telling me she's down */
1754 !auto_down) { /* auto-down disabled */
1755 CDEBUG(D_NET, "Auto-down disabled\n");
1761 if (the_lnet.ln_shutdown) {
1762 lnet_net_unlock(cpt);
1766 lp = lnet_find_peer_ni_locked(nid);
1769 lnet_net_unlock(cpt);
1770 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1774 /* We can't fully trust the LND to report the exact peer last_alive
1775 * when it notifies us about a dead peer. For example, ksocklnd can
1776 * call us with when == _time_when_the_node_was_booted_ if
1777 * no connections were successfully established */
1778 if (ni != NULL && !alive && when < lp->lpni_last_alive)
1779 when = lp->lpni_last_alive;
1781 lnet_notify_locked(lp, ni == NULL, alive, when);
1784 lnet_ni_notify_locked(ni, lp);
1786 lnet_peer_ni_decref_locked(lp);
1788 lnet_net_unlock(cpt);
1791 EXPORT_SYMBOL(lnet_notify);