2 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
4 * Copyright (c) 2011, Whamcloud, Inc.
6 * This file is part of Portals
7 * http://sourceforge.net/projects/sandiaportals/
9 * Portals is free software; you can redistribute it and/or
10 * modify it under the terms of version 2 of the GNU General Public
11 * License as published by the Free Software Foundation.
13 * Portals is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with Portals; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 #define DEBUG_SUBSYSTEM S_LNET
25 #include <lnet/lib-lnet.h>
27 #if defined(__KERNEL__) && defined(LNET_ROUTER)
29 #define LNET_NRB_TINY_MIN 512 /* min value for each CPT */
30 #define LNET_NRB_TINY (LNET_NRB_TINY_MIN * 4)
31 #define LNET_NRB_SMALL_MIN 4096 /* min value for each CPT */
32 #define LNET_NRB_SMALL (LNET_NRB_SMALL_MIN * 4)
33 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
34 #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
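/* The LNET_NRB_* values above are the default totals for the node;
 * lnet_nrb_*_calculate() below divides each total across LNET_CPT_NUMBER
 * CPTs and clamps every CPT's share to the corresponding *_MIN value. */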
36 static char *forwarding = "";
37 CFS_MODULE_PARM(forwarding, "s", charp, 0444,
38 "Explicitly enable/disable forwarding between networks");
40 static int tiny_router_buffers;
41 CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
42 "# of 0 payload messages to buffer in the router");
43 static int small_router_buffers;
44 CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
45 "# of small (1 page) messages to buffer in the router");
46 static int large_router_buffers;
47 CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
48 "# of large messages to buffer in the router");
49 static int peer_buffer_credits = 0;
50 CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
51 "# router buffer credits per peer");
53 static int auto_down = 1;
54 CFS_MODULE_PARM(auto_down, "i", int, 0444,
55 "Automatically mark peers down on comms error");
58 lnet_peer_buffer_credits(lnet_ni_t *ni)
60 /* NI option overrides LNet default */
61 if (ni->ni_peerrtrcredits > 0)
62 return ni->ni_peerrtrcredits;
63 if (peer_buffer_credits > 0)
64 return peer_buffer_credits;
66 /* As an approximation, allow this peer the same number of router
67 * buffers as it is allowed outstanding sends */
68 return ni->ni_peertxcredits;
72 static int lnet_router_checker(void *);
76 lnet_peer_buffer_credits(lnet_ni_t *ni)
83 static int check_routers_before_use = 0;
84 CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
85 "Assume routers are down and ping them before use");
87 static int avoid_asym_router_failure = 0;
88 CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
89 "Avoid asymmetrical failures: reserved, use at your own risk");
91 static int dead_router_check_interval = 0;
92 CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
93 "Seconds between dead router health checks (<= 0 to disable)");
95 static int live_router_check_interval = 0;
96 CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
97 "Seconds between live router health checks (<= 0 to disable)");
99 static int router_ping_timeout = 50;
100 CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
101 "Seconds to wait for the reply to a router health query");
104 lnet_peers_start_down(void)
106 return check_routers_before_use;
110 lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
112 if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
113 CDEBUG(D_NET, "Out of date\n");
117 lp->lp_timestamp = when; /* update timestamp */
118 lp->lp_ping_deadline = 0; /* disable ping timeout */
120 if (lp->lp_alive_count != 0 && /* got old news */
121 (!lp->lp_alive) == (!alive)) { /* new date for old news */
122 CDEBUG(D_NET, "Old news\n");
126 /* Flag that notification is outstanding */
128 lp->lp_alive_count++;
129 lp->lp_alive = !(!alive); /* 1 bit! */
131 lp->lp_notifylnd |= notifylnd;
133 lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
135 CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
139 lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
144 /* Notify only in 1 thread at any time to ensure ordered notification.
145 * NB individual events can be missed; the only guarantee is that you
146 * always get the most recent news */
148 if (lp->lp_notifying)
151 lp->lp_notifying = 1;
153 while (lp->lp_notify) {
154 alive = lp->lp_alive;
155 notifylnd = lp->lp_notifylnd;
157 lp->lp_notifylnd = 0;
160 if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
161 lnet_net_unlock(lp->lp_cpt);
163 /* A new notification could happen now; I'll handle it
164 * when control returns to me */
166 (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
168 lnet_net_lock(lp->lp_cpt);
172 lp->lp_notifying = 0;
177 lnet_rtr_addref_locked(lnet_peer_t *lp)
179 LASSERT(lp->lp_refcount > 0);
180 LASSERT(lp->lp_rtr_refcount >= 0);
182 /* lnet_net_lock must be exclusively locked */
183 lp->lp_rtr_refcount++;
184 if (lp->lp_rtr_refcount == 1) {
187 /* a simple insertion sort */
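/* keep the_lnet.ln_routers sorted in ascending lp_nid order: walk backwards
 * until an entry with a smaller NID is found and insert right after it */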
188 cfs_list_for_each_prev(pos, &the_lnet.ln_routers) {
189 lnet_peer_t *rtr = cfs_list_entry(pos, lnet_peer_t,
192 if (rtr->lp_nid < lp->lp_nid)
196 cfs_list_add(&lp->lp_rtr_list, pos);
197 /* addref for the_lnet.ln_routers */
198 lnet_peer_addref_locked(lp);
199 the_lnet.ln_routers_version++;
204 lnet_rtr_decref_locked(lnet_peer_t *lp)
206 LASSERT(lp->lp_refcount > 0);
207 LASSERT(lp->lp_rtr_refcount > 0);
209 /* lnet_net_lock must be exclusively locked */
210 lp->lp_rtr_refcount--;
211 if (lp->lp_rtr_refcount == 0) {
212 LASSERT(cfs_list_empty(&lp->lp_routes));
214 if (lp->lp_rcd != NULL) {
215 cfs_list_add(&lp->lp_rcd->rcd_list,
216 &the_lnet.ln_rcd_deathrow);
220 cfs_list_del(&lp->lp_rtr_list);
221 /* decref for the_lnet.ln_routers */
222 lnet_peer_decref_locked(lp);
223 the_lnet.ln_routers_version++;
228 lnet_find_net_locked (__u32 net)
230 lnet_remotenet_t *rnet;
233 LASSERT (!the_lnet.ln_shutdown);
235 cfs_list_for_each (tmp, &the_lnet.ln_remote_nets) {
236 rnet = cfs_list_entry(tmp, lnet_remotenet_t, lrn_list);
238 if (rnet->lrn_net == net)
244 static void lnet_shuffle_seed(void)
246 static int seeded = 0;
247 int lnd_type, seed[2];
255 cfs_get_random_bytes(seed, sizeof(seed));
257 /* Nodes with small feet have little entropy;
258 * the NID for this node gives the most entropy in the low bits */
259 cfs_list_for_each(tmp, &the_lnet.ln_nis) {
260 ni = cfs_list_entry(tmp, lnet_ni_t, ni_list);
261 lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
263 if (lnd_type != LOLND)
264 seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
267 cfs_gettimeofday(&tv);
268 cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
273 /* NB expects LNET_LOCK held */
275 lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
277 unsigned int len = 0;
278 unsigned int offset = 0;
283 cfs_list_for_each (e, &rnet->lrn_routes) {
287 /* len+1 positions to add a new entry, also prevents division by 0 */
288 offset = cfs_rand() % (len + 1);
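/* walk to the randomly chosen slot and insert the new route there;
 * randomising the position presumably spreads traffic across multiple
 * gateways to the same remote net */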
289 cfs_list_for_each (e, &rnet->lrn_routes) {
294 cfs_list_add(&route->lr_list, e);
295 cfs_list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
297 the_lnet.ln_remote_nets_version++;
298 lnet_rtr_addref_locked(route->lr_gateway);
302 lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
305 lnet_remotenet_t *rnet;
306 lnet_remotenet_t *rnet2;
312 CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
313 libcfs_net2str(net), hops, libcfs_nid2str(gateway));
315 if (gateway == LNET_NID_ANY ||
316 LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
317 net == LNET_NIDNET(LNET_NID_ANY) ||
318 LNET_NETTYP(net) == LOLND ||
319 LNET_NIDNET(gateway) == net ||
320 hops < 1 || hops > 255)
323 if (lnet_islocalnet(net)) /* it's a local network */
324 return 0; /* ignore the route entry */
326 /* Assume net, route, all new */
327 LIBCFS_ALLOC(route, sizeof(*route));
328 LIBCFS_ALLOC(rnet, sizeof(*rnet));
329 if (route == NULL || rnet == NULL) {
330 CERROR("Out of memory creating route %s %d %s\n",
331 libcfs_net2str(net), hops, libcfs_nid2str(gateway));
333 LIBCFS_FREE(route, sizeof(*route));
335 LIBCFS_FREE(rnet, sizeof(*rnet));
339 CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
341 route->lr_hops = hops;
344 lnet_net_lock(LNET_LOCK_EX);
346 rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
348 lnet_net_unlock(LNET_LOCK_EX);
350 LIBCFS_FREE(route, sizeof(*route));
351 LIBCFS_FREE(rnet, sizeof(*rnet));
353 if (rc == -EHOSTUNREACH) { /* gateway is not on a local net */
354 return 0; /* ignore the route entry */
356 CERROR("Error %d creating route %s %d %s\n", rc,
357 libcfs_net2str(net), hops,
358 libcfs_nid2str(gateway));
363 LASSERT (!the_lnet.ln_shutdown);
365 rnet2 = lnet_find_net_locked(net);
368 cfs_list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
372 /* Search for a duplicate route (adding a duplicate is a NOOP) */
374 cfs_list_for_each (e, &rnet2->lrn_routes) {
375 lnet_route_t *route2 = cfs_list_entry(e, lnet_route_t, lr_list);
377 if (route2->lr_gateway == route->lr_gateway) {
382 /* our lookups must be consistent: a different gateway peer implies a different NID */
383 LASSERT (route2->lr_gateway->lp_nid != gateway);
387 lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
388 lnet_add_route_to_rnet(rnet2, route);
390 ni = route->lr_gateway->lp_ni;
391 lnet_net_unlock(LNET_LOCK_EX);
393 /* XXX Assume alive */
394 if (ni->ni_lnd->lnd_notify != NULL)
395 (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
397 lnet_net_lock(LNET_LOCK_EX);
400 /* -1 for notify or !add_route */
401 lnet_peer_decref_locked(route->lr_gateway);
402 lnet_net_unlock(LNET_LOCK_EX);
405 LIBCFS_FREE(route, sizeof(*route));
408 LIBCFS_FREE(rnet, sizeof(*rnet));
414 lnet_check_routes(void)
416 lnet_remotenet_t *rnet;
418 lnet_route_t *route2;
423 cpt = lnet_net_lock_current();
425 cfs_list_for_each(e1, &the_lnet.ln_remote_nets) {
426 rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
429 cfs_list_for_each(e2, &rnet->lrn_routes) {
434 route = cfs_list_entry(e2, lnet_route_t, lr_list);
436 if (route2 == NULL) {
441 if (route->lr_gateway->lp_ni ==
442 route2->lr_gateway->lp_ni)
445 nid1 = route->lr_gateway->lp_nid;
446 nid2 = route2->lr_gateway->lp_nid;
449 lnet_net_unlock(cpt);
451 CERROR("Routes to %s via %s and %s not supported\n",
452 libcfs_net2str(net), libcfs_nid2str(nid1),
453 libcfs_nid2str(nid2));
458 lnet_net_unlock(cpt);
463 lnet_del_route(__u32 net, lnet_nid_t gw_nid)
465 struct lnet_peer *gateway;
466 lnet_remotenet_t *rnet;
472 CDEBUG(D_NET, "Del route: net %s : gw %s\n",
473 libcfs_net2str(net), libcfs_nid2str(gw_nid));
475 /* NB Caller may specify either all routes via the given gateway
476 * or a specific route entry (actual NIDs) */
479 lnet_net_lock(LNET_LOCK_EX);
481 cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
482 rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
484 if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
485 net == rnet->lrn_net))
488 cfs_list_for_each (e2, &rnet->lrn_routes) {
489 route = cfs_list_entry(e2, lnet_route_t, lr_list);
491 gateway = route->lr_gateway;
492 if (!(gw_nid == LNET_NID_ANY ||
493 gw_nid == gateway->lp_nid))
496 cfs_list_del(&route->lr_list);
497 cfs_list_del(&route->lr_gwlist);
498 the_lnet.ln_remote_nets_version++;
500 if (cfs_list_empty(&rnet->lrn_routes))
501 cfs_list_del(&rnet->lrn_list);
505 lnet_rtr_decref_locked(gateway);
506 lnet_peer_decref_locked(gateway);
508 lnet_net_unlock(LNET_LOCK_EX);
510 LIBCFS_FREE(route, sizeof (*route));
513 LIBCFS_FREE(rnet, sizeof(*rnet));
520 lnet_net_unlock(LNET_LOCK_EX);
525 lnet_destroy_routes (void)
527 lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
531 lnet_get_route(int idx, __u32 *net, __u32 *hops,
532 lnet_nid_t *gateway, __u32 *alive)
536 lnet_remotenet_t *rnet;
540 cpt = lnet_net_lock_current();
542 cfs_list_for_each (e1, &the_lnet.ln_remote_nets) {
543 rnet = cfs_list_entry(e1, lnet_remotenet_t, lrn_list);
545 cfs_list_for_each (e2, &rnet->lrn_routes) {
546 route = cfs_list_entry(e2, lnet_route_t, lr_list);
549 *net = rnet->lrn_net;
550 *hops = route->lr_hops;
551 *gateway = route->lr_gateway->lp_nid;
552 *alive = route->lr_gateway->lp_alive;
553 lnet_net_unlock(cpt);
559 lnet_net_unlock(cpt);
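/* byte-swap ping info that arrived from a peer of opposite endianness
 * (detected by a byte-swapped pi_magic in lnet_parse_rc_info) */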
564 lnet_swap_pinginfo(lnet_ping_info_t *info)
567 lnet_ni_status_t *stat;
569 __swab32s(&info->pi_magic);
570 __swab32s(&info->pi_features);
571 __swab32s(&info->pi_pid);
572 __swab32s(&info->pi_nnis);
573 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
574 stat = &info->pi_ni[i];
575 __swab64s(&stat->ns_nid);
576 __swab32s(&stat->ns_status);
582 * parse router-checker pinginfo, record number of down NIs for remote
583 * networks on that router.
586 lnet_parse_rc_info(lnet_rc_data_t *rcd)
588 lnet_ping_info_t *info = rcd->rcd_pinginfo;
589 struct lnet_peer *gw = rcd->rcd_gateway;
595 if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
596 lnet_swap_pinginfo(info);
598 /* NB always racing with network! */
599 if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
600 CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
601 libcfs_nid2str(gw->lp_nid), info->pi_magic);
602 gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
606 gw->lp_ping_feats = info->pi_features;
607 if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
608 CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
609 libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
610 return; /* nothing I can understand */
613 if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
614 return; /* can't carry NI status info */
616 cfs_list_for_each_entry(rtr, &gw->lp_routes, lr_gwlist) {
617 int ptl_status = LNET_NI_STATUS_INVALID;
622 for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
623 lnet_ni_status_t *stat = &info->pi_ni[i];
624 lnet_nid_t nid = stat->ns_nid;
626 if (nid == LNET_NID_ANY) {
627 CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
628 libcfs_nid2str(gw->lp_nid));
629 gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
633 if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
636 if (stat->ns_status == LNET_NI_STATUS_DOWN) {
637 if (LNET_NETTYP(LNET_NIDNET(nid)) != PTLLND)
639 else if (ptl_status != LNET_NI_STATUS_UP)
640 ptl_status = LNET_NI_STATUS_DOWN;
644 if (stat->ns_status == LNET_NI_STATUS_UP) {
645 if (LNET_NIDNET(nid) == rtr->lr_net) {
649 /* ptl NIs are considered down only when
650 * they're all down */
651 if (LNET_NETTYP(LNET_NIDNET(nid)) == PTLLND)
652 ptl_status = LNET_NI_STATUS_UP;
656 CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
657 libcfs_nid2str(gw->lp_nid), stat->ns_status);
658 gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
662 if (up) { /* ignore downed NIs if NI for dest network is up */
666 rtr->lr_downis = down + (ptl_status == LNET_NI_STATUS_DOWN);
671 lnet_router_checker_event(lnet_event_t *event)
673 lnet_rc_data_t *rcd = event->md.user_ptr;
674 struct lnet_peer *lp;
676 LASSERT(rcd != NULL);
678 if (event->unlinked) {
679 LNetInvalidateHandle(&rcd->rcd_mdh);
683 LASSERT(event->type == LNET_EVENT_SEND ||
684 event->type == LNET_EVENT_REPLY);
686 lp = rcd->rcd_gateway;
689 /* NB: this is called while holding lnet_res_lock; a few places need to
690 * hold both locks at the same time, so take care of the lock
691 * ordering */
692 lnet_net_lock(lp->lp_cpt);
693 if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
694 /* ignore if no longer a router or rcd is replaced */
698 if (event->type == LNET_EVENT_SEND) {
699 lp->lp_ping_notsent = 0;
700 if (event->status == 0)
704 /* LNET_EVENT_REPLY */
705 /* A successful REPLY means the router is up. If _any_ comms
706 * to the router fail I assume it's down (this will happen if
707 * we ping alive routers to try to detect router death before
708 * apps get burned). */
710 lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
711 /* The router checker will wake up very shortly and do the
712 * actual notification.
713 * XXX If 'lp' stops being a router before then, it will still
714 * have the notification pending!!! */
716 if (avoid_asym_router_failure && event->status == 0)
717 lnet_parse_rc_info(rcd);
720 lnet_net_unlock(lp->lp_cpt);
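/* block until every known router has been pinged at least once
 * (lp_alive_count != 0), so each router's state is known before it is used */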
724 lnet_wait_known_routerstate(void)
730 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
733 int cpt = lnet_net_lock_current();
736 cfs_list_for_each (entry, &the_lnet.ln_routers) {
737 rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
739 if (rtr->lp_alive_count == 0) {
745 lnet_net_unlock(cpt);
751 lnet_router_checker();
753 cfs_pause(cfs_time_seconds(1));
758 lnet_update_ni_status_locked(void)
764 LASSERT(the_lnet.ln_routing);
766 timeout = router_ping_timeout +
767 MAX(live_router_check_interval, dead_router_check_interval);
769 now = cfs_time_current_sec();
770 cfs_list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
771 if (ni->ni_lnd->lnd_type == LOLND)
774 if (now < ni->ni_last_alive + timeout)
778 /* re-check with lock */
779 if (now < ni->ni_last_alive + timeout) {
784 LASSERT(ni->ni_status != NULL);
786 if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
787 CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
788 libcfs_nid2str(ni->ni_nid), timeout);
789 /* NB: so far, this is the only place to set
790 * NI status to "down" */
791 ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
798 lnet_destroy_rc_data(lnet_rc_data_t *rcd)
800 LASSERT(cfs_list_empty(&rcd->rcd_list));
801 /* detached from network */
802 LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
804 if (rcd->rcd_gateway != NULL) {
805 int cpt = rcd->rcd_gateway->lp_cpt;
808 lnet_peer_decref_locked(rcd->rcd_gateway);
809 lnet_net_unlock(cpt);
812 if (rcd->rcd_pinginfo != NULL)
813 LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
815 LIBCFS_FREE(rcd, sizeof(*rcd));
819 lnet_create_rc_data_locked(lnet_peer_t *gateway)
821 lnet_rc_data_t *rcd = NULL;
822 lnet_ping_info_t *pi;
826 lnet_net_unlock(gateway->lp_cpt);
828 LIBCFS_ALLOC(rcd, sizeof(*rcd));
832 LNetInvalidateHandle(&rcd->rcd_mdh);
833 CFS_INIT_LIST_HEAD(&rcd->rcd_list);
835 LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
839 memset(pi, 0, LNET_PINGINFO_SIZE);
840 for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
841 pi->pi_ni[i].ns_nid = LNET_NID_ANY;
842 pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
844 rcd->rcd_pinginfo = pi;
846 LASSERT (!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
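/* bind the ping buffer with an infinite threshold and LNET_MD_TRUNCATE so
 * replies larger than LNET_PINGINFO_SIZE are accepted and clipped rather
 * than dropped */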
847 rc = LNetMDBind((lnet_md_t){.start = pi,
849 .length = LNET_PINGINFO_SIZE,
850 .threshold = LNET_MD_THRESH_INF,
851 .options = LNET_MD_TRUNCATE,
852 .eq_handle = the_lnet.ln_rc_eqh},
856 CERROR("Can't bind MD: %d\n", rc);
861 lnet_net_lock(gateway->lp_cpt);
862 /* router table changed or someone has created rcd for this gateway */
863 if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
864 lnet_net_unlock(gateway->lp_cpt);
868 lnet_peer_addref_locked(gateway);
869 rcd->rcd_gateway = gateway;
870 gateway->lp_rcd = rcd;
871 gateway->lp_ping_notsent = 0;
877 if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
878 rc = LNetMDUnlink(rcd->rcd_mdh);
881 lnet_destroy_rc_data(rcd);
884 lnet_net_lock(gateway->lp_cpt);
885 return gateway->lp_rcd;
889 lnet_router_check_interval (lnet_peer_t *rtr)
893 secs = rtr->lp_alive ? live_router_check_interval :
894 dead_router_check_interval;
902 lnet_ping_router_locked (lnet_peer_t *rtr)
904 lnet_rc_data_t *rcd = NULL;
905 cfs_time_t now = cfs_time_current();
908 lnet_peer_addref_locked(rtr);
910 if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
911 cfs_time_after(now, rtr->lp_ping_deadline))
912 lnet_notify_locked(rtr, 1, 0, now);
914 /* Run any outstanding notifications */
915 lnet_ni_notify_locked(rtr->lp_ni, rtr);
917 if (!lnet_isrouter(rtr) ||
918 the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
919 /* router table changed or router checker is shutting down */
920 lnet_peer_decref_locked(rtr);
924 rcd = rtr->lp_rcd != NULL ?
925 rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
930 secs = lnet_router_check_interval(rtr);
933 "rtr %s %d: deadline %lu ping_notsent %d alive %d "
934 "alive_count %d lp_ping_timestamp %lu\n",
935 libcfs_nid2str(rtr->lp_nid), secs,
936 rtr->lp_ping_deadline, rtr->lp_ping_notsent,
937 rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
939 if (secs != 0 && !rtr->lp_ping_notsent &&
940 cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
941 cfs_time_seconds(secs)))) {
943 lnet_process_id_t id;
944 lnet_handle_md_t mdh;
946 id.nid = rtr->lp_nid;
947 id.pid = LUSTRE_SRV_LNET_PID;
948 CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
950 rtr->lp_ping_notsent = 1;
951 rtr->lp_ping_timestamp = now;
955 if (rtr->lp_ping_deadline == 0) {
956 rtr->lp_ping_deadline =
957 cfs_time_shift(router_ping_timeout);
960 lnet_net_unlock(rtr->lp_cpt);
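/* ping the router: a GET of its ping info on the reserved portal using the
 * standard ping match bits; the reply event is handled by
 * lnet_router_checker_event(), while a missing reply is caught by the
 * lp_ping_deadline check above */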
962 rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
963 LNET_PROTO_PING_MATCHBITS, 0);
965 lnet_net_lock(rtr->lp_cpt);
967 rtr->lp_ping_notsent = 0; /* no event pending */
970 lnet_peer_decref_locked(rtr);
975 lnet_router_checker_start(void)
983 int router_checker_max_eqsize = 10240;
985 LASSERT (check_routers_before_use);
986 LASSERT (dead_router_check_interval > 0);
990 /* As an approximation, allow each router the same number of
991 * outstanding events as it is allowed outstanding sends */
993 version = the_lnet.ln_routers_version;
994 cfs_list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
995 lnet_ni_t *ni = rtr->lp_ni;
996 lnet_process_id_t id;
999 eqsz += ni->ni_peertxcredits;
1001 /* one async ping reply per router */
1002 id.nid = rtr->lp_nid;
1003 id.pid = LUSTRE_SRV_LNET_PID;
1007 rc = LNetSetAsync(id, 1);
1009 CWARN("LNetSetAsync %s failed: %d\n",
1010 libcfs_id2str(id), rc);
1015 /* NB router list doesn't change in userspace */
1016 LASSERT(version == the_lnet.ln_routers_version);
1023 "No router found, not starting router checker\n");
1027 /* at least allow a SENT and a REPLY per router */
1028 if (router_checker_max_eqsize < 2 * nrtr)
1029 router_checker_max_eqsize = 2 * nrtr;
1032 if (eqsz > router_checker_max_eqsize)
1033 eqsz = router_checker_max_eqsize;
1036 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1038 if (check_routers_before_use &&
1039 dead_router_check_interval <= 0) {
1040 LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
1041 " set if 'check_routers_before_use' is set"
1046 if (!the_lnet.ln_routing &&
1047 live_router_check_interval <= 0 &&
1048 dead_router_check_interval <= 0)
1052 sema_init(&the_lnet.ln_rc_signal, 0);
1053 /* EQ size doesn't matter; the callback is guaranteed to get every
1056 rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
1057 &the_lnet.ln_rc_eqh);
1059 rc = LNetEQAlloc(eqsz, LNET_EQ_HANDLER_NONE,
1060 &the_lnet.ln_rc_eqh);
1063 CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
1067 the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
1069 rc = cfs_create_thread(lnet_router_checker, NULL, 0);
1071 CERROR("Can't start router checker thread: %d\n", rc);
1072 /* block until event callback signals exit */
1073 down(&the_lnet.ln_rc_signal);
1074 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1076 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1081 if (check_routers_before_use) {
1082 /* Note that a helpful side-effect of pinging all known routers
1083 * at startup is that it makes them drop stale connections they
1084 * may have to a previous instance of me. */
1085 lnet_wait_known_routerstate();
1092 lnet_router_checker_stop (void)
1096 if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
1099 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1100 the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
1103 /* block until event callback signals exit */
1104 down(&the_lnet.ln_rc_signal);
1106 lnet_router_checker();
1108 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
1110 rc = LNetEQFree(the_lnet.ln_rc_eqh);
1116 lnet_prune_rc_data(int wait_unlink)
1118 lnet_rc_data_t *rcd;
1119 lnet_rc_data_t *tmp;
1124 if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
1125 cfs_list_empty(&the_lnet.ln_rcd_deathrow) &&
1126 cfs_list_empty(&the_lnet.ln_rcd_zombie)))
1129 CFS_INIT_LIST_HEAD(&head);
1131 lnet_net_lock(LNET_LOCK_EX);
1133 if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
1134 /* router checker is stopping, prune all */
1135 cfs_list_for_each_entry(lp, &the_lnet.ln_routers,
1137 if (lp->lp_rcd == NULL)
1140 LASSERT(cfs_list_empty(&lp->lp_rcd->rcd_list));
1141 cfs_list_add(&lp->lp_rcd->rcd_list,
1142 &the_lnet.ln_rcd_deathrow);
1147 /* unlink all RCDs on deathrow list */
1148 cfs_list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
1150 if (!cfs_list_empty(&head)) {
1151 lnet_net_unlock(LNET_LOCK_EX);
1153 cfs_list_for_each_entry(rcd, &head, rcd_list)
1154 LNetMDUnlink(rcd->rcd_mdh);
1156 lnet_net_lock(LNET_LOCK_EX);
1159 cfs_list_splice_init(&head, &the_lnet.ln_rcd_zombie);
1161 /* release all zombie RCDs */
1162 while (!cfs_list_empty(&the_lnet.ln_rcd_zombie)) {
1163 cfs_list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
1165 if (LNetHandleIsInvalid(rcd->rcd_mdh))
1166 cfs_list_move(&rcd->rcd_list, &head);
1169 wait_unlink = wait_unlink &&
1170 !cfs_list_empty(&the_lnet.ln_rcd_zombie);
1172 lnet_net_unlock(LNET_LOCK_EX);
1174 while (!cfs_list_empty(&head)) {
1175 rcd = cfs_list_entry(head.next,
1176 lnet_rc_data_t, rcd_list);
1177 cfs_list_del_init(&rcd->rcd_list);
1178 lnet_destroy_rc_data(rcd);
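/* (i & -i) == i only when i is a power of two, so the "waiting to unlink"
 * message is promoted to a console warning only occasionally instead of on
 * every iteration */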
1185 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
1186 "Waiting for rc buffers to unlink\n");
1187 cfs_pause(cfs_time_seconds(1) / 4);
1189 lnet_net_lock(LNET_LOCK_EX);
1192 lnet_net_unlock(LNET_LOCK_EX);
1196 #if defined(__KERNEL__) && defined(LNET_ROUTER)
1199 lnet_router_checker(void *arg)
1204 cfs_daemonize("router_checker");
1205 cfs_block_allsigs();
1207 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1209 while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
1214 cpt = lnet_net_lock_current();
1216 version = the_lnet.ln_routers_version;
1218 cfs_list_for_each(entry, &the_lnet.ln_routers) {
1219 rtr = cfs_list_entry(entry, lnet_peer_t, lp_rtr_list);
1221 cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
1223 lnet_net_unlock(cpt);
1226 /* the routers list has changed */
1227 if (version != the_lnet.ln_routers_version)
1231 lnet_ping_router_locked(rtr);
1233 /* NB dropped lock */
1234 if (version != the_lnet.ln_routers_version) {
1235 /* the routers list has changed */
1240 if (the_lnet.ln_routing)
1241 lnet_update_ni_status_locked();
1243 lnet_net_unlock(cpt);
1245 lnet_prune_rc_data(0); /* don't wait for UNLINK */
1247 /* Calling cfs_pause() here would always add 1 to the load average
1248 * because the kernel counts # active tasks as nr_running
1249 * + nr_uninterruptible. */
1250 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
1251 cfs_time_seconds(1));
1254 LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
1256 lnet_prune_rc_data(1); /* wait for UNLINK */
1258 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1259 up(&the_lnet.ln_rc_signal);
1260 /* The unlink event callback will signal final completion */
1265 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
1267 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1269 while (--npages >= 0)
1270 cfs_free_page(rb->rb_kiov[npages].kiov_page);
1272 LIBCFS_FREE(rb, sz);
1276 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
1278 int npages = rbp->rbp_npages;
1279 int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
1284 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
1290 for (i = 0; i < npages; i++) {
1291 page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
1292 CFS_ALLOC_ZERO | CFS_ALLOC_STD);
1295 cfs_free_page(rb->rb_kiov[i].kiov_page);
1297 LIBCFS_FREE(rb, sz);
1301 rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
1302 rb->rb_kiov[i].kiov_offset = 0;
1303 rb->rb_kiov[i].kiov_page = page;
1310 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
1312 int npages = rbp->rbp_npages;
1316 if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
1319 LASSERT (cfs_list_empty(&rbp->rbp_msgs));
1320 LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
1322 while (!cfs_list_empty(&rbp->rbp_bufs)) {
1323 LASSERT (rbp->rbp_credits > 0);
1325 rb = cfs_list_entry(rbp->rbp_bufs.next,
1326 lnet_rtrbuf_t, rb_list);
1327 cfs_list_del(&rb->rb_list);
1328 lnet_destroy_rtrbuf(rb, npages);
1332 LASSERT (rbp->rbp_nbuffers == nbuffers);
1333 LASSERT (rbp->rbp_credits == nbuffers);
1335 rbp->rbp_nbuffers = rbp->rbp_credits = 0;
1339 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
1344 if (rbp->rbp_nbuffers != 0) {
1345 LASSERT (rbp->rbp_nbuffers == nbufs);
1349 for (i = 0; i < nbufs; i++) {
1350 rb = lnet_new_rtrbuf(rbp, cpt);
1353 CERROR("Failed to allocate %d router bufs of %d pages\n",
1354 nbufs, rbp->rbp_npages);
1358 rbp->rbp_nbuffers++;
1360 rbp->rbp_mincredits++;
1361 cfs_list_add(&rb->rb_list, &rbp->rbp_bufs);
1363 /* No allocation "under fire" */
1364 /* Otherwise we'd need code to schedule blocked msgs etc */
1365 LASSERT (!the_lnet.ln_routing);
1368 LASSERT (rbp->rbp_credits == nbufs);
1373 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
1375 CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
1376 CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);
1378 rbp->rbp_npages = npages;
1379 rbp->rbp_credits = 0;
1380 rbp->rbp_mincredits = 0;
1384 lnet_rtrpools_free(void)
1386 lnet_rtrbufpool_t *rtrp;
1389 if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
1392 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
1393 lnet_rtrpool_free_bufs(&rtrp[0]);
1394 lnet_rtrpool_free_bufs(&rtrp[1]);
1395 lnet_rtrpool_free_bufs(&rtrp[2]);
1398 cfs_percpt_free(the_lnet.ln_rtrpools);
1399 the_lnet.ln_rtrpools = NULL;
1403 lnet_nrb_tiny_calculate(int npages)
1405 int nrbs = LNET_NRB_TINY;
1407 if (tiny_router_buffers < 0) {
1408 LCONSOLE_ERROR_MSG(0x10c,
1409 "tiny_router_buffers=%d invalid when "
1410 "routing enabled\n", tiny_router_buffers);
1414 if (tiny_router_buffers > 0)
1415 nrbs = tiny_router_buffers;
1417 nrbs /= LNET_CPT_NUMBER;
1418 return max(nrbs, LNET_NRB_TINY_MIN);
1422 lnet_nrb_small_calculate(int npages)
1424 int nrbs = LNET_NRB_SMALL;
1426 if (small_router_buffers < 0) {
1427 LCONSOLE_ERROR_MSG(0x10c,
1428 "small_router_buffers=%d invalid when "
1429 "routing enabled\n", small_router_buffers);
1433 if (small_router_buffers > 0)
1434 nrbs = small_router_buffers;
1436 nrbs /= LNET_CPT_NUMBER;
1437 return max(nrbs, LNET_NRB_SMALL_MIN);
1441 lnet_nrb_large_calculate(int npages)
1443 int nrbs = LNET_NRB_LARGE;
1445 if (large_router_buffers < 0) {
1446 LCONSOLE_ERROR_MSG(0x10c,
1447 "large_router_buffers=%d invalid when "
1448 "routing enabled\n", large_router_buffers);
1452 if (large_router_buffers > 0)
1453 nrbs = large_router_buffers;
1455 nrbs /= LNET_CPT_NUMBER;
1456 return max(nrbs, LNET_NRB_LARGE_MIN);
1460 lnet_rtrpools_alloc(int im_a_router)
1462 lnet_rtrbufpool_t *rtrp;
1463 int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
1464 int small_pages = 1;
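/* large router buffers must hold a full LNET_MTU payload, so large_pages is
 * LNET_MTU rounded up to whole pages; small buffers use a single page and
 * tiny buffers carry no payload pages at all */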
1471 if (!strcmp(forwarding, "")) {
1472 /* not set either way */
1475 } else if (!strcmp(forwarding, "disabled")) {
1476 /* explicitly disabled */
1478 } else if (!strcmp(forwarding, "enabled")) {
1479 /* explicitly enabled */
1481 LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
1482 "'enabled' or 'disabled'\n");
1486 nrb_tiny = lnet_nrb_tiny_calculate(0);
1490 nrb_small = lnet_nrb_small_calculate(small_pages);
1494 nrb_large = lnet_nrb_large_calculate(large_pages);
1498 the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
1500 sizeof(lnet_rtrbufpool_t));
1501 if (the_lnet.ln_rtrpools == NULL) {
1502 LCONSOLE_ERROR_MSG(0x10c,
1503 "Failed to initialize router buffe pool\n");
1507 cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
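/* per-CPT pool array: [0] tiny (0 pages), [1] small (1 page),
 * [2] large (LNET_MTU worth of pages) */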
1508 lnet_rtrpool_init(&rtrp[0], 0);
1509 rc = lnet_rtrpool_alloc_bufs(&rtrp[0], nrb_tiny, i);
1513 lnet_rtrpool_init(&rtrp[1], small_pages);
1514 rc = lnet_rtrpool_alloc_bufs(&rtrp[1], nrb_small, i);
1518 lnet_rtrpool_init(&rtrp[2], large_pages);
1519 rc = lnet_rtrpool_alloc_bufs(&rtrp[2], nrb_large, i);
1524 lnet_net_lock(LNET_LOCK_EX);
1525 the_lnet.ln_routing = 1;
1526 lnet_net_unlock(LNET_LOCK_EX);
1531 lnet_rtrpools_free();
1536 lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
1538 struct lnet_peer *lp = NULL;
1539 cfs_time_t now = cfs_time_current();
1540 int cpt = lnet_cpt_of_nid(nid);
1542 LASSERT (!cfs_in_interrupt ());
1544 CDEBUG (D_NET, "%s notifying %s: %s\n",
1545 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1546 libcfs_nid2str(nid),
1547 alive ? "up" : "down");
1550 LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
1551 CWARN ("Ignoring notification of %s %s by %s (different net)\n",
1552 libcfs_nid2str(nid), alive ? "birth" : "death",
1553 libcfs_nid2str(ni->ni_nid));
1557 /* can't do predictions... */
1558 if (cfs_time_after(when, now)) {
1559 CWARN ("Ignoring prediction from %s of %s %s "
1560 "%ld seconds in the future\n",
1561 (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
1562 libcfs_nid2str(nid), alive ? "up" : "down",
1563 cfs_duration_sec(cfs_time_sub(when, now)));
1567 if (ni != NULL && !alive && /* LND telling me she's down */
1568 !auto_down) { /* auto-down disabled */
1569 CDEBUG(D_NET, "Auto-down disabled\n");
1575 if (the_lnet.ln_shutdown) {
1576 lnet_net_unlock(cpt);
1580 lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
1583 lnet_net_unlock(cpt);
1584 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
1588 /* We can't fully trust the LND to report the exact peer last_alive
1589 * when it notifies us about a dead peer. For example, ksocklnd can
1590 * call us with when == _time_when_the_node_was_booted_ if no
1591 * connections were successfully established */
1592 if (ni != NULL && !alive && when < lp->lp_last_alive)
1593 when = lp->lp_last_alive;
1595 lnet_notify_locked(lp, ni == NULL, alive, when);
1597 lnet_ni_notify_locked(ni, lp);
1599 lnet_peer_decref_locked(lp);
1601 lnet_net_unlock(cpt);
1604 EXPORT_SYMBOL(lnet_notify);
1607 lnet_get_tunables (void)
1615 lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
1621 lnet_router_checker (void)
1623 static time_t last = 0;
1624 static int running = 0;
1626 time_t now = cfs_time_current_sec();
1627 int interval = now - last;
1632 /* It's no use to call me again within a sec - all intervals and
1633 * timeouts are measured in seconds */
1634 if (last != 0 && interval < 2)
1638 interval > MAX(live_router_check_interval,
1639 dead_router_check_interval))
1640 CNETERR("Checker(%d/%d) not called for %d seconds\n",
1641 live_router_check_interval, dead_router_check_interval,
1644 LASSERT(LNET_CPT_NUMBER == 1);
1647 LASSERT(!running); /* recursion check */
1653 if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING)
1654 lnet_prune_rc_data(0); /* unlink all rcd and nowait */
1656 /* consume all pending events */
1661 /* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
1662 * recursion breaker in LNetEQPoll would fail */
1663 rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i);
1664 if (rc == 0) /* no event pending */
1667 /* NB a lost SENT prevents me from pinging a router again */
1668 if (rc == -EOVERFLOW) {
1669 CERROR("Dropped an event!!!\n");
1675 lnet_router_checker_event(&ev);
1678 if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) {
1679 lnet_prune_rc_data(1); /* release rcd */
1680 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
1685 LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
1689 version = the_lnet.ln_routers_version;
1690 cfs_list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
1691 lnet_ping_router_locked(rtr);
1692 LASSERT (version == the_lnet.ln_routers_version);
1697 running = 0; /* lock only needed for the recursion check */
1701 /* NB lnet_peers_start_down depends on me,
1702 * so must be called before any peer creation */
1704 lnet_get_tunables (void)
1708 s = getenv("LNET_ROUTER_PING_TIMEOUT");
1709 if (s != NULL) router_ping_timeout = atoi(s);
1711 s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
1712 if (s != NULL) live_router_check_interval = atoi(s);
1714 s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
1715 if (s != NULL) dead_router_check_interval = atoi(s);
1717 /* This replaces the old lnd_notify mechanism */
1718 check_routers_before_use = 1;
1719 if (dead_router_check_interval <= 0)
1720 dead_router_check_interval = 30;
1724 lnet_rtrpools_free(void)
1729 lnet_rtrpools_alloc(int im_a_router)