/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2002 Cluster File Systems, Inc.
 *
 * This file is part of Portals
 * http://sourceforge.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Portals; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>

#if defined(__KERNEL__) && defined(LNET_ROUTER)
static char *forwarding = "";
CFS_MODULE_PARM(forwarding, "s", charp, 0444,
                "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers = 512;
CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
                "# of 0 payload messages to buffer in the router");
static int small_router_buffers = 256;
CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
                "# of small (1 page) messages to buffer in the router");
static int large_router_buffers = 32;
CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
                "# of large messages to buffer in the router");

static int auto_down = 1;
CFS_MODULE_PARM(auto_down, "i", int, 0444,
                "Automatically mark peers down on comms error");

static int check_routers_before_use = 0;
CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
                "Assume routers are down and ping them before use");

static int dead_router_check_interval = 0;
CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0444,
                "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 0;
CFS_MODULE_PARM(live_router_check_interval, "i", int, 0444,
                "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
CFS_MODULE_PARM(router_ping_timeout, "i", int, 0444,
                "Seconds to wait for the reply to a router health query");
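
/* Illustrative configuration only: the names above are the real module
 * parameters, but the values below are examples, not recommendations from
 * this file.  A node that should forward between its networks would
 * typically set something like the following (e.g. in /etc/modprobe.conf):
 *
 *      options lnet forwarding=enabled
 *      options lnet tiny_router_buffers=1024 small_router_buffers=512
 *      options lnet live_router_check_interval=60 dead_router_check_interval=60
 */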
int
lnet_peers_start_down(void)
{
        return check_routers_before_use;
}
void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        if (when < lp->lp_timestamp) {          /* out of date information */
                CDEBUG(D_NET, "Out of date\n");
                return;
        }

        lp->lp_timestamp = when;                /* update timestamp */
        lp->lp_ping_deadline = 0;               /* disable ping timeout */

        if (lp->lp_alive_count != 0 &&          /* got old news */
            (!lp->lp_alive) == (!alive)) {      /* new date for old news */
                CDEBUG(D_NET, "Old news\n");
                return;
        }

        /* Flag that notification is outstanding */
        lp->lp_alive_count++;
        lp->lp_alive = !(!alive);               /* 1 bit! */
        lp->lp_notify = 1;
        lp->lp_notifylnd |= notifylnd;

        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
}
void
lnet_do_notify (lnet_peer_t *lp)
{
        lnet_ni_t *ni = lp->lp_ni;
        int        alive;
        int        notifylnd;

        LNET_LOCK();

        /* Notify only in 1 thread at any time to ensure ordered notification.
         * NB individual events can be missed; the only guarantee is that you
         * always get the most recent news */
        if (lp->lp_notifying) {
                LNET_UNLOCK();
                return;
        }

        lp->lp_notifying = 1;

        while (lp->lp_notify) {
                alive     = lp->lp_alive;
                notifylnd = lp->lp_notifylnd;
                lp->lp_notify    = 0;
                lp->lp_notifylnd = 0;

                if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
                        LNET_UNLOCK();
                        /* A new notification could happen now; I'll handle it
                         * when control returns to me */
                        (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
                        LNET_LOCK();
                }
        }

        lp->lp_notifying = 0;
        LNET_UNLOCK();
}
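
/* NB: lnet_notify_locked() above only records the latest state and sets
 * lp_notify; the LND callback itself is issued here with LNET_LOCK dropped,
 * and (per the router checker comments further down) is normally driven by
 * the router checker thread shortly after the state changes. */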
int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        lnet_peer_t *lp = NULL;
        time_t       now = cfs_time_current_sec();

        LASSERT (!in_interrupt ());

        CDEBUG (D_NET, "%s notifying %s: %s\n",
                (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                libcfs_nid2str(nid),
                alive ? "up" : "down");

        if (ni != NULL &&
            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN ("Ignoring notification of %s %s by %s (different net)\n",
                       libcfs_nid2str(nid), alive ? "birth" : "death",
                       libcfs_nid2str(ni->ni_nid));
                return -EINVAL;
        }

        /* can't do predictions... */
        if (when > now) {
                CWARN ("Ignoring prediction from %s of %s %s "
                       "%ld seconds in the future\n",
                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                       libcfs_nid2str(nid), alive ? "up" : "down",
                       (long)(when - now));
                return -EINVAL;
        }

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");
                return 0;
        }

        LNET_LOCK();

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* nid not found */
                LNET_UNLOCK();
                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                return 0;
        }

        lnet_notify_locked(lp, ni == NULL, alive, when);

        LNET_UNLOCK();

        lnet_do_notify(lp);

        LNET_LOCK();
        lnet_peer_decref_locked(lp);
        LNET_UNLOCK();

        return 0;
}
EXPORT_SYMBOL(lnet_notify);
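
/* Illustrative only (not part of this file): an LND that detects a peer
 * failure on one of its connections would report it with a call such as
 *
 *      lnet_notify(ni, peer_nid, 0, cfs_time_current_sec());
 *
 * i.e. "this peer is down as of now".  A userspace notification passes
 * ni == NULL (see the CDEBUG above). */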
#else

int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        return -EOPNOTSUPP;
}

#endif
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount >= 0);

        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                struct list_head *pos;

                /* a simple insertion sort */
                list_for_each_prev(pos, &the_lnet.ln_routers) {
                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
                                                      lp_rtr_list);

                        if (rtr->lp_nid < lp->lp_nid)
                                break;
                }

                list_add(&lp->lp_rtr_list, pos);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}
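
/* NB: the insertion sort above keeps the_lnet.ln_routers ordered by ascending
 * NID; bumping ln_routers_version lets walkers (e.g. the router checker)
 * detect that the list changed under them. */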
static void
lnet_rtr_decref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount > 0);

        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}
lnet_remotenet_t *
lnet_find_net_locked (__u32 net)
{
        lnet_remotenet_t *rnet;
        struct list_head *tmp;

        LASSERT (!the_lnet.ln_shutdown);

        list_for_each (tmp, &the_lnet.ln_remote_nets) {
                rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == net)
                        return rnet;
        }

        return NULL;
}
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
        struct list_head     zombies;
        struct list_head    *e;
        lnet_remotenet_t    *rnet;
        lnet_remotenet_t    *rnet2;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        lnet_ni_t           *ni;
        int                  add_route;
        int                  rc;

        CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
               libcfs_net2str(net), hops, libcfs_nid2str(gateway));

        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            hops < 1 || hops > 255)
                return -EINVAL;

        if (lnet_islocalnet(net))               /* it's a local network */
                return 0;                       /* ignore the route entry */

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                if (route != NULL)
                        LIBCFS_FREE(route, sizeof(*route));
                if (rnet != NULL)
                        LIBCFS_FREE(rnet, sizeof(*rnet));
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&rnet->lrn_routes);
        rnet->lrn_net = net;
        rnet->lrn_hops = hops;

        LNET_LOCK();

        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway);
        if (rc != 0) {
                LNET_UNLOCK();

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                if (rc == -EHOSTUNREACH)        /* gateway is not on a local net */
                        return 0;               /* ignore the route entry */

                CERROR("Error %d creating route %s %d %s\n", rc,
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                return rc;
        }

        LASSERT (!the_lnet.ln_shutdown);
        CFS_INIT_LIST_HEAD(&zombies);

        rnet2 = lnet_find_net_locked(net);
        if (rnet2 == NULL) {
                /* new network */
                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
                rnet2 = rnet;
        }

        if (hops > rnet2->lrn_hops) {
                /* New route is longer; ignore it */
                add_route = 0;
        } else if (hops < rnet2->lrn_hops) {
                /* new route supersedes all currently known routes to this
                 * net */
                list_add(&zombies, &rnet2->lrn_routes);
                list_del_init(&rnet2->lrn_routes);
                add_route = 1;
        } else {
                add_route = 1;
                /* New route has the same hopcount as existing routes; search
                 * for a duplicate route (it's a NOOP if it is) */
                list_for_each (e, &rnet2->lrn_routes) {
                        route2 = list_entry(e, lnet_route_t, lr_list);

                        if (route2->lr_gateway == route->lr_gateway) {
                                add_route = 0;
                                break;
                        }

                        /* our lookups must be true */
                        LASSERT (route2->lr_gateway->lp_nid != gateway);
                }
        }

        if (add_route) {
                ni = route->lr_gateway->lp_ni;
                lnet_ni_addref_locked(ni);

                list_add_tail(&route->lr_list, &rnet2->lrn_routes);
                the_lnet.ln_remote_nets_version++;

                lnet_rtr_addref_locked(route->lr_gateway);

                LNET_UNLOCK();

                /* XXX Assume alive */
                if (ni->ni_lnd->lnd_notify != NULL)
                        (ni->ni_lnd->lnd_notify)(ni, gateway, 1);

                lnet_ni_decref(ni);
        } else {
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        if (rnet != rnet2)
                LIBCFS_FREE(rnet, sizeof(*rnet));

        while (!list_empty(&zombies)) {
                route = list_entry(zombies.next, lnet_route_t, lr_list);
                list_del(&route->lr_list);

                LNET_LOCK();
                lnet_rtr_decref_locked(route->lr_gateway);
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();

                LIBCFS_FREE(route, sizeof(*route));
        }

        return 0;
}
int
lnet_check_routes (void)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        struct list_head    *e1;
        struct list_head    *e2;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                route2 = NULL;
                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (route2 == NULL)
                                route2 = route;
                        else if (route->lr_gateway->lp_ni !=
                                 route2->lr_gateway->lp_ni) {
                                LNET_UNLOCK();

                                CERROR("Routes to %s via %s and %s not supported\n",
                                       libcfs_net2str(rnet->lrn_net),
                                       libcfs_nid2str(route->lr_gateway->lp_nid),
                                       libcfs_nid2str(route2->lr_gateway->lp_nid));
                                return -EINVAL;
                        }
                }
        }

        LNET_UNLOCK();
        return 0;
}
int
lnet_del_route (__u32 net, lnet_nid_t gw_nid)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        struct list_head    *e1;
        struct list_head    *e2;
        int                  rc = -ENOENT;

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * (gw_nid == LNET_NID_ANY) or a specific route entry (actual NIDs
         * for both net and gateway) */

        LNET_LOCK();
 again:
        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
                      net == rnet->lrn_net))
                        continue;

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (!(gw_nid == LNET_NID_ANY ||
                              gw_nid == route->lr_gateway->lp_nid))
                                continue;

                        list_del(&route->lr_list);
                        the_lnet.ln_remote_nets_version++;

                        if (list_empty(&rnet->lrn_routes))
                                list_del(&rnet->lrn_list);
                        else
                                rnet = NULL;

                        lnet_rtr_decref_locked(route->lr_gateway);
                        lnet_peer_decref_locked(route->lr_gateway);
                        LNET_UNLOCK();

                        LIBCFS_FREE(route, sizeof (*route));

                        if (rnet != NULL)
                                LIBCFS_FREE(rnet, sizeof(*rnet));

                        rc = 0;
                        LNET_LOCK();
                        goto again;
                }
        }

        LNET_UNLOCK();
        return rc;
}
void
lnet_destroy_routes (void)
{
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}
int
lnet_get_route (int idx, __u32 *net, __u32 *hops,
                lnet_nid_t *gateway, __u32 *alive)
{
        struct list_head    *e1;
        struct list_head    *e2;
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (idx-- == 0) {
                                *net     = rnet->lrn_net;
                                *hops    = rnet->lrn_hops;
                                *gateway = route->lr_gateway->lp_nid;
                                *alive   = route->lr_gateway->lp_alive;
                                LNET_UNLOCK();
                                return 0;
                        }
                }
        }

        LNET_UNLOCK();
        return -ENOENT;
}
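
/* Illustrative only (assumes the 0 / -ENOENT return convention used above):
 * a caller such as an ioctl handler can enumerate the route table by index
 * until lnet_get_route() fails, e.g.
 *
 *      __u32      net, hops, alive;
 *      lnet_nid_t gw;
 *      int        i;
 *
 *      for (i = 0; lnet_get_route(i, &net, &hops, &gw, &alive) == 0; i++)
 *              CDEBUG(D_NET, "net %s hops %u gw %s %s\n",
 *                     libcfs_net2str(net), hops, libcfs_nid2str(gw),
 *                     alive ? "up" : "down");
 */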
#if defined(__KERNEL__) && defined(LNET_ROUTER)
static void
lnet_router_checker_event (lnet_event_t *event)
{
        /* CAVEAT EMPTOR: I'm called with LNET_LOCKed and I'm not allowed to
         * drop it (that's how come I see _every_ event, even ones that would
         * overflow my EQ) */
        lnet_peer_t   *lp;
        lnet_nid_t     nid;

        if (event->unlinked) {
                /* The router checker thread has unlinked the rc_md
                 * and exited. */
                LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
                mutex_up(&the_lnet.ln_rc_signal);
                return;
        }

        LASSERT (event->type == LNET_EVENT_SEND ||
                 event->type == LNET_EVENT_REPLY);

        nid = (event->type == LNET_EVENT_SEND) ?
              event->target.nid : event->initiator.nid;

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* router may have been removed */
                CDEBUG(D_NET, "Router %s not found\n", libcfs_nid2str(nid));
                return;
        }

        if (event->type == LNET_EVENT_SEND)     /* re-enable another ping */
                lp->lp_ping_notsent = 0;

        if (lnet_isrouter(lp) &&                /* ignore if no longer a router */
            (event->status != 0 ||
             event->type == LNET_EVENT_REPLY)) {

                /* A successful REPLY means the router is up.  If _any_ comms
                 * to the router fail I assume it's down (this will happen if
                 * we ping alive routers to try to detect router death before
                 * apps get burned). */
                lnet_notify_locked(lp, 1, (event->status == 0),
                                   cfs_time_current_sec());

                /* The router checker will wake up very shortly and do the
                 * actual notification.
                 * XXX If 'lp' stops being a router before then, it will still
                 * have the notification pending!!! */
        }

        /* This decref will NOT drop LNET_LOCK (it had to have 1 ref when it
         * was in the peer table and I've not dropped the lock, so no-one else
         * can have reduced the refcount) */
        LASSERT(lp->lp_refcount > 1);
        lnet_peer_decref_locked(lp);
}
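
/* NB: the health check itself is an LNetGet() on LNET_RESERVED_PORTAL with
 * LNET_PROTO_PING_MATCHBITS (see lnet_router_checker() below).  The SEND
 * event re-arms lp_ping_notsent, and the REPLY (or any comms error) feeds
 * lnet_notify_locked() above. */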
static int
lnet_router_checker(void *arg)
{
        static lnet_ping_info_t  pinginfo;

        int                  rc;
        lnet_handle_md_t     mdh;
        lnet_peer_t         *rtr;
        struct list_head    *entry;
        time_t               now;
        lnet_process_id_t    rtr_id;
        int                  secs;

        cfs_daemonize("router_checker");
        cfs_block_allsigs();

        rtr_id.pid = LUSTRE_SRV_LNET_PID;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        rc = LNetMDBind((lnet_md_t){.start     = &pinginfo,
                                    .length    = sizeof(pinginfo),
                                    .threshold = LNET_MD_THRESH_INF,
                                    .options   = LNET_MD_TRUNCATE,
                                    .eq_handle = the_lnet.ln_rc_eqh},
                        LNET_UNLINK,
                        &mdh);

        if (rc < 0) {
                CERROR("Can't bind MD: %d\n", rc);
                the_lnet.ln_rc_state = rc;
                mutex_up(&the_lnet.ln_rc_signal);
                return rc;
        }

        the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
        mutex_up(&the_lnet.ln_rc_signal);       /* let my parent go */

        while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
                int version;

                LNET_LOCK();
 rescan:
                version = the_lnet.ln_routers_version;

                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        lnet_peer_addref_locked(rtr);

                        now = cfs_time_current_sec();

                        if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
                            now > rtr->lp_ping_deadline)
                                lnet_notify_locked(rtr, 1, 0, now);

                        LNET_UNLOCK();

                        /* Run any outstanding notifications */
                        lnet_do_notify(rtr);

                        if (rtr->lp_alive)
                                secs = live_router_check_interval;
                        else
                                secs = dead_router_check_interval;

                        if (secs > 0 &&
                            !rtr->lp_ping_notsent &&
                            now > rtr->lp_ping_timestamp + secs) {
                                CDEBUG(D_NET, "Check: %s\n",
                                       libcfs_nid2str(rtr->lp_nid));

                                LNET_LOCK();
                                rtr_id.nid = rtr->lp_nid;
                                rtr->lp_ping_notsent = 1;
                                rtr->lp_ping_timestamp = now;

                                if (rtr->lp_ping_deadline == 0)
                                        rtr->lp_ping_deadline =
                                                now + router_ping_timeout;

                                LNET_UNLOCK();

                                LNetGet(LNET_NID_ANY, mdh, rtr_id,
                                        LNET_RESERVED_PORTAL,
                                        LNET_PROTO_PING_MATCHBITS, 0);
                        }

                        LNET_LOCK();
                        lnet_peer_decref_locked(rtr);

                        if (version != the_lnet.ln_routers_version) {
                                /* the routers list has changed */
                                goto rescan;
                        }
                }

                LNET_UNLOCK();

                /* Calling cfs_pause() here would always add 1 to the load
                 * average, because the kernel counts # active tasks as
                 * nr_running + nr_uninterruptible; sleep interruptibly
                 * instead. */
                set_current_state(CFS_TASK_INTERRUPTIBLE);
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
        the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;

        rc = LNetMDUnlink(mdh);
        LASSERT (rc == 0);

        /* The unlink event callback will signal final completion */
        return 0;
}
void
lnet_wait_known_routerstate(void)
{
        lnet_peer_t         *rtr;
        struct list_head    *entry;
        int                  all_known;

        for (;;) {
                LNET_LOCK();

                all_known = 1;
                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        if (rtr->lp_alive_count == 0) {
                                all_known = 0;
                                break;
                        }
                }

                LNET_UNLOCK();

                if (all_known)
                        return;

                cfs_pause(cfs_time_seconds(1));
        }
}
void
lnet_router_checker_stop(void)
{
        int rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING ||
                 the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
                return;

        the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;
        /* block until event callback signals exit */
        mutex_down(&the_lnet.ln_rc_signal);

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);

        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
}
int
lnet_router_checker_start(void)
{
        int rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (check_routers_before_use &&
            dead_router_check_interval <= 0) {
                LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
                                   " set if 'check_routers_before_use' is set"
                                   "\n");
                return -EINVAL;
        }

        if (live_router_check_interval <= 0 &&
            dead_router_check_interval <= 0)
                return 0;

        init_mutex_locked(&the_lnet.ln_rc_signal);

        /* EQ size doesn't matter; the callback is guaranteed to get every
         * event */
        rc = LNetEQAlloc(1, lnet_router_checker_event,
                         &the_lnet.ln_rc_eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ: %d\n", rc);
                return -ENOMEM;
        }

        rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
        if (rc < 0) {
                CERROR("Can't start router checker thread: %d\n", rc);
                goto failed;
        }

        mutex_down(&the_lnet.ln_rc_signal);     /* wait for checker to startup */

        rc = the_lnet.ln_rc_state;
        if (rc < 0) {
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                goto failed;
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
        }

        return 0;

 failed:
        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);
        return -ENOMEM;
}
void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
        int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);

        while (--npages >= 0)
                cfs_free_page(rb->rb_kiov[npages].kiov_page);

        LIBCFS_FREE(rb, sz);
}
lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
        cfs_page_t    *page;
        lnet_rtrbuf_t *rb;
        int            i;

        LIBCFS_ALLOC(rb, sz);
        if (rb == NULL)
                return NULL;

        rb->rb_pool = rbp;

        for (i = 0; i < npages; i++) {
                page = cfs_alloc_page(CFS_ALLOC_ZERO | CFS_ALLOC_STD);
                if (page == NULL) {
                        while (--i >= 0)
                                cfs_free_page(rb->rb_kiov[i].kiov_page);

                        LIBCFS_FREE(rb, sz);
                        return NULL;
                }

                rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
        }

        return rb;
}
void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            nbuffers = 0;
        lnet_rtrbuf_t *rb;

        LASSERT (list_empty(&rbp->rbp_msgs));
        LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);

        while (!list_empty(&rbp->rbp_bufs)) {
                LASSERT (rbp->rbp_credits > 0);

                rb = list_entry(rbp->rbp_bufs.next,
                                lnet_rtrbuf_t, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
                nbuffers++;
        }

        LASSERT (rbp->rbp_nbuffers == nbuffers);
        LASSERT (rbp->rbp_credits == nbuffers);

        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}
int
lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
{
        lnet_rtrbuf_t *rb;
        int            i;

        if (rbp->rbp_nbuffers != 0) {
                LASSERT (rbp->rbp_nbuffers == nbufs);
                return 0;
        }

        for (i = 0; i < nbufs; i++) {
                rb = lnet_new_rtrbuf(rbp);

                if (rb == NULL) {
                        CERROR("Failed to allocate %d router bufs of %d pages\n",
                               nbufs, rbp->rbp_npages);
                        return -ENOMEM;
                }

                rbp->rbp_nbuffers++;
                rbp->rbp_credits++;
                rbp->rbp_mincredits++;
                list_add(&rb->rb_list, &rbp->rbp_bufs);

                /* No allocation "under fire" */
                /* Otherwise we'd need code to schedule blocked msgs etc */
                LASSERT (!the_lnet.ln_routing);
        }

        LASSERT (rbp->rbp_credits == nbufs);
        return 0;
}
void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
        CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
        CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);

        rbp->rbp_npages = npages;
        rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
}
void
lnet_free_rtrpools(void)
{
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[0]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[1]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[2]);
}
void
lnet_init_rtrpools(void)
{
        int small_pages = 1;
        int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[2], large_pages);
}
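
/* NB: pool 0 holds "tiny" buffers (0 pages, for 0-payload messages), pool 1
 * holds "small" buffers (1 page) and pool 2 holds "large" buffers (enough
 * pages for LNET_MTU), matching the tiny/small/large_router_buffers module
 * parameters above. */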
int
lnet_alloc_rtrpools(int im_a_router)
{
        int rc;

        if (!strcmp(forwarding, "")) {
                /* not set either way */
                if (!im_a_router)
                        return 0;
        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */
                return 0;
        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */
        } else {
                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
                                   "'enabled' or 'disabled'\n");
                return -EINVAL;
        }

        if (tiny_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
                                   "routing enabled\n", tiny_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[0],
                                     tiny_router_buffers);
        if (rc != 0)
                goto failed;

        if (small_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10d, "small_router_buffers=%d invalid when"
                                   " routing enabled\n", small_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[1],
                                     small_router_buffers);
        if (rc != 0)
                goto failed;

        if (large_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10e, "large_router_buffers=%d invalid when"
                                   " routing enabled\n", large_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[2],
                                     large_router_buffers);
        if (rc != 0)
                goto failed;

        LNET_LOCK();
        the_lnet.ln_routing = 1;
        LNET_UNLOCK();

        return 0;

 failed:
        lnet_free_rtrpools();
        return rc;
}
#else

int
lnet_peers_start_down(void)
{
        return 0;
}

void
lnet_router_checker_stop(void)
{
        return;
}

int
lnet_router_checker_start(void)
{
        return 0;
}

void
lnet_free_rtrpools (void)
{
}

void
lnet_init_rtrpools (void)
{
}

int
lnet_alloc_rtrpools (int im_a_router)
{
        return 0;
}

#endif