/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 *
 *   This file is part of Portals
 *   http://sourceforge.net/projects/sandiaportals/
 *
 *   Portals is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Portals is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Portals; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>

#if defined(__KERNEL__) && defined(LNET_ROUTER)

static char *forwarding = "";
CFS_MODULE_PARM(forwarding, "s", charp, 0444,
                "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers = 1024;
CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
                "# of 0 payload messages to buffer in the router");
static int small_router_buffers = 8192;
CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
                "# of small (1 page) messages to buffer in the router");
static int large_router_buffers = 512;
CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
                "# of large messages to buffer in the router");

static int auto_down = 1;
CFS_MODULE_PARM(auto_down, "i", int, 0444,
                "Automatically mark peers down on comms error");

static int check_routers_before_use = 0;
CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
                "Assume routers are down and ping them before use");

static int dead_router_check_interval = 0;
CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0444,
                "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 0;
CFS_MODULE_PARM(live_router_check_interval, "i", int, 0444,
                "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
CFS_MODULE_PARM(router_ping_timeout, "i", int, 0444,
                "Seconds to wait for the reply to a router health query");
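
/* NB: the router checker thread (below) uses live_router_check_interval and
 * dead_router_check_interval to decide how often to ping each known router,
 * and router_ping_timeout to decide when an unanswered ping means the router
 * should be marked dead. */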

int
lnet_peers_start_down(void)
{
        return check_routers_before_use;
}

void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        if (when < lp->lp_timestamp) {          /* out of date information */
                CDEBUG(D_NET, "Out of date\n");
                return;
        }

        lp->lp_timestamp = when;                /* update timestamp */
        lp->lp_ping_deadline = 0;               /* disable ping timeout */

        if (lp->lp_alive_count != 0 &&          /* got old news */
            (!lp->lp_alive) == (!alive)) {      /* new date for old news */
                CDEBUG(D_NET, "Old news\n");
                return;
        }

        /* Flag that notification is outstanding */

        lp->lp_alive_count++;
        lp->lp_alive = !(!alive);               /* 1 bit! */
        lp->lp_notify = 1;
        lp->lp_notifylnd |= notifylnd;

        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
}

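/* lnet_notify_locked() above only records the most recent state change;
 * the notification itself is delivered to the LND by lnet_do_notify(),
 * which serialises callers so the LND sees events in order. */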
void
lnet_do_notify (lnet_peer_t *lp)
{
        lnet_ni_t *ni = lp->lp_ni;
        int        alive;
        int        notifylnd;

        LNET_LOCK();

        /* Notify only in 1 thread at any time to ensure ordered notification.
         * NB individual events can be missed; the only guarantee is that you
         * always get the most recent news */

        if (lp->lp_notifying) {
                LNET_UNLOCK();
                return;
        }

        lp->lp_notifying = 1;

        while (lp->lp_notify) {
                alive     = lp->lp_alive;
                notifylnd = lp->lp_notifylnd;

                lp->lp_notifylnd = 0;
                lp->lp_notify    = 0;

                if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
                        LNET_UNLOCK();

                        /* A new notification could happen now; I'll handle it
                         * when control returns to me */

                        (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);

                        LNET_LOCK();
                }
        }

        lp->lp_notifying = 0;

        LNET_UNLOCK();
}

int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        lnet_peer_t         *lp = NULL;
        time_t               now = cfs_time_current_sec();

        LASSERT (!in_interrupt ());

        CDEBUG (D_NET, "%s notifying %s: %s\n",
                (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                libcfs_nid2str(nid),
                alive ? "up" : "down");

        if (ni != NULL &&
            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN ("Ignoring notification of %s %s by %s (different net)\n",
                        libcfs_nid2str(nid), alive ? "birth" : "death",
                        libcfs_nid2str(ni->ni_nid));
                return -EINVAL;
        }

        /* can't do predictions... */
        if (when > now) {
                CWARN ("Ignoring prediction from %s of %s %s "
                       "%ld seconds in the future\n",
                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                       libcfs_nid2str(nid), alive ? "up" : "down",
                       when - now);
                return -EINVAL;
        }

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");
                return 0;
        }

        LNET_LOCK();

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* nid not found */
                LNET_UNLOCK();
                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                return 0;
        }

        /* We can't fully trust the LND to report an exact peer last_alive
         * when it notifies us about a dead peer.  For example ksocklnd can
         * call us with when == _time_when_the_node_was_booted_ if no
         * connections were successfully established */
        if (ni != NULL && !alive && when < lp->lp_last_alive)
                when = lp->lp_last_alive;

        lnet_notify_locked(lp, ni == NULL, alive, when);

        LNET_UNLOCK();

        lnet_do_notify(lp);

        LNET_LOCK();

        lnet_peer_decref_locked(lp);

        LNET_UNLOCK();
        return 0;
}
EXPORT_SYMBOL(lnet_notify);

#else

int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        return -EOPNOTSUPP;
}

void
lnet_notify_locked (lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        return;
}

#endif

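/* Router reference counting: a gateway peer joins the_lnet.ln_routers (kept
 * sorted by NID) when its first route is added and leaves it when its last
 * route is removed.  ln_routers_version is bumped on every change so the
 * router checker can detect that the list has moved under it and rescan. */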
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount >= 0);

        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                struct list_head *pos;

                /* a simple insertion sort */
                list_for_each_prev(pos, &the_lnet.ln_routers) {
                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
                                                      lp_rtr_list);

                        if (rtr->lp_nid < lp->lp_nid)
                                break;
                }

                list_add(&lp->lp_rtr_list, pos);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

static void
lnet_rtr_decref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount > 0);

        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

lnet_remotenet_t *
lnet_find_net_locked (__u32 net)
{
        lnet_remotenet_t *rnet;
        struct list_head *tmp;

        LASSERT (!the_lnet.ln_shutdown);

        list_for_each (tmp, &the_lnet.ln_remote_nets) {
                rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == net)
                        return rnet;
        }
        return NULL;
}

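/* Route selection is by hop count: a route with fewer hops to a remote net
 * replaces all existing routes to that net, a route with more hops is
 * ignored, and a route with equal hops is added alongside the others unless
 * it duplicates an existing gateway. */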
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
        struct list_head     zombies;
        struct list_head    *e;
        lnet_remotenet_t    *rnet;
        lnet_remotenet_t    *rnet2;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        lnet_ni_t           *ni;
        int                  add_route;
        int                  rc;

        CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
               libcfs_net2str(net), hops, libcfs_nid2str(gateway));

        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            hops < 1 || hops > 255)
                return (-EINVAL);

        if (lnet_islocalnet(net))               /* it's a local network */
                return 0;                       /* ignore the route entry */

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                if (route != NULL)
                        LIBCFS_FREE(route, sizeof(*route));
                if (rnet != NULL)
                        LIBCFS_FREE(rnet, sizeof(*rnet));
                return -ENOMEM;
        }

        CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
        rnet->lrn_net = net;
        rnet->lrn_hops = hops;

        LNET_LOCK();

        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway);
        if (rc != 0) {
                LNET_UNLOCK();

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                if (rc == -EHOSTUNREACH)        /* gateway is not on a local net */
                        return 0;               /* ignore the route entry */

                CERROR("Error %d creating route %s %d %s\n", rc,
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                return rc;
        }

        LASSERT (!the_lnet.ln_shutdown);
        CFS_INIT_LIST_HEAD(&zombies);

        rnet2 = lnet_find_net_locked(net);
        if (rnet2 == NULL) {
                /* new network */
                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
                rnet2 = rnet;
        }

        if (hops > rnet2->lrn_hops) {
                /* New route is longer; ignore it */
                add_route = 0;
        } else if (hops < rnet2->lrn_hops) {
                /* new route supersedes all currently known routes to this
                 * net */
                list_add(&zombies, &rnet2->lrn_routes);
                list_del_init(&rnet2->lrn_routes);
                add_route = 1;
        } else {
                add_route = 1;
                /* New route has the same hopcount as existing routes; search
                 * for a duplicate route (it's a NOOP if it is) */
                list_for_each (e, &rnet2->lrn_routes) {
                        route2 = list_entry(e, lnet_route_t, lr_list);

                        if (route2->lr_gateway == route->lr_gateway) {
                                add_route = 0;
                                break;
                        }

                        /* our lookups must be true */
                        LASSERT (route2->lr_gateway->lp_nid != gateway);
                }
        }

        if (add_route) {
                ni = route->lr_gateway->lp_ni;
                lnet_ni_addref_locked(ni);

                LASSERT (rc == 0);
                list_add_tail(&route->lr_list, &rnet2->lrn_routes);
                the_lnet.ln_remote_nets_version++;

                lnet_rtr_addref_locked(route->lr_gateway);

                LNET_UNLOCK();

                /* XXX Assume alive */
                if (ni->ni_lnd->lnd_notify != NULL)
                        (ni->ni_lnd->lnd_notify)(ni, gateway, 1);

                lnet_ni_decref(ni);
        } else {
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        if (rnet != rnet2)
                LIBCFS_FREE(rnet, sizeof(*rnet));

        while (!list_empty(&zombies)) {
                route = list_entry(zombies.next, lnet_route_t, lr_list);
                list_del(&route->lr_list);

                LNET_LOCK();
                lnet_rtr_decref_locked(route->lr_gateway);
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        return rc;
}

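/* Verify that every route to a given remote net uses a gateway reached
 * through the same local NI; a mixed configuration is rejected with
 * -EINVAL. */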
int
lnet_check_routes (void)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        struct list_head    *e1;
        struct list_head    *e2;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                route2 = NULL;
                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (route2 == NULL)
                                route2 = route;
                        else if (route->lr_gateway->lp_ni !=
                                 route2->lr_gateway->lp_ni) {
                                LNET_UNLOCK();

                                CERROR("Routes to %s via %s and %s not supported\n",
                                       libcfs_net2str(rnet->lrn_net),
                                       libcfs_nid2str(route->lr_gateway->lp_nid),
                                       libcfs_nid2str(route2->lr_gateway->lp_nid));
                                return -EINVAL;
                        }
                }
        }

        LNET_UNLOCK();
        return 0;
}

int
lnet_del_route (__u32 net, lnet_nid_t gw_nid)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        struct list_head    *e1;
        struct list_head    *e2;
        int                  rc = -ENOENT;

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * or a specific route entry (actual NIDs) */

 again:
        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
                      net == rnet->lrn_net))
                        continue;

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (!(gw_nid == LNET_NID_ANY ||
                              gw_nid == route->lr_gateway->lp_nid))
                                continue;

                        list_del(&route->lr_list);
                        the_lnet.ln_remote_nets_version++;

                        if (list_empty(&rnet->lrn_routes))
                                list_del(&rnet->lrn_list);
                        else
                                rnet = NULL;

                        lnet_rtr_decref_locked(route->lr_gateway);
                        lnet_peer_decref_locked(route->lr_gateway);
                        LNET_UNLOCK();

                        LIBCFS_FREE(route, sizeof (*route));

                        if (rnet != NULL)
                                LIBCFS_FREE(rnet, sizeof(*rnet));

                        rc = 0;
                        goto again;
                }
        }

        LNET_UNLOCK();
        return rc;
}

void
lnet_destroy_routes (void)
{
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}

int
lnet_get_route (int idx, __u32 *net, __u32 *hops,
                lnet_nid_t *gateway, __u32 *alive)
{
        struct list_head    *e1;
        struct list_head    *e2;
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (idx-- == 0) {
                                *net     = rnet->lrn_net;
                                *hops    = rnet->lrn_hops;
                                *gateway = route->lr_gateway->lp_nid;
                                *alive   = route->lr_gateway->lp_alive;
                                LNET_UNLOCK();
                                return 0;
                        }
                }
        }

        LNET_UNLOCK();
        return -ENOENT;
}

#if defined(__KERNEL__) && defined(LNET_ROUTER)
static void
lnet_router_checker_event (lnet_event_t *event)
{
        /* CAVEAT EMPTOR: I'm called with LNET_LOCKed and I'm not allowed to
         * drop it (that's how come I see _every_ event, even ones that would
         * overflow my EQ) */
        lnet_peer_t   *lp;
        lnet_nid_t     nid;

        if (event->unlinked) {
                /* The router checker thread has unlinked the rc_md
                 * and exited. */
                LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
                mutex_up(&the_lnet.ln_rc_signal);
                return;
        }

        LASSERT (event->type == LNET_EVENT_SEND ||
                 event->type == LNET_EVENT_REPLY);

        nid = (event->type == LNET_EVENT_SEND) ?
              event->target.nid : event->initiator.nid;

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* router may have been removed */
                CDEBUG(D_NET, "Router %s not found\n", libcfs_nid2str(nid));
                return;
        }

        if (event->type == LNET_EVENT_SEND)     /* re-enable another ping */
                lp->lp_ping_notsent = 0;

        if (lnet_isrouter(lp) &&                /* ignore if no longer a router */
            (event->status != 0 ||
             event->type == LNET_EVENT_REPLY)) {

                /* A successful REPLY means the router is up.  If _any_ comms
                 * to the router fail I assume it's down (this will happen if
                 * we ping alive routers to try to detect router death before
                 * apps get burned). */

                lnet_notify_locked(lp, 1, (event->status == 0),
                                   cfs_time_current_sec());

                /* The router checker will wake up very shortly and do the
                 * actual notification.
                 * XXX If 'lp' stops being a router before then, it will still
                 * have the notification pending!!! */
        }

        /* This decref will NOT drop LNET_LOCK (it had to have 1 ref when it
         * was in the peer table and I've not dropped the lock, so no-one else
         * can have reduced the refcount) */
        LASSERT(lp->lp_refcount > 1);

        lnet_peer_decref_locked(lp);
}

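/* The router checker thread: binds a single MD over 'pinginfo', then loops
 * over the_lnet.ln_routers, issuing an LNetGet() ping to any router whose
 * check interval has expired and marking a router dead when its ping passes
 * lp_ping_deadline.  Replies and errors come back through
 * lnet_router_checker_event() above. */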
static int
lnet_router_checker(void *arg)
{
        static lnet_ping_info_t   pinginfo;

        int                  rc;
        lnet_handle_md_t     mdh;
        lnet_peer_t         *rtr;
        lnet_md_t            md = {0};
        struct list_head    *entry;
        time_t               now;
        lnet_process_id_t    rtr_id;
        int                  secs;

        cfs_daemonize("router_checker");
        cfs_block_allsigs();

        rtr_id.pid = LUSTRE_SRV_LNET_PID;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        /* initialize md content */
        md.start     = &pinginfo;
        md.length    = sizeof(pinginfo);
        md.threshold = LNET_MD_THRESH_INF;
        md.max_size  = 0;
        md.options   = LNET_MD_TRUNCATE;
        md.user_ptr  = NULL;
        md.eq_handle = the_lnet.ln_rc_eqh;

        rc = LNetMDBind(md, LNET_UNLINK, &mdh);

        if (rc < 0) {
                CERROR("Can't bind MD: %d\n", rc);
                the_lnet.ln_rc_state = rc;
                mutex_up(&the_lnet.ln_rc_signal);
                return rc;
        }

        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
        mutex_up(&the_lnet.ln_rc_signal);       /* let my parent go */

        while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
                __u64 version;

                LNET_LOCK();
rescan:
                version = the_lnet.ln_routers_version;

                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        lnet_peer_addref_locked(rtr);

                        now = cfs_time_current_sec();

                        if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
                            now > rtr->lp_ping_deadline)
                                lnet_notify_locked(rtr, 1, 0, now);

                        LNET_UNLOCK();

                        /* Run any outstanding notifications */
                        lnet_do_notify(rtr);

                        if (rtr->lp_alive) {
                                secs = live_router_check_interval;
                        } else {
                                secs = dead_router_check_interval;
                        }
                        if (secs <= 0)
                                secs = 0;

                        if (secs != 0 &&
                            !rtr->lp_ping_notsent &&
                            now > rtr->lp_ping_timestamp + secs) {
                                CDEBUG(D_NET, "Check: %s\n",
                                       libcfs_nid2str(rtr->lp_nid));

                                LNET_LOCK();
                                rtr_id.nid = rtr->lp_nid;
                                rtr->lp_ping_notsent = 1;
                                rtr->lp_ping_timestamp = now;

                                if (rtr->lp_ping_deadline == 0)
                                        rtr->lp_ping_deadline =
                                                now + router_ping_timeout;

                                LNET_UNLOCK();

                                LNetGet(LNET_NID_ANY, mdh, rtr_id,
                                        LNET_RESERVED_PORTAL,
                                        LNET_PROTO_PING_MATCHBITS, 0);
                        }

                        LNET_LOCK();
                        lnet_peer_decref_locked(rtr);

                        if (version != the_lnet.ln_routers_version) {
                                /* the routers list has changed */
                                goto rescan;
                        }
                }

                LNET_UNLOCK();

                /* Calling cfs_pause() here would always add 1 to the load
                 * average because the kernel counts the number of active tasks
                 * as nr_running + nr_uninterruptible. */
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
        the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;

        rc = LNetMDUnlink(mdh);
        LASSERT (rc == 0);

        /* The unlink event callback will signal final completion */
        return 0;
}

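/* Block until every known router has reported at least one aliveness event
 * (lp_alive_count != 0); used when check_routers_before_use is set. */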
void
lnet_wait_known_routerstate(void)
{
        lnet_peer_t         *rtr;
        struct list_head    *entry;
        int                  all_known;

        for (;;) {
                LNET_LOCK();

                all_known = 1;
                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        if (rtr->lp_alive_count == 0) {
                                all_known = 0;
                                break;
                        }
                }

                LNET_UNLOCK();

                if (all_known)
                        return;

                cfs_pause(cfs_time_seconds(1));
        }
}

void
lnet_router_checker_stop(void)
{
        int       rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING ||
                 the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
                return;

        the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;
        /* block until event callback signals exit */
        mutex_down(&the_lnet.ln_rc_signal);

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);

        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
}

int
lnet_router_checker_start(void)
{
        int  rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (check_routers_before_use &&
            dead_router_check_interval <= 0) {
                LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
                                   " set if 'check_routers_before_use' is set"
                                   "\n");
                return -EINVAL;
        }

        if (live_router_check_interval <= 0 &&
            dead_router_check_interval <= 0)
                return 0;

        init_mutex_locked(&the_lnet.ln_rc_signal);

        /* EQ size doesn't matter; the callback is guaranteed to get every
         * event */
        rc = LNetEQAlloc(1, lnet_router_checker_event,
                         &the_lnet.ln_rc_eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ: %d\n", rc);
                return -ENOMEM;
        }

        rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
        if (rc < 0) {
                CERROR("Can't start router checker thread: %d\n", rc);
                goto failed;
        }

        mutex_down(&the_lnet.ln_rc_signal);     /* wait for checker to start up */

        rc = the_lnet.ln_rc_state;
        if (rc < 0) {
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                goto failed;
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
        }

        return 0;

 failed:
        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);
        return rc;
}

void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
        int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);

        while (--npages >= 0)
                cfs_free_page(rb->rb_kiov[npages].kiov_page);

        LIBCFS_FREE(rb, sz);
}

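/* A router buffer is allocated as an lnet_rtrbuf_t header followed by an
 * array of npages kiov entries, one whole page per entry. */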
lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
        struct page   *page;
        lnet_rtrbuf_t *rb;
        int            i;

        LIBCFS_ALLOC(rb, sz);
        if (rb == NULL)
                return NULL;

        rb->rb_pool = rbp;

        for (i = 0; i < npages; i++) {
                page = cfs_alloc_page(CFS_ALLOC_ZERO | CFS_ALLOC_STD);
                if (page == NULL) {
                        while (--i >= 0)
                                cfs_free_page(rb->rb_kiov[i].kiov_page);

                        LIBCFS_FREE(rb, sz);
                        return NULL;
                }

                rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
        }

        return rb;
}

void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            nbuffers = 0;
        lnet_rtrbuf_t *rb;

        LASSERT (list_empty(&rbp->rbp_msgs));
        LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);

        while (!list_empty(&rbp->rbp_bufs)) {
                LASSERT (rbp->rbp_credits > 0);

                rb = list_entry(rbp->rbp_bufs.next,
                                lnet_rtrbuf_t, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
                nbuffers++;
        }

        LASSERT (rbp->rbp_nbuffers == nbuffers);
        LASSERT (rbp->rbp_credits == nbuffers);

        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}

int
lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
{
        lnet_rtrbuf_t *rb;
        int            i;

        if (rbp->rbp_nbuffers != 0) {
                LASSERT (rbp->rbp_nbuffers == nbufs);
                return 0;
        }

        for (i = 0; i < nbufs; i++) {
                rb = lnet_new_rtrbuf(rbp);

                if (rb == NULL) {
                        CERROR("Failed to allocate %d router bufs of %d pages\n",
                               nbufs, rbp->rbp_npages);
                        return -ENOMEM;
                }

                rbp->rbp_nbuffers++;
                rbp->rbp_credits++;
                rbp->rbp_mincredits++;
                list_add(&rb->rb_list, &rbp->rbp_bufs);

                /* No allocation "under fire" */
                /* Otherwise we'd need code to schedule blocked msgs etc */
                LASSERT (!the_lnet.ln_routing);
        }

        LASSERT (rbp->rbp_credits == nbufs);
        return 0;
}

void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
        CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
        CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);

        rbp->rbp_npages = npages;
        rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
}

void
lnet_free_rtrpools(void)
{
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[0]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[1]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[2]);
}

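/* Three buffer pools: [0] tiny (0 pages, zero-payload messages), [1] small
 * (1 page) and [2] large (enough pages to cover LNET_MTU). */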
void
lnet_init_rtrpools(void)
{
        int small_pages = 1;
        int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[2], large_pages);
}

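/* Enable routing and allocate the buffer pools.  The 'forwarding' module
 * parameter overrides the caller: "disabled" always skips allocation,
 * "enabled" always allocates, and an empty string defers to im_a_router. */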
int
lnet_alloc_rtrpools(int im_a_router)
{
        int       rc;

        if (!strcmp(forwarding, "")) {
                /* not set either way */
                if (!im_a_router)
                        return 0;
        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */
                return 0;
        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */
        } else {
                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
                                   "'enabled' or 'disabled'\n");
                return -EINVAL;
        }

        if (tiny_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
                                   "routing enabled\n", tiny_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[0],
                                     tiny_router_buffers);
        if (rc != 0)
                goto failed;

        if (small_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10d, "small_router_buffers=%d invalid when"
                                   " routing enabled\n", small_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[1],
                                     small_router_buffers);
        if (rc != 0)
                goto failed;

        if (large_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10e, "large_router_buffers=%d invalid when"
                                   " routing enabled\n", large_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[2],
                                     large_router_buffers);
        if (rc != 0)
                goto failed;

        LNET_LOCK();
        the_lnet.ln_routing = 1;
        LNET_UNLOCK();

        return 0;

 failed:
        lnet_free_rtrpools();
        return rc;
}

#else

int
lnet_peers_start_down(void)
{
        return 0;
}

void
lnet_router_checker_stop(void)
{
        return;
}

int
lnet_router_checker_start(void)
{
        return 0;
}

void
lnet_free_rtrpools (void)
{
}

void
lnet_init_rtrpools (void)
{
}

int
lnet_alloc_rtrpools (int im_a_router)
{
        return 0;
}

#endif