/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * lnet/lnet/router.c
 *
 * Copyright (C) 2008 Sun Microsystems, Inc. All rights reserved
 *
 *   This file is part of Portals
 *   http://sourceforge.net/projects/sandiaportals/
 *
 *   Portals is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Portals is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Portals; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_LNET
#include <lnet/lib-lnet.h>

#if defined(__KERNEL__) && defined(LNET_ROUTER)

static char *forwarding = "";
CFS_MODULE_PARM(forwarding, "s", charp, 0444,
                "Explicitly enable/disable forwarding between networks");

static int tiny_router_buffers = 1024;
CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
                "# of 0 payload messages to buffer in the router");
static int small_router_buffers = 8192;
CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
                "# of small (1 page) messages to buffer in the router");
static int large_router_buffers = 512;
CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
                "# of large messages to buffer in the router");
static int peer_buffer_credits = 0;
CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
                "# router buffer credits per peer");

static int auto_down = 1;
CFS_MODULE_PARM(auto_down, "i", int, 0444,
                "Automatically mark peers down on comms error");

static int check_routers_before_use = 0;
CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
                "Assume routers are down and ping them before use");

static int dead_router_check_interval = 0;
CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0444,
                "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 0;
CFS_MODULE_PARM(live_router_check_interval, "i", int, 0444,
                "Seconds between live router health checks (<= 0 to disable)");

static int router_ping_timeout = 50;
CFS_MODULE_PARM(router_ping_timeout, "i", int, 0444,
                "Seconds to wait for the reply to a router health query");

int
lnet_peers_start_down(void)
{
        return check_routers_before_use;
}

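/* Return the number of router buffer credits to grant this peer: an NI-level
 * override wins, then the peer_buffer_credits module parameter, and otherwise
 * the peer's transmit credit count is used as an approximation. */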
int
lnet_peer_buffer_credits(lnet_ni_t *ni)
{
        /* NI option overrides LNet default */
        if (ni->ni_peerrtrcredits > 0)
                return ni->ni_peerrtrcredits;
        if (peer_buffer_credits > 0)
                return peer_buffer_credits;

        /* As an approximation, allow this peer the same number of router
         * buffers as it is allowed outstanding sends */
        return ni->ni_peertxcredits;
}

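/* Record a peer's aliveness; called with LNET_LOCK held.  Stale or duplicate
 * reports are ignored; otherwise the new state is latched and a notification
 * is flagged for lnet_do_notify() to deliver later. */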
void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        if (when < lp->lp_timestamp) {          /* out of date information */
                CDEBUG(D_NET, "Out of date\n");
                return;
        }

        lp->lp_timestamp = when;                /* update timestamp */
        lp->lp_ping_deadline = 0;               /* disable ping timeout */

        if (lp->lp_alive_count != 0 &&          /* got old news */
            (!lp->lp_alive) == (!alive)) {      /* new date for old news */
                CDEBUG(D_NET, "Old news\n");
                return;
        }

        /* Flag that notification is outstanding */

        lp->lp_alive_count++;
        lp->lp_alive = !(!alive);               /* 1 bit! */
        lp->lp_notify = 1;
        lp->lp_notifylnd |= notifylnd;

        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
}

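/* Deliver any notification flagged by lnet_notify_locked(), calling the LND's
 * lnd_notify() callback when requested.  Only one thread notifies a given
 * peer at a time, so notifications are delivered in order. */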
void
lnet_do_notify (lnet_peer_t *lp)
{
        lnet_ni_t *ni = lp->lp_ni;
        int        alive;
        int        notifylnd;

        LNET_LOCK();

        /* Notify only in 1 thread at any time to ensure ordered notification.
         * NB individual events can be missed; the only guarantee is that you
         * always get the most recent news */

        if (lp->lp_notifying) {
                LNET_UNLOCK();
                return;
        }

        lp->lp_notifying = 1;

        while (lp->lp_notify) {
                alive     = lp->lp_alive;
                notifylnd = lp->lp_notifylnd;

                lp->lp_notifylnd = 0;
                lp->lp_notify    = 0;

                if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
                        LNET_UNLOCK();

                        /* A new notification could happen now; I'll handle it
                         * when control returns to me */

                        (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);

                        LNET_LOCK();
                }
        }

        lp->lp_notifying = 0;

        LNET_UNLOCK();
}

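/* Public entry point for aliveness notifications from an LND (ni != NULL) or
 * from userspace (ni == NULL).  Sanity-checks the report, updates the peer's
 * state under LNET_LOCK and then delivers the notification. */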
int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        lnet_peer_t         *lp = NULL;
        time_t               now = cfs_time_current_sec();

        LASSERT (!in_interrupt ());

        CDEBUG (D_NET, "%s notifying %s: %s\n",
                (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                libcfs_nid2str(nid),
                alive ? "up" : "down");

        if (ni != NULL &&
            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN ("Ignoring notification of %s %s by %s (different net)\n",
                        libcfs_nid2str(nid), alive ? "birth" : "death",
                        libcfs_nid2str(ni->ni_nid));
                return -EINVAL;
        }

        /* can't do predictions... */
        if (when > now) {
                CWARN ("Ignoring prediction from %s of %s %s "
                       "%ld seconds in the future\n",
                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                       libcfs_nid2str(nid), alive ? "up" : "down",
                       when - now);
                return -EINVAL;
        }

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");
                return 0;
        }

        LNET_LOCK();

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* nid not found */
                LNET_UNLOCK();
                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                return 0;
        }

        /* We can't fully trust the LND to report an exact peer last_alive
         * when it notifies us about a dead peer.  For example, ksocklnd can
         * call us with when == _time_when_the_node_was_booted_ if no
         * connections were established successfully */
        if (ni != NULL && !alive && when < lp->lp_last_alive)
                when = lp->lp_last_alive;

        lnet_notify_locked(lp, ni == NULL, alive, when);

        LNET_UNLOCK();

        lnet_do_notify(lp);

        LNET_LOCK();

        lnet_peer_decref_locked(lp);

        LNET_UNLOCK();
        return 0;
}
EXPORT_SYMBOL(lnet_notify);

#else

int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        return -EOPNOTSUPP;
}

void
lnet_notify_locked (lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        return;
}

#endif

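/* Take a router reference on a peer.  On the first reference the peer is
 * added to the_lnet.ln_routers, kept sorted by NID with a simple insertion
 * sort, and the routers list version is bumped.  Called with LNET_LOCK held. */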
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount >= 0);

        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                struct list_head *pos;

                /* a simple insertion sort */
                list_for_each_prev(pos, &the_lnet.ln_routers) {
                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
                                                      lp_rtr_list);

                        if (rtr->lp_nid < lp->lp_nid)
                                break;
                }

                list_add(&lp->lp_rtr_list, pos);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

static void
lnet_rtr_decref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount > 0);

        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}

lnet_remotenet_t *
lnet_find_net_locked (__u32 net)
{
        lnet_remotenet_t *rnet;
        struct list_head *tmp;

        LASSERT (!the_lnet.ln_shutdown);

        list_for_each (tmp, &the_lnet.ln_remote_nets) {
                rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);

                if (rnet->lrn_net == net)
                        return rnet;
        }
        return NULL;
}

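/* Add a route to 'net' via 'gateway' with the given hop count.  Routes to
 * local networks and unreachable gateways are silently ignored; a shorter
 * route replaces all existing routes to the same net, a longer one is
 * dropped, and an equal-hop route is appended unless it is a duplicate. */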
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
        struct list_head     zombies;
        struct list_head    *e;
        lnet_remotenet_t    *rnet;
        lnet_remotenet_t    *rnet2;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        lnet_ni_t           *ni;
        int                  add_route;
        int                  rc;

        CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
               libcfs_net2str(net), hops, libcfs_nid2str(gateway));

        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            hops < 1 || hops > 255)
                return (-EINVAL);

        if (lnet_islocalnet(net))               /* it's a local network */
                return 0;                       /* ignore the route entry */

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                if (route != NULL)
                        LIBCFS_FREE(route, sizeof(*route));
                if (rnet != NULL)
                        LIBCFS_FREE(rnet, sizeof(*rnet));
                return -ENOMEM;
        }

        CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
        rnet->lrn_net = net;
        rnet->lrn_hops = hops;

        LNET_LOCK();

        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway);
        if (rc != 0) {
                LNET_UNLOCK();

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                if (rc == -EHOSTUNREACH)        /* gateway is not on a local net */
                        return 0;               /* ignore the route entry */

                CERROR("Error %d creating route %s %d %s\n", rc,
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                return rc;
        }

        LASSERT (!the_lnet.ln_shutdown);
        CFS_INIT_LIST_HEAD(&zombies);

        rnet2 = lnet_find_net_locked(net);
        if (rnet2 == NULL) {
                /* new network */
                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
                rnet2 = rnet;
        }

        if (hops > rnet2->lrn_hops) {
                /* New route is longer; ignore it */
                add_route = 0;
        } else if (hops < rnet2->lrn_hops) {
                /* new route supersedes all currently known routes to this
                 * net */
                list_add(&zombies, &rnet2->lrn_routes);
                list_del_init(&rnet2->lrn_routes);
                add_route = 1;
        } else {
                add_route = 1;
                /* New route has the same hopcount as existing routes; search
                 * for a duplicate route (it's a NOOP if it is) */
                list_for_each (e, &rnet2->lrn_routes) {
                        route2 = list_entry(e, lnet_route_t, lr_list);

                        if (route2->lr_gateway == route->lr_gateway) {
                                add_route = 0;
                                break;
                        }

                        /* our lookups must be true */
                        LASSERT (route2->lr_gateway->lp_nid != gateway);
                }
        }

        if (add_route) {
                ni = route->lr_gateway->lp_ni;
                lnet_ni_addref_locked(ni);

                LASSERT (rc == 0);
                list_add_tail(&route->lr_list, &rnet2->lrn_routes);
                the_lnet.ln_remote_nets_version++;

                lnet_rtr_addref_locked(route->lr_gateway);

                LNET_UNLOCK();

                /* XXX Assume alive */
                if (ni->ni_lnd->lnd_notify != NULL)
                        (ni->ni_lnd->lnd_notify)(ni, gateway, 1);

                lnet_ni_decref(ni);
        } else {
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        if (rnet != rnet2)
                LIBCFS_FREE(rnet, sizeof(*rnet));

        while (!list_empty(&zombies)) {
                route = list_entry(zombies.next, lnet_route_t, lr_list);
                list_del(&route->lr_list);

                LNET_LOCK();
                lnet_rtr_decref_locked(route->lr_gateway);
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        return rc;
}

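/* Verify that all routes to any given remote net go out via the same local
 * NI; routes to one net through different local interfaces are not supported
 * and cause -EINVAL. */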
int
lnet_check_routes (void)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        struct list_head    *e1;
        struct list_head    *e2;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                route2 = NULL;
                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (route2 == NULL)
                                route2 = route;
                        else if (route->lr_gateway->lp_ni !=
                                 route2->lr_gateway->lp_ni) {
                                LNET_UNLOCK();

                                CERROR("Routes to %s via %s and %s not supported\n",
                                       libcfs_net2str(rnet->lrn_net),
                                       libcfs_nid2str(route->lr_gateway->lp_nid),
                                       libcfs_nid2str(route2->lr_gateway->lp_nid));
                                return -EINVAL;
                        }
                }
        }

        LNET_UNLOCK();
        return 0;
}

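/* Delete routes matching 'net' and 'gw_nid'; wildcard values match
 * everything, so this can remove a single entry, all routes via a gateway,
 * or (via lnet_destroy_routes) every route.  Returns -ENOENT if nothing
 * matched. */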
int
lnet_del_route (__u32 net, lnet_nid_t gw_nid)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        struct list_head    *e1;
        struct list_head    *e2;
        int                  rc = -ENOENT;

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * or a specific route entry (actual NIDs) */

 again:
        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
                      net == rnet->lrn_net))
                        continue;

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (!(gw_nid == LNET_NID_ANY ||
                              gw_nid == route->lr_gateway->lp_nid))
                                continue;

                        list_del(&route->lr_list);
                        the_lnet.ln_remote_nets_version++;

                        if (list_empty(&rnet->lrn_routes))
                                list_del(&rnet->lrn_list);
                        else
                                rnet = NULL;

                        lnet_rtr_decref_locked(route->lr_gateway);
                        lnet_peer_decref_locked(route->lr_gateway);
                        LNET_UNLOCK();

                        LIBCFS_FREE(route, sizeof (*route));

                        if (rnet != NULL)
                                LIBCFS_FREE(rnet, sizeof(*rnet));

                        rc = 0;
                        goto again;
                }
        }

        LNET_UNLOCK();
        return rc;
}

void
lnet_destroy_routes (void)
{
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}

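/* Return the idx'th route in the remote nets table, filling in its net, hop
 * count, gateway NID and the gateway's aliveness.  Used to iterate the
 * routing table; returns -ENOENT when idx runs off the end. */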
int
lnet_get_route (int idx, __u32 *net, __u32 *hops,
               lnet_nid_t *gateway, __u32 *alive)
{
        struct list_head    *e1;
        struct list_head    *e2;
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (idx-- == 0) {
                                *net     = rnet->lrn_net;
                                *hops    = rnet->lrn_hops;
                                *gateway = route->lr_gateway->lp_nid;
                                *alive   = route->lr_gateway->lp_alive;
                                LNET_UNLOCK();
                                return 0;
                        }
                }
        }

        LNET_UNLOCK();
        return -ENOENT;
}

#if defined(__KERNEL__) && defined(LNET_ROUTER)
static void
lnet_router_checker_event (lnet_event_t *event)
{
        /* CAVEAT EMPTOR: I'm called with LNET_LOCKed and I'm not allowed to
         * drop it (that's why I see _every_ event, even ones that would
         * overflow my EQ) */
        lnet_peer_t   *lp;
        lnet_nid_t     nid;

        if (event->unlinked) {
                /* The router checker thread has unlinked the rc_md
                 * and exited. */
                LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
                mutex_up(&the_lnet.ln_rc_signal);
                return;
        }

        LASSERT (event->type == LNET_EVENT_SEND ||
                 event->type == LNET_EVENT_REPLY);

        nid = (event->type == LNET_EVENT_SEND) ?
              event->target.nid : event->initiator.nid;

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* router may have been removed */
                CDEBUG(D_NET, "Router %s not found\n", libcfs_nid2str(nid));
                return;
        }

        if (event->type == LNET_EVENT_SEND)     /* re-enable another ping */
                lp->lp_ping_notsent = 0;

        if (lnet_isrouter(lp) &&                /* ignore if no longer a router */
            (event->status != 0 ||
             event->type == LNET_EVENT_REPLY)) {

                /* A successful REPLY means the router is up.  If _any_ comms
                 * to the router fail I assume it's down (this will happen if
                 * we ping alive routers to try to detect router death before
                 * apps get burned). */

                lnet_notify_locked(lp, 1, (event->status == 0),
                                   cfs_time_current_sec());

                /* The router checker will wake up very shortly and do the
                 * actual notification.
                 * XXX If 'lp' stops being a router before then, it will still
                 * have the notification pending!!! */
        }

        /* This decref will NOT drop LNET_LOCK (it had to have 1 ref when it
         * was in the peer table and I've not dropped the lock, so no-one else
         * can have reduced the refcount) */
        LASSERT(lp->lp_refcount > 1);

        lnet_peer_decref_locked(lp);
}

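/* Main loop of the router checker thread: once a second it walks the router
 * list, pings any router whose check interval has expired via LNetGet() on
 * LNET_RESERVED_PORTAL, times out unanswered pings, and runs any outstanding
 * notifications. */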
static int
lnet_router_checker(void *arg)
{
        static lnet_ping_info_t   pinginfo;

        int                  rc;
        lnet_handle_md_t     mdh;
        lnet_peer_t         *rtr;
        lnet_md_t            md = {0};
        struct list_head    *entry;
        time_t               now;
        lnet_process_id_t    rtr_id;
        int                  secs;

        cfs_daemonize("router_checker");
        cfs_block_allsigs();

        rtr_id.pid = LUSTRE_SRV_LNET_PID;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        /* initialize md content */
        md.start     = &pinginfo;
        md.length    = sizeof(pinginfo);
        md.threshold = LNET_MD_THRESH_INF;
        md.max_size  = 0;
        md.options   = LNET_MD_TRUNCATE;
        md.user_ptr  = NULL;
        md.eq_handle = the_lnet.ln_rc_eqh;

        rc = LNetMDBind(md, LNET_UNLINK, &mdh);

        if (rc < 0) {
                CERROR("Can't bind MD: %d\n", rc);
                the_lnet.ln_rc_state = rc;
                mutex_up(&the_lnet.ln_rc_signal);
                return rc;
        }

        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
        mutex_up(&the_lnet.ln_rc_signal);       /* let my parent go */

        while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
                __u64 version;

                LNET_LOCK();
rescan:
                version = the_lnet.ln_routers_version;

                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        lnet_peer_addref_locked(rtr);

                        now = cfs_time_current_sec();

                        if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
                            now > rtr->lp_ping_deadline)
                                lnet_notify_locked(rtr, 1, 0, now);

                        LNET_UNLOCK();

                        /* Run any outstanding notifications */
                        lnet_do_notify(rtr);

                        if (rtr->lp_alive) {
                                secs = live_router_check_interval;
                        } else {
                                secs = dead_router_check_interval;
                        }
                        if (secs <= 0)
                                secs = 0;

                        if (secs != 0 &&
                            !rtr->lp_ping_notsent &&
                            now > rtr->lp_ping_timestamp + secs) {
                                CDEBUG(D_NET, "Check: %s\n",
                                       libcfs_nid2str(rtr->lp_nid));

                                LNET_LOCK();
                                rtr_id.nid = rtr->lp_nid;
                                rtr->lp_ping_notsent = 1;
                                rtr->lp_ping_timestamp = now;

                                if (rtr->lp_ping_deadline == 0)
                                        rtr->lp_ping_deadline =
                                                now + router_ping_timeout;

                                LNET_UNLOCK();

                                LNetGet(LNET_NID_ANY, mdh, rtr_id,
                                        LNET_RESERVED_PORTAL,
                                        LNET_PROTO_PING_MATCHBITS, 0);
                        }

                        LNET_LOCK();
                        lnet_peer_decref_locked(rtr);

                        if (version != the_lnet.ln_routers_version) {
                                /* the routers list has changed */
                                goto rescan;
                        }
                }

                LNET_UNLOCK();

                /* Calling cfs_pause() here would always add 1 to the load
                 * average, because the kernel counts active tasks as
                 * nr_running + nr_uninterruptible. */
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
        the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;

        rc = LNetMDUnlink(mdh);
        LASSERT (rc == 0);

        /* The unlink event callback will signal final completion */
        return 0;
}


void
lnet_wait_known_routerstate(void)
{
        lnet_peer_t         *rtr;
        struct list_head    *entry;
        int                  all_known;

        for (;;) {
                LNET_LOCK();

                all_known = 1;
                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        if (rtr->lp_alive_count == 0) {
                                all_known = 0;
                                break;
                        }
                }

                LNET_UNLOCK();

                if (all_known)
                        return;

                cfs_pause(cfs_time_seconds(1));
        }
}

void
lnet_router_checker_stop(void)
{
        int       rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING ||
                 the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
                return;

        the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;
        /* block until event callback signals exit */
        mutex_down(&the_lnet.ln_rc_signal);

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);

        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
}

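/* Validate the router-checker tunables, allocate its EQ, start the checker
 * thread and wait for it to come up.  If check_routers_before_use is set,
 * also block until every known router's state has been determined. */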
int
lnet_router_checker_start(void)
{
        int  rc;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (check_routers_before_use &&
            dead_router_check_interval <= 0) {
                LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
                                   " set if 'check_routers_before_use' is set"
                                   "\n");
                return -EINVAL;
        }

        if (live_router_check_interval <= 0 &&
            dead_router_check_interval <= 0)
                return 0;

        init_mutex_locked(&the_lnet.ln_rc_signal);

        /* EQ size doesn't matter; the callback is guaranteed to get every
         * event */
        rc = LNetEQAlloc(1, lnet_router_checker_event,
                         &the_lnet.ln_rc_eqh);
        if (rc != 0) {
                CERROR("Can't allocate EQ: %d\n", rc);
                return -ENOMEM;
        }

        rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
        if (rc < 0) {
                CERROR("Can't start router checker thread: %d\n", rc);
                goto failed;
        }

        mutex_down(&the_lnet.ln_rc_signal);     /* wait for checker to start up */

        rc = the_lnet.ln_rc_state;
        if (rc < 0) {
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                goto failed;
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
        }

        return 0;

 failed:
        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);
        return rc;
}

void
lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
{
        int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);

        while (--npages >= 0)
                cfs_free_page(rb->rb_kiov[npages].kiov_page);

        LIBCFS_FREE(rb, sz);
}

lnet_rtrbuf_t *
lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
        struct page   *page;
        lnet_rtrbuf_t *rb;
        int            i;

        LIBCFS_ALLOC(rb, sz);
        if (rb == NULL)
                return NULL;

        rb->rb_pool = rbp;

        for (i = 0; i < npages; i++) {
                page = cfs_alloc_page(CFS_ALLOC_ZERO | CFS_ALLOC_STD);
                if (page == NULL) {
                        while (--i >= 0)
                                cfs_free_page(rb->rb_kiov[i].kiov_page);

                        LIBCFS_FREE(rb, sz);
                        return NULL;
                }

                rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
                rb->rb_kiov[i].kiov_offset = 0;
                rb->rb_kiov[i].kiov_page = page;
        }

        return rb;
}

void
lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
{
        int            npages = rbp->rbp_npages;
        int            nbuffers = 0;
        lnet_rtrbuf_t *rb;

        LASSERT (list_empty(&rbp->rbp_msgs));
        LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);

        while (!list_empty(&rbp->rbp_bufs)) {
                LASSERT (rbp->rbp_credits > 0);

                rb = list_entry(rbp->rbp_bufs.next,
                                lnet_rtrbuf_t, rb_list);
                list_del(&rb->rb_list);
                lnet_destroy_rtrbuf(rb, npages);
                nbuffers++;
        }

        LASSERT (rbp->rbp_nbuffers == nbuffers);
        LASSERT (rbp->rbp_credits == nbuffers);

        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
}

int
lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
{
        lnet_rtrbuf_t *rb;
        int            i;

        if (rbp->rbp_nbuffers != 0) {
                LASSERT (rbp->rbp_nbuffers == nbufs);
                return 0;
        }

        for (i = 0; i < nbufs; i++) {
                rb = lnet_new_rtrbuf(rbp);

                if (rb == NULL) {
                        CERROR("Failed to allocate %d router bufs of %d pages\n",
                               nbufs, rbp->rbp_npages);
                        return -ENOMEM;
                }

                rbp->rbp_nbuffers++;
                rbp->rbp_credits++;
                rbp->rbp_mincredits++;
                list_add(&rb->rb_list, &rbp->rbp_bufs);

                /* No allocation "under fire" */
                /* Otherwise we'd need code to schedule blocked msgs etc */
                LASSERT (!the_lnet.ln_routing);
        }

        LASSERT (rbp->rbp_credits == nbufs);
        return 0;
}

void
lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
{
        CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
        CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);

        rbp->rbp_npages = npages;
        rbp->rbp_credits = 0;
        rbp->rbp_mincredits = 0;
}

void
lnet_free_rtrpools(void)
{
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[0]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[1]);
        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[2]);
}

void
lnet_init_rtrpools(void)
{
        int small_pages = 1;
        int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;

        lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
        lnet_rtrpool_init(&the_lnet.ln_rtrpools[2], large_pages);
}


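/* Enable routing if configured to do so: decide from the 'forwarding' module
 * parameter (or im_a_router when it is unset), validate the buffer counts,
 * fill the tiny/small/large router buffer pools and then flag ln_routing. */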
int
lnet_alloc_rtrpools(int im_a_router)
{
        int       rc;

        if (!strcmp(forwarding, "")) {
                /* not set either way */
                if (!im_a_router)
                        return 0;
        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */
                return 0;
        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */
        } else {
                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
                                   "'enabled' or 'disabled'\n");
                return -EINVAL;
        }

        if (tiny_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
                                   "routing enabled\n", tiny_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[0],
                                     tiny_router_buffers);
        if (rc != 0)
                goto failed;

        if (small_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10d, "small_router_buffers=%d invalid when"
                                   " routing enabled\n", small_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[1],
                                     small_router_buffers);
        if (rc != 0)
                goto failed;

        if (large_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10e, "large_router_buffers=%d invalid when"
                                   " routing enabled\n", large_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[2],
                                     large_router_buffers);
        if (rc != 0)
                goto failed;

        LNET_LOCK();
        the_lnet.ln_routing = 1;
        LNET_UNLOCK();

        return 0;

 failed:
        lnet_free_rtrpools();
        return rc;
}

#else

int
lnet_peers_start_down(void)
{
        return 0;
}

int
lnet_peer_buffer_credits(lnet_ni_t *ni)
{
        return 0;
}

void
lnet_router_checker_stop(void)
{
        return;
}

int
lnet_router_checker_start(void)
{
        return 0;
}

void
lnet_free_rtrpools (void)
{
}

void
lnet_init_rtrpools (void)
{
}

int
lnet_alloc_rtrpools (int im_a_router)
{
        return 0;
}

#endif