Whamcloud gitweb export — fs/lustre-release.git: lnet/lnet/router.c (change i=liang, bug b=13065)
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright  2008 Sun Microsystems, Inc. All rights reserved
5  *
6  *   This file is part of Portals
7  *   http://sourceforge.net/projects/sandiaportals/
8  *
9  *   Portals is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Portals is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Portals; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  *
22  */
23
24 #define DEBUG_SUBSYSTEM S_LNET
25 #include <lnet/lib-lnet.h>
26
27 #if defined(__KERNEL__) && defined(LNET_ROUTER)
28
/* If non-empty, explicitly controls whether this node forwards traffic
 * between the networks it is attached to (string is parsed elsewhere —
 * presumably "enabled"/"disabled"; confirm against the config code). */
static char *forwarding = "";
CFS_MODULE_PARM(forwarding, "s", charp, 0444,
                "Explicitly enable/disable forwarding between networks");

/* Router buffer pool sizes: one pool each for zero-payload, one-page
 * and large forwarded messages. */
static int tiny_router_buffers = 1024;
CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
                "# of 0 payload messages to buffer in the router");
static int small_router_buffers = 8192;
CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
                "# of small (1 page) messages to buffer in the router");
static int large_router_buffers = 512;
CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
                "# of large messages to buffer in the router");

/* 0 selects the default computed in lnet_peer_buffer_credits() */
static int peer_buffer_credits = 0;
CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
                "# router buffer credits per peer");

static int auto_down = 1;
CFS_MODULE_PARM(auto_down, "i", int, 0444,
                "Automatically mark peers down on comms error");
49
50 int
51 lnet_peer_buffer_credits(lnet_ni_t *ni)
52 {
53         /* NI option overrides LNet default */
54         if (ni->ni_peerrtrcredits > 0)
55                 return ni->ni_peerrtrcredits;
56         if (peer_buffer_credits > 0)
57                 return peer_buffer_credits;
58
59         /* As an approximation, allow this peer the same number of router
60          * buffers as it is allowed outstanding sends */
61         return ni->ni_peertxcredits;
62 }
63
/* Forward declaration: router health-checker thread entry point,
 * defined at the bottom of this file. */
static int lnet_router_checker(void *);
66 #else
67
int
lnet_peer_buffer_credits(lnet_ni_t *ni)
{
        /* Routing is compiled out: no router buffers, so no credits. */
        return 0;
}
73
74 #endif
75
/* When set, routers are assumed dead until a ping proves otherwise;
 * also consulted by lnet_peers_start_down() and asserted by the
 * userspace router-checker startup path. */
static int check_routers_before_use = 0;
CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
                "Assume routers are down and ping them before use");

/* Ping cadence for routers currently believed dead / alive;
 * <= 0 disables that class of check (see lnet_router_check_interval()). */
static int dead_router_check_interval = 0;
CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0444,
                "Seconds between dead router health checks (<= 0 to disable)");

static int live_router_check_interval = 0;
CFS_MODULE_PARM(live_router_check_interval, "i", int, 0444,
                "Seconds between live router health checks (<= 0 to disable)");

/* A router whose ping reply hasn't arrived within this many seconds is
 * marked down (see lnet_ping_router_locked()). */
static int router_ping_timeout = 50;
CFS_MODULE_PARM(router_ping_timeout, "i", int, 0444,
                "Seconds to wait for the reply to a router health query");
91
int
lnet_peers_start_down(void)
{
        /* Non-zero if new peers should start in the "down" state until a
         * ping proves them alive; mirrors check_routers_before_use. */
        return check_routers_before_use;
}
97
/* Record an aliveness observation for peer 'lp' made at time 'when'.
 * Stale reports (older than lp_timestamp) and repeats of already-known
 * state are ignored; otherwise the new state is latched and a
 * notification is flagged for lnet_do_notify() to deliver.  Caller
 * holds LNET_LOCK. */
void
lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
{
        if (when < lp->lp_timestamp) {          /* out of date information */
                CDEBUG(D_NET, "Out of date\n");
                return;
        }

        lp->lp_timestamp = when;                /* update timestamp */
        lp->lp_ping_deadline = 0;               /* disable ping timeout */

        if (lp->lp_alive_count != 0 &&          /* got old news */
            (!lp->lp_alive) == (!alive)) {      /* new date for old news */
                CDEBUG(D_NET, "Old news\n");
                return;
        }

        /* Flag that notification is outstanding */

        lp->lp_alive_count++;
        lp->lp_alive = !(!alive);               /* 1 bit! */
        lp->lp_notify = 1;
        lp->lp_notifylnd |= notifylnd;          /* LND callback wanted? */

        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
}
124
/* Deliver any pending aliveness notification for 'lp', calling the
 * LND's notify hook (if any) with LNET_LOCK dropped.  Called without
 * the lock held; takes and releases it internally. */
void
lnet_do_notify (lnet_peer_t *lp)
{
        lnet_ni_t *ni = lp->lp_ni;
        int        alive;
        int        notifylnd;

        LNET_LOCK();

        /* Notify only in 1 thread at any time to ensure ordered notification.
         * NB individual events can be missed; the only guarantee is that you
         * always get the most recent news */

        if (lp->lp_notifying) {
                LNET_UNLOCK();
                return;
        }

        lp->lp_notifying = 1;

        while (lp->lp_notify) {
                /* snapshot and consume the pending notification before
                 * dropping the lock; news arriving meanwhile re-sets it
                 * and keeps this loop going */
                alive     = lp->lp_alive;
                notifylnd = lp->lp_notifylnd;

                lp->lp_notifylnd = 0;
                lp->lp_notify    = 0;

                if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
                        LNET_UNLOCK();

                        /* A new notification could happen now; I'll handle it
                         * when control returns to me */

                        (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);

                        LNET_LOCK();
                }
        }

        lp->lp_notifying = 0;

        LNET_UNLOCK();
}
168
169
/* Take a router reference on 'lp'; on the 0 -> 1 transition, insert it
 * into the_lnet.ln_routers (kept sorted by ascending NID) and bump the
 * routers-list version so scanners know to rescan.  Caller holds
 * LNET_LOCK. */
static void
lnet_rtr_addref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount >= 0);

        lp->lp_rtr_refcount++;
        if (lp->lp_rtr_refcount == 1) {
                struct list_head *pos;

                /* a simple insertion sort: walk backwards to the last entry
                 * with a smaller NID and insert after it */
                list_for_each_prev(pos, &the_lnet.ln_routers) {
                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
                                                      lp_rtr_list);

                        if (rtr->lp_nid < lp->lp_nid)
                                break;
                }

                list_add(&lp->lp_rtr_list, pos);
                /* addref for the_lnet.ln_routers */
                lnet_peer_addref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}
195
/* Drop a router reference on 'lp'; on the 1 -> 0 transition, unhook it
 * from the_lnet.ln_routers, release the list's peer ref, and bump the
 * routers-list version.  Caller holds LNET_LOCK. */
static void
lnet_rtr_decref_locked(lnet_peer_t *lp)
{
        LASSERT (lp->lp_refcount > 0);
        LASSERT (lp->lp_rtr_refcount > 0);

        lp->lp_rtr_refcount--;
        if (lp->lp_rtr_refcount == 0) {
                list_del(&lp->lp_rtr_list);
                /* decref for the_lnet.ln_routers */
                lnet_peer_decref_locked(lp);
                the_lnet.ln_routers_version++;
        }
}
210
211 lnet_remotenet_t *
212 lnet_find_net_locked (__u32 net)
213 {
214         lnet_remotenet_t *rnet;
215         struct list_head *tmp;
216
217         LASSERT (!the_lnet.ln_shutdown);
218
219         list_for_each (tmp, &the_lnet.ln_remote_nets) {
220                 rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
221
222                 if (rnet->lrn_net == net)
223                         return rnet;
224         }
225         return NULL;
226 }
227
/* Add a route to remote net 'net' via 'gateway' with the given hop
 * count.  Routes to local nets or via unreachable gateways are silently
 * ignored (return 0).  Shorter routes supersede (and free) all existing
 * routes to the same net; longer or duplicate routes are dropped.
 * Returns 0 on success/ignore, -EINVAL on malformed arguments, -ENOMEM
 * or another negative errno on failure. */
int
lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
{
        struct list_head     zombies;   /* routes displaced by a shorter path */
        struct list_head    *e;
        lnet_remotenet_t    *rnet;
        lnet_remotenet_t    *rnet2;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        lnet_ni_t           *ni;
        int                  add_route;
        int                  rc;

        CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
               libcfs_net2str(net), hops, libcfs_nid2str(gateway));

        /* reject wildcard/loopback NIDs, routes within one net, and
         * out-of-range hop counts */
        if (gateway == LNET_NID_ANY ||
            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
            net == LNET_NIDNET(LNET_NID_ANY) ||
            LNET_NETTYP(net) == LOLND ||
            LNET_NIDNET(gateway) == net ||
            hops < 1 || hops > 255)
                return (-EINVAL);

        if (lnet_islocalnet(net))               /* it's a local network */
                return 0;                       /* ignore the route entry */

        /* Assume net, route, all new */
        LIBCFS_ALLOC(route, sizeof(*route));
        LIBCFS_ALLOC(rnet, sizeof(*rnet));
        if (route == NULL || rnet == NULL) {
                CERROR("Out of memory creating route %s %d %s\n",
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                if (route != NULL)
                        LIBCFS_FREE(route, sizeof(*route));
                if (rnet != NULL)
                        LIBCFS_FREE(rnet, sizeof(*rnet));
                return -ENOMEM;
        }

        CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
        rnet->lrn_net = net;
        rnet->lrn_hops = hops;

        LNET_LOCK();

        /* takes a ref on the gateway peer on success */
        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway);
        if (rc != 0) {
                LNET_UNLOCK();

                LIBCFS_FREE(route, sizeof(*route));
                LIBCFS_FREE(rnet, sizeof(*rnet));

                if (rc == -EHOSTUNREACH)        /* gateway is not on a local net */
                        return 0;               /* ignore the route entry */

                CERROR("Error %d creating route %s %d %s\n", rc,
                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
                return rc;
        }

        LASSERT (!the_lnet.ln_shutdown);
        CFS_INIT_LIST_HEAD(&zombies);

        rnet2 = lnet_find_net_locked(net);
        if (rnet2 == NULL) {
                /* new network */
                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
                rnet2 = rnet;
        }

        if (hops > rnet2->lrn_hops) {
                /* New route is longer; ignore it */
                add_route = 0;
        } else if (hops < rnet2->lrn_hops) {
                /* new route supersedes all currently known routes to this
                 * net: move them onto the zombie list for disposal below */
                list_add(&zombies, &rnet2->lrn_routes);
                list_del_init(&rnet2->lrn_routes);
                add_route = 1;
        } else {
                add_route = 1;
                /* New route has the same hopcount as existing routes; search
                 * for a duplicate route (it's a NOOP if it is) */
                list_for_each (e, &rnet2->lrn_routes) {
                        route2 = list_entry(e, lnet_route_t, lr_list);

                        if (route2->lr_gateway == route->lr_gateway) {
                                add_route = 0;
                                break;
                        }

                        /* our lookups must be true */
                        LASSERT (route2->lr_gateway->lp_nid != gateway);
                }
        }

        if (add_route) {
                /* hold the gateway's NI across the unlocked LND callback */
                ni = route->lr_gateway->lp_ni;
                lnet_ni_addref_locked(ni);

                LASSERT (rc == 0);
                list_add_tail(&route->lr_list, &rnet2->lrn_routes);
                the_lnet.ln_remote_nets_version++;

                lnet_rtr_addref_locked(route->lr_gateway);

                LNET_UNLOCK();

                /* XXX Assume alive */
                if (ni->ni_lnd->lnd_notify != NULL)
                        (ni->ni_lnd->lnd_notify)(ni, gateway, 1);

                lnet_ni_decref(ni);
        } else {
                /* duplicate/longer route: drop the peer ref from
                 * lnet_nid2peer_locked() and discard the unused route */
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        /* free the speculative net descriptor if an existing one was used */
        if (rnet != rnet2)
                LIBCFS_FREE(rnet, sizeof(*rnet));

        /* dispose of routes displaced by a shorter path */
        while (!list_empty(&zombies)) {
                route = list_entry(zombies.next, lnet_route_t, lr_list);
                list_del(&route->lr_list);

                LNET_LOCK();
                lnet_rtr_decref_locked(route->lr_gateway);
                lnet_peer_decref_locked(route->lr_gateway);
                LNET_UNLOCK();
                LIBCFS_FREE(route, sizeof(*route));
        }

        return rc;
}
364
/* Verify that every remote net is reached via gateways on a single
 * local NI: routing one net through multiple local interfaces is not
 * supported.  Returns 0 if consistent, -EINVAL (with a console error)
 * on the first offending pair found. */
int
lnet_check_routes (void)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        lnet_route_t        *route2;
        struct list_head    *e1;
        struct list_head    *e2;

        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                /* route2 remembers the first route to this net; all
                 * others must use the same local NI */
                route2 = NULL;
                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (route2 == NULL)
                                route2 = route;
                        else if (route->lr_gateway->lp_ni !=
                                 route2->lr_gateway->lp_ni) {
                                LNET_UNLOCK();

                                CERROR("Routes to %s via %s and %s not supported\n",
                                       libcfs_net2str(rnet->lrn_net),
                                       libcfs_nid2str(route->lr_gateway->lp_nid),
                                       libcfs_nid2str(route2->lr_gateway->lp_nid));
                                return -EINVAL;
                        }
                }
        }

        LNET_UNLOCK();
        return 0;
}
401
/* Delete matching route(s).  'net' and/or 'gw_nid' may be wildcards
 * (LNET_NIDNET(LNET_NID_ANY) / LNET_NID_ANY).  Returns 0 if at least
 * one route was removed, -ENOENT if nothing matched.  Because the lock
 * must be dropped to free each route, the scan restarts from the top
 * after every deletion. */
int
lnet_del_route (__u32 net, lnet_nid_t gw_nid)
{
        lnet_remotenet_t    *rnet;
        lnet_route_t        *route;
        struct list_head    *e1;
        struct list_head    *e2;
        int                  rc = -ENOENT;

        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
               libcfs_net2str(net), libcfs_nid2str(gw_nid));

        /* NB Caller may specify either all routes via the given gateway
         * or a specific route entry (both given as actual NIDs) */

 again:
        LNET_LOCK();

        list_for_each (e1, &the_lnet.ln_remote_nets) {
                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);

                if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
                      net == rnet->lrn_net))
                        continue;

                list_for_each (e2, &rnet->lrn_routes) {
                        route = list_entry(e2, lnet_route_t, lr_list);

                        if (!(gw_nid == LNET_NID_ANY ||
                              gw_nid == route->lr_gateway->lp_nid))
                                continue;

                        list_del(&route->lr_list);
                        the_lnet.ln_remote_nets_version++;

                        /* if that was the net's last route, unhook the net
                         * descriptor too (rnet != NULL below means "free
                         * it"); otherwise NULL rnet so it survives */
                        if (list_empty(&rnet->lrn_routes))
                                list_del(&rnet->lrn_list);
                        else
                                rnet = NULL;

                        lnet_rtr_decref_locked(route->lr_gateway);
                        lnet_peer_decref_locked(route->lr_gateway);
                        LNET_UNLOCK();

                        LIBCFS_FREE(route, sizeof (*route));

                        if (rnet != NULL)
                                LIBCFS_FREE(rnet, sizeof(*rnet));

                        rc = 0;
                        /* lock was dropped: rescan from the beginning */
                        goto again;
                }
        }

        LNET_UNLOCK();
        return rc;
}
459
void
lnet_destroy_routes (void)
{
        /* Remove every route: wildcard net and gateway match everything. */
        lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
}
465
466 int
467 lnet_get_route (int idx, __u32 *net, __u32 *hops,
468                lnet_nid_t *gateway, __u32 *alive)
469 {
470         struct list_head    *e1;
471         struct list_head    *e2;
472         lnet_remotenet_t    *rnet;
473         lnet_route_t        *route;
474
475         LNET_LOCK();
476
477         list_for_each (e1, &the_lnet.ln_remote_nets) {
478                 rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
479
480                 list_for_each (e2, &rnet->lrn_routes) {
481                         route = list_entry(e2, lnet_route_t, lr_list);
482
483                         if (idx-- == 0) {
484                                 *net     = rnet->lrn_net;
485                                 *hops    = rnet->lrn_hops;
486                                 *gateway = route->lr_gateway->lp_nid;
487                                 *alive   = route->lr_gateway->lp_alive;
488                                 LNET_UNLOCK();
489                                 return 0;
490                         }
491                 }
492         }
493
494         LNET_UNLOCK();
495         return -ENOENT;
496 }
497
/* Block until the aliveness of every known router has been determined
 * at least once (lp_alive_count != 0 for all).  In userspace the
 * router checker has no thread of its own, so it is cranked by hand
 * each second while waiting. */
void
lnet_wait_known_routerstate(void)
{
        lnet_peer_t         *rtr;
        struct list_head    *entry;
        int                  all_known;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        for (;;) {
                LNET_LOCK();

                all_known = 1;
                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);

                        if (rtr->lp_alive_count == 0) {
                                all_known = 0;
                                break;
                        }
                }

                LNET_UNLOCK();

                if (all_known)
                        return;

#ifndef __KERNEL__
                lnet_router_checker();
#endif
                cfs_pause(cfs_time_seconds(1));
        }
}
531
/* EQ callback for router-checker pings (SEND/REPLY events and the final
 * MD unlink). */
static void
lnet_router_checker_event (lnet_event_t *event)
{
        /* CAVEAT EMPTOR: I'm called with LNET_LOCKed and I'm not allowed to
         * drop it (that's how come I see _every_ event, even ones that would
         * overflow my EQ) */
        lnet_peer_t   *lp;
        lnet_nid_t     nid;

        if (event->unlinked) {
                /* The router checker thread has unlinked the rc_md
                 * and exited. */
                LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
#ifdef __KERNEL__
                mutex_up(&the_lnet.ln_rc_signal);
#endif
                return;
        }

        LASSERT (event->type == LNET_EVENT_SEND ||
                 event->type == LNET_EVENT_REPLY);

        /* a SEND names the pinged router as target; a REPLY names it as
         * initiator */
        nid = (event->type == LNET_EVENT_SEND) ?
              event->target.nid : event->initiator.nid;

        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* router may have been removed */
                CDEBUG(D_NET, "Router %s not found\n", libcfs_nid2str(nid));
                return;
        }

        if (event->type == LNET_EVENT_SEND)     /* re-enable another ping */
                lp->lp_ping_notsent = 0;

        if (lnet_isrouter(lp) &&                /* ignore if no longer a router */
            (event->status != 0 ||
             event->type == LNET_EVENT_REPLY)) {

                /* A successful REPLY means the router is up.  If _any_ comms
                 * to the router fail I assume it's down (this will happen if
                 * we ping alive routers to try to detect router death before
                 * apps get burned). */

                lnet_notify_locked(lp, 1, (event->status == 0),
                                   cfs_time_current_sec());

                /* The router checker will wake up very shortly and do the
                 * actual notification.
                 * XXX If 'lp' stops being a router before then, it will still
                 * have the notification pending!!! */
        }

        /* This decref will NOT drop LNET_LOCK (it had to have 1 ref when it
         * was in the peer table and I've not dropped the lock, so no-one else
         * can have reduced the refcount) */
        LASSERT(lp->lp_refcount > 1);

        lnet_peer_decref_locked(lp);
}
593
594 static int
595 lnet_router_check_interval (lnet_peer_t *rtr)
596 {
597         int secs;
598
599         secs = rtr->lp_alive ? live_router_check_interval :
600                                dead_router_check_interval;
601         if (secs < 0)
602                 secs = 0;
603
604         return secs;
605 }
606
/* Health-check one router: expire an overdue ping (marking the router
 * down), deliver pending notifications, and issue a fresh ping if its
 * check interval has elapsed.  Called with LNET_LOCK held; the lock is
 * dropped and retaken internally, so the caller must revalidate any
 * list position afterwards. */
static void
lnet_ping_router_locked (lnet_peer_t *rtr)
{
        lnet_process_id_t id;
        int               secs;
        time_t            now = cfs_time_current_sec();

        /* hold the router across the lock drops below */
        lnet_peer_addref_locked(rtr);

        if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
            now > rtr->lp_ping_deadline)
                lnet_notify_locked(rtr, 1, 0, now);

        LNET_UNLOCK();

        /* Run any outstanding notifications */
        lnet_do_notify(rtr);

        LNET_LOCK();

        secs = lnet_router_check_interval(rtr);

        CDEBUG(D_NET,
               "rtr %s %d: deadline %lu ping_notsent %d alive %d "
               "alive_count %d lp_ping_timestamp %lu\n",
               libcfs_nid2str(rtr->lp_nid), secs,
               rtr->lp_ping_deadline, rtr->lp_ping_notsent,
               rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);

        if (secs != 0 && !rtr->lp_ping_notsent &&
            now > rtr->lp_ping_timestamp + secs) {
                id.nid = rtr->lp_nid;
                id.pid = LUSTRE_SRV_LNET_PID;
                CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));

                rtr->lp_ping_notsent   = 1;
                rtr->lp_ping_timestamp = now;

                /* only arm the timeout if no ping is already outstanding */
                if (rtr->lp_ping_deadline == 0)
                        rtr->lp_ping_deadline = now + router_ping_timeout;

                LNET_UNLOCK();

                LNetGet(LNET_NID_ANY, the_lnet.ln_rc_mdh, id,
                        LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0);

                LNET_LOCK();
        }

        lnet_peer_decref_locked(rtr);
        return;
}
659
/* Start the router health checker: allocate its event queue, bind the
 * shared ping reply MD, and (in the kernel) spawn the checker thread.
 * Returns 0 when the checker started or was not needed, negative errno
 * on failure. */
int
lnet_router_checker_start(void)
{
        /* scratch buffer all ping replies land in (contents unused) */
        static lnet_ping_info_t pinginfo;

        lnet_md_t    md;
        int          rc;
        int          eqsz;
#ifndef __KERNEL__
        lnet_peer_t *rtr;
        __u64        version;
        int          nrtr = 0;
        int          router_checker_max_eqsize = 10240;

        LASSERT (check_routers_before_use);
        LASSERT (dead_router_check_interval > 0);

        LNET_LOCK();

        /* As an approximation, allow each router the same number of
         * outstanding events as it is allowed outstanding sends */
        eqsz = 0;
        version = the_lnet.ln_routers_version;
        list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
                lnet_ni_t         *ni = rtr->lp_ni;
                lnet_process_id_t  id;

                nrtr++;
                eqsz += ni->ni_peertxcredits;

                /* one async ping reply per router */
                id.nid = rtr->lp_nid;
                id.pid = LUSTRE_SRV_LNET_PID;

                LNET_UNLOCK();

                rc = LNetSetAsync(id, 1);
                if (rc != 0) {
                        CWARN("LNetSetAsync %s failed: %d\n",
                              libcfs_id2str(id), rc);
                        return rc;
                }

                LNET_LOCK();
                /* NB router list doesn't change in userspace */
                LASSERT (version == the_lnet.ln_routers_version);
        }

        LNET_UNLOCK();

        if (nrtr == 0) {
                CDEBUG(D_NET,
                       "No router found, not starting router checker\n");
                return 0;
        }

        /* at least allow a SENT and a REPLY per router */
        if (router_checker_max_eqsize < 2 * nrtr)
                router_checker_max_eqsize = 2 * nrtr;

        LASSERT (eqsz > 0);
        if (eqsz > router_checker_max_eqsize)
                eqsz = router_checker_max_eqsize;
#endif

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);

        if (check_routers_before_use &&
            dead_router_check_interval <= 0) {
                LCONSOLE_ERROR_MSG(0x10a, "'dead_router_check_interval' must be"
                                   " set if 'check_routers_before_use' is set"
                                   "\n");
                return -EINVAL;
        }

        if (live_router_check_interval <= 0 &&
            dead_router_check_interval <= 0)
                return 0;                       /* checking disabled */

#ifdef __KERNEL__
        init_mutex_locked(&the_lnet.ln_rc_signal);
        /* EQ size doesn't matter; the callback is guaranteed to get every
         * event */
        eqsz = 1;
        rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
                         &the_lnet.ln_rc_eqh);
#else
        rc = LNetEQAlloc(eqsz, LNET_EQ_HANDLER_NONE,
                         &the_lnet.ln_rc_eqh);
#endif
        if (rc != 0) {
                CERROR("Can't allocate EQ(%d): %d\n", eqsz, rc);
                return -ENOMEM;
        }

        /* TRUNCATE lets replies of any length land in the scratch buffer */
        memset(&md, 0, sizeof(md));
        md.start     = &pinginfo;
        md.length    = sizeof(pinginfo);
        md.options   = LNET_MD_TRUNCATE;
        md.threshold = LNET_MD_THRESH_INF;
        md.eq_handle = the_lnet.ln_rc_eqh;
        rc = LNetMDBind(md, LNET_UNLINK, &the_lnet.ln_rc_mdh);
        if (rc < 0) {
                CERROR("Can't bind MD: %d\n", rc);
                rc = LNetEQFree(the_lnet.ln_rc_eqh);
                LASSERT (rc == 0);
                return -ENOMEM;
        }
        LASSERT (rc == 0);

        the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
#ifdef __KERNEL__
        rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
        if (rc < 0) {
                CERROR("Can't start router checker thread: %d\n", rc);
                /* unwind: unlink the MD and wait for the event callback to
                 * confirm before freeing the EQ */
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
                rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
                LASSERT (rc == 0);
                /* block until event callback signals exit */
                mutex_down(&the_lnet.ln_rc_signal);
                rc = LNetEQFree(the_lnet.ln_rc_eqh);
                LASSERT (rc == 0);
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                return -ENOMEM;
        }
#endif

        if (check_routers_before_use) {
                /* Note that a helpful side-effect of pinging all known routers
                 * at startup is that it makes them drop stale connections they
                 * may have to a previous instance of me. */
                lnet_wait_known_routerstate();
        }

        return 0;
}
796
/* Shut the router checker down: signal the thread to stop (kernel) or
 * drive the state machine by hand (userspace) until the ping MD is
 * unlinked, then free the EQ.  No-op if the checker never started. */
void
lnet_router_checker_stop (void)
{
        int rc;

        if (the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN)
                return;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
        the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;

#ifdef __KERNEL__
        /* block until event callback signals exit */
        mutex_down(&the_lnet.ln_rc_signal);
#else
        while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
                lnet_router_checker();
                cfs_pause(cfs_time_seconds(1));
        }
#endif
        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);

        rc = LNetEQFree(the_lnet.ln_rc_eqh);
        LASSERT (rc == 0);
        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
        return;
}
824
825 #if defined(__KERNEL__) && defined(LNET_ROUTER)
826
/* Kernel thread entry point: pings every known router roughly once a
 * second until told to stop, then unlinks the ping MD; the resulting
 * unlink event signals final completion to lnet_router_checker_stop(). */
static int
lnet_router_checker(void *arg)
{
        int                rc;
        lnet_peer_t       *rtr;
        struct list_head  *entry;
        lnet_process_id_t  rtr_id;

        cfs_daemonize("router_checker");
        cfs_block_allsigs();

        rtr_id.pid = LUSTRE_SRV_LNET_PID;

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
                __u64 version;

                LNET_LOCK();
rescan:
                version = the_lnet.ln_routers_version;

                list_for_each (entry, &the_lnet.ln_routers) {
                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
                        lnet_ping_router_locked(rtr);

                        /* NB dropped lock */
                        if (version != the_lnet.ln_routers_version) {
                                /* the routers list has changed */
                                goto rescan;
                        }
                }

                LNET_UNLOCK();

                /* Call cfs_pause() here always adds 1 to load average
                 * because kernel counts # active tasks as nr_running
                 * + nr_uninterruptible. */
                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
                                     cfs_time_seconds(1));
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
        the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;

        rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
        LASSERT (rc == 0);

        /* The unlink event callback will signal final completion */
        return 0;
}
878
879 void
880 lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
881 {
882         int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
883
884         while (--npages >= 0)
885                 cfs_free_page(rb->rb_kiov[npages].kiov_page);
886
887         LIBCFS_FREE(rb, sz);
888 }
889
890 lnet_rtrbuf_t *
891 lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
892 {
893         int            npages = rbp->rbp_npages;
894         int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
895         struct page   *page;
896         lnet_rtrbuf_t *rb;
897         int            i;
898
899         LIBCFS_ALLOC(rb, sz);
900         if (rb == NULL)
901                 return NULL;
902
903         rb->rb_pool = rbp;
904
905         for (i = 0; i < npages; i++) {
906                 page = cfs_alloc_page(CFS_ALLOC_ZERO | CFS_ALLOC_STD);
907                 if (page == NULL) {
908                         while (--i >= 0)
909                                 cfs_free_page(rb->rb_kiov[i].kiov_page);
910
911                         LIBCFS_FREE(rb, sz);
912                         return NULL;
913                 }
914
915                 rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
916                 rb->rb_kiov[i].kiov_offset = 0;
917                 rb->rb_kiov[i].kiov_page = page;
918         }
919
920         return rb;
921 }
922
923 void
924 lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
925 {
926         int            npages = rbp->rbp_npages;
927         int            nbuffers = 0;
928         lnet_rtrbuf_t *rb;
929
930         LASSERT (list_empty(&rbp->rbp_msgs));
931         LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
932
933         while (!list_empty(&rbp->rbp_bufs)) {
934                 LASSERT (rbp->rbp_credits > 0);
935
936                 rb = list_entry(rbp->rbp_bufs.next,
937                                 lnet_rtrbuf_t, rb_list);
938                 list_del(&rb->rb_list);
939                 lnet_destroy_rtrbuf(rb, npages);
940                 nbuffers++;
941         }
942
943         LASSERT (rbp->rbp_nbuffers == nbuffers);
944         LASSERT (rbp->rbp_credits == nbuffers);
945
946         rbp->rbp_nbuffers = rbp->rbp_credits = 0;
947 }
948
949 int
950 lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
951 {
952         lnet_rtrbuf_t *rb;
953         int            i;
954
955         if (rbp->rbp_nbuffers != 0) {
956                 LASSERT (rbp->rbp_nbuffers == nbufs);
957                 return 0;
958         }
959
960         for (i = 0; i < nbufs; i++) {
961                 rb = lnet_new_rtrbuf(rbp);
962
963                 if (rb == NULL) {
964                         CERROR("Failed to allocate %d router bufs of %d pages\n",
965                                nbufs, rbp->rbp_npages);
966                         return -ENOMEM;
967                 }
968
969                 rbp->rbp_nbuffers++;
970                 rbp->rbp_credits++;
971                 rbp->rbp_mincredits++;
972                 list_add(&rb->rb_list, &rbp->rbp_bufs);
973
974                 /* No allocation "under fire" */
975                 /* Otherwise we'd need code to schedule blocked msgs etc */
976                 LASSERT (!the_lnet.ln_routing);
977         }
978
979         LASSERT (rbp->rbp_credits == nbufs);
980         return 0;
981 }
982
983 void
984 lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
985 {
986         CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
987         CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);
988
989         rbp->rbp_npages = npages;
990         rbp->rbp_credits = 0;
991         rbp->rbp_mincredits = 0;
992 }
993
994 void
995 lnet_free_rtrpools(void)
996 {
997         lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[0]);
998         lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[1]);
999         lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[2]);
1000 }
1001
1002 void
1003 lnet_init_rtrpools(void)
1004 {
1005         int small_pages = 1;
1006         int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
1007
1008         lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
1009         lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
1010         lnet_rtrpool_init(&the_lnet.ln_rtrpools[2], large_pages);
1011 }
1012
1013
/* Enable routing: validate the 'forwarding' module parameter and the
 * three router-buffer counts, populate the tiny/small/large buffer
 * pools, then flip the_lnet.ln_routing on under LNET_LOCK.
 *
 * im_a_router: non-zero when the node was configured as a router; only
 * consulted when 'forwarding' was not set explicitly either way.
 * Returns 0 on success (including the "routing stays off" cases),
 * -EINVAL on bad parameters, -ENOMEM on allocation failure.  Any
 * partially-populated pools are torn down on the failure path. */
int
lnet_alloc_rtrpools(int im_a_router)
{
        int       rc;

        if (!strcmp(forwarding, "")) {
                /* not set either way */
                if (!im_a_router)
                        return 0;
        } else if (!strcmp(forwarding, "disabled")) {
                /* explicitly disabled */
                return 0;
        } else if (!strcmp(forwarding, "enabled")) {
                /* explicitly enabled */
        } else {
                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
                                   "'enabled' or 'disabled'\n");
                return -EINVAL;
        }

        /* Each buffer count is validated immediately before its pool is
         * filled; errors after the first allocation go through 'failed'
         * so already-filled pools are freed. */
        if (tiny_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
                                   "routing enabled\n", tiny_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[0],
                                     tiny_router_buffers);
        if (rc != 0)
                goto failed;

        if (small_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10d, "small_router_buffers=%d invalid when"
                                   " routing enabled\n", small_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[1],
                                     small_router_buffers);
        if (rc != 0)
                goto failed;

        if (large_router_buffers <= 0) {
                LCONSOLE_ERROR_MSG(0x10e, "large_router_buffers=%d invalid when"
                                   " routing enabled\n", large_router_buffers);
                rc = -EINVAL;
                goto failed;
        }

        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[2],
                                     large_router_buffers);
        if (rc != 0)
                goto failed;

        /* Publish routing=1 under the lock so other threads see the
         * pools fully set up before they start forwarding */
        LNET_LOCK();
        the_lnet.ln_routing = 1;
        LNET_UNLOCK();

        return 0;

 failed:
        lnet_free_rtrpools();
        return rc;
}
1080
/* Record an aliveness notification for peer 'nid' as of time 'when'.
 *
 * ni    - the local NI the report arrived on, or NULL when the report
 *         comes from userspace.
 * alive - non-zero for "up", zero for "down".
 * when  - timestamp (seconds) of the observation; must not be in the
 *         future.
 * Returns 0 on success or when the notification is ignored (unknown
 * peer, auto-down disabled); -EINVAL for a cross-net report or a
 * future-dated timestamp. */
int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        lnet_peer_t         *lp = NULL;
        time_t               now = cfs_time_current_sec();

        LASSERT (!in_interrupt ());

        CDEBUG (D_NET, "%s notifying %s: %s\n",
                (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                libcfs_nid2str(nid),
                alive ? "up" : "down");

        /* An LND may only report on peers in its own network */
        if (ni != NULL &&
            LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
                CWARN ("Ignoring notification of %s %s by %s (different net)\n",
                        libcfs_nid2str(nid), alive ? "birth" : "death",
                        libcfs_nid2str(ni->ni_nid));
                return -EINVAL;
        }

        /* can't do predictions... */
        if (when > now) {
                CWARN ("Ignoring prediction from %s of %s %s "
                       "%ld seconds in the future\n",
                       (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                       libcfs_nid2str(nid), alive ? "up" : "down",
                       when - now);
                return -EINVAL;
        }

        if (ni != NULL && !alive &&             /* LND telling me she's down */
            !auto_down) {                       /* auto-down disabled */
                CDEBUG(D_NET, "Auto-down disabled\n");
                return 0;
        }

        LNET_LOCK();

        /* NOTE(review): presumably lnet_find_peer_locked() returns the
         * peer with a reference held, matching the decref below — confirm
         * against its definition */
        lp = lnet_find_peer_locked(nid);
        if (lp == NULL) {
                /* nid not found */
                LNET_UNLOCK();
                CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                return 0;
        }

        /* We can't fully trust LND on reporting exact peer last_alive
         * if he notifies us about dead peer. For example ksocklnd can
         * call us with when == _time_when_the_node_was_booted_ if
         * no connections were successfully established */
        if (ni != NULL && !alive && when < lp->lp_last_alive)
                when = lp->lp_last_alive;

        lnet_notify_locked(lp, ni == NULL, alive, when);

        /* Drop the lock around lnet_do_notify(); it is called outside
         * LNET_LOCK, then the lock is retaken just for the decref */
        LNET_UNLOCK();

        lnet_do_notify(lp);

        LNET_LOCK();

        lnet_peer_decref_locked(lp);

        LNET_UNLOCK();
        return 0;
}
EXPORT_SYMBOL(lnet_notify);
1149
void
lnet_get_tunables (void)
{
        /* Kernel build: router tunables arrive as module parameters,
         * so there is nothing to read from the environment here. */
}
1155
1156 #else
1157
/* Stub for builds without the router checker: aliveness notification
 * is unsupported, so every call fails with -EOPNOTSUPP. */
int
lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
{
        return -EOPNOTSUPP;
}
1163
/* Userspace router checker, driven by periodic calls rather than a
 * dedicated thread: rate-limits itself to at most once per ~2 seconds,
 * drains pending ping events from the router-checker EQ, then pings
 * every known router once.  Also drives the STOPTHREAD -> UNLINKING ->
 * UNLINKED shutdown handshake for the ping MD. */
void
lnet_router_checker (void)
{
        /* 'last' rate-limits invocations; 'running' is a recursion
         * guard (see the comment at the final assignment) */
        static time_t last = 0;
        static int    running = 0;

        time_t            now = cfs_time_current_sec();
        int               interval = now - last;
        int               rc;
        __u64             version;
        lnet_peer_t      *rtr;

        /* It's no use to call me again within a sec - all intervals and
         * timeouts are measured in seconds */
        if (last != 0 && interval < 2)
                return;

        if (last != 0 &&
            interval > MAX(live_router_check_interval,
                           dead_router_check_interval))
                CDEBUG(D_NETERROR, "Checker(%d/%d) not called for %d seconds\n",
                       live_router_check_interval, dead_router_check_interval,
                       interval);

        LNET_LOCK();
        LASSERT (!running); /* recursion check */
        running = 1;
        LNET_UNLOCK();

        last = now;

        /* Shutdown requested: start unlinking the ping MD; completion
         * arrives as an UNLINK event consumed below */
        if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD) {
                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
                rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
                LASSERT (rc == 0);
        }

        /* consume all pending events */
        while (1) {
                int          i;
                lnet_event_t ev;

                /* NB ln_rc_eqh must be the 1st in 'eventqs' otherwise the
                 * recursion breaker in LNetEQPoll would fail */
                rc = LNetEQPoll(&the_lnet.ln_rc_eqh, 1, 0, &ev, &i);
                if (rc == 0)   /* no event pending */
                        break;

                /* NB a lost SENT prevents me from pinging a router again */
                if (rc == -EOVERFLOW) {
                        CERROR("Dropped an event!!!\n");
                        abort();
                }

                LASSERT (rc == 1);

                LNET_LOCK();
                lnet_router_checker_event(&ev);
                LNET_UNLOCK();
        }

        /* Unlink in progress or complete: no more pinging this round */
        if (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED ||
            the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING) {
                running = 0;
                return;
        }

        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);

        LNET_LOCK();

        /* Unlike the kernel thread, no rescan here: the routers list
         * must not change while the lock is held (asserted below) */
        version = the_lnet.ln_routers_version;
        list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
                lnet_ping_router_locked(rtr);
                LASSERT (version == the_lnet.ln_routers_version);
        }

        LNET_UNLOCK();

        running = 0; /* lock only needed for the recursion check */
        return;
}
1246
1247 /* NB lnet_peers_start_down depends on me,
1248  * so must be called before any peer creation */
1249 void
1250 lnet_get_tunables (void)
1251 {
1252         char *s;
1253
1254         s = getenv("LNET_ROUTER_PING_TIMEOUT");
1255         if (s != NULL) router_ping_timeout = atoi(s);
1256
1257         s = getenv("LNET_LIVE_ROUTER_CHECK_INTERVAL");
1258         if (s != NULL) live_router_check_interval = atoi(s);
1259
1260         s = getenv("LNET_DEAD_ROUTER_CHECK_INTERVAL");
1261         if (s != NULL) dead_router_check_interval = atoi(s);
1262
1263         /* This replaces old lnd_notify mechanism */
1264         check_routers_before_use = 1;
1265         if (dead_router_check_interval <= 0)
1266                 dead_router_check_interval = 30;
1267 }
1268
/* No router buffer pools without LNET_ROUTER: nothing to free */
void
lnet_free_rtrpools (void)
{
}
1273
/* No router buffer pools without LNET_ROUTER: nothing to initialize */
void
lnet_init_rtrpools (void)
{
}
1278
/* No router buffer pools without LNET_ROUTER: trivially succeed.
 * Parameter renamed from the misspelled 'im_a_arouter' to match the
 * kernel implementation's 'im_a_router' (no caller impact in C). */
int
lnet_alloc_rtrpools (int im_a_router)
{
        return 0;
}
1284
1285 #endif