LU-5485 lnet: peer aliveness status and NI status
diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c (fs/lustre-release.git)
index 9714c21..091cfd9 100644
--- a/lnet/lnet/router.c
+++ b/lnet/lnet/router.c
@@ -1,7 +1,7 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2011, 2013, Intel Corporation.
  *
  *   This file is part of Portals
  *   http://sourceforge.net/projects/sandiaportals/
 
 #if defined(__KERNEL__) && defined(LNET_ROUTER)
 
+#define LNET_NRB_TINY_MIN      512     /* min value for each CPT */
+#define LNET_NRB_TINY          (LNET_NRB_TINY_MIN * 4)
+#define LNET_NRB_SMALL_MIN     4096    /* min value for each CPT */
+#define LNET_NRB_SMALL         (LNET_NRB_SMALL_MIN * 4)
+#define LNET_NRB_SMALL_PAGES   1
+#define LNET_NRB_LARGE_MIN     256     /* min value for each CPT */
+#define LNET_NRB_LARGE         (LNET_NRB_LARGE_MIN * 4)
+#define LNET_NRB_LARGE_PAGES   ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
+                                 PAGE_CACHE_SHIFT)
+
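These constants define per-CPT minimums for the three router buffer pools (tiny for 0-payload messages, small for one-page messages, large for full-MTU messages), presumably used later in this file to size the pools when the module parameters below are left at 0. LNET_NRB_LARGE_PAGES rounds an LNET_MTU payload up to whole pages; a minimal sketch of that rounding, assuming a 1 MiB LNET_MTU and 4 KiB pages (the real values come from the LNet and kernel headers):

#include <stdio.h>

#define LNET_MTU_ASSUMED   (1 << 20)    /* assumption: 1 MiB max LNet payload */
#define PAGE_SIZE_ASSUMED  4096         /* assumption: 4 KiB pages */
#define PAGE_SHIFT_ASSUMED 12

int main(void)
{
        /* same round-up-to-whole-pages idiom as LNET_NRB_LARGE_PAGES above */
        unsigned int pages = (LNET_MTU_ASSUMED + PAGE_SIZE_ASSUMED - 1) >>
                             PAGE_SHIFT_ASSUMED;

        printf("each large router buffer spans %u pages\n", pages);  /* 256 */
        return 0;
}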
 static char *forwarding = "";
 CFS_MODULE_PARM(forwarding, "s", charp, 0444,
                 "Explicitly enable/disable forwarding between networks");
 
-static int tiny_router_buffers = 1024;
+static int tiny_router_buffers;
 CFS_MODULE_PARM(tiny_router_buffers, "i", int, 0444,
-                "# of 0 payload messages to buffer in the router");
-static int small_router_buffers = 8192;
+               "# of 0 payload messages to buffer in the router");
+static int small_router_buffers;
 CFS_MODULE_PARM(small_router_buffers, "i", int, 0444,
-                "# of small (1 page) messages to buffer in the router");
-static int large_router_buffers = 512;
+               "# of small (1 page) messages to buffer in the router");
+static int large_router_buffers;
 CFS_MODULE_PARM(large_router_buffers, "i", int, 0444,
-                "# of large messages to buffer in the router");
+               "# of large messages to buffer in the router");
 static int peer_buffer_credits = 0;
 CFS_MODULE_PARM(peer_buffer_credits, "i", int, 0444,
                 "# router buffer credits per peer");
@@ -75,19 +85,23 @@ lnet_peer_buffer_credits(lnet_ni_t *ni)
 
 static int check_routers_before_use = 0;
 CFS_MODULE_PARM(check_routers_before_use, "i", int, 0444,
-                "Assume routers are down and ping them before use");
+               "Assume routers are down and ping them before use");
+
+int avoid_asym_router_failure = 1;
+CFS_MODULE_PARM(avoid_asym_router_failure, "i", int, 0644,
+               "Avoid asymmetrical router failures (0 to disable)");
 
-static int dead_router_check_interval = 0;
-CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0444,
-                "Seconds between dead router health checks (<= 0 to disable)");
+static int dead_router_check_interval = 60;
+CFS_MODULE_PARM(dead_router_check_interval, "i", int, 0644,
+               "Seconds between dead router health checks (<= 0 to disable)");
 
-static int live_router_check_interval = 0;
-CFS_MODULE_PARM(live_router_check_interval, "i", int, 0444,
-                "Seconds between live router health checks (<= 0 to disable)");
+static int live_router_check_interval = 60;
+CFS_MODULE_PARM(live_router_check_interval, "i", int, 0644,
+               "Seconds between live router health checks (<= 0 to disable)");
 
 static int router_ping_timeout = 50;
-CFS_MODULE_PARM(router_ping_timeout, "i", int, 0444,
-                "Seconds to wait for the reply to a router health query");
+CFS_MODULE_PARM(router_ping_timeout, "i", int, 0644,
+               "Seconds to wait for the reply to a router health query");
 
 int
 lnet_peers_start_down(void)
@@ -96,9 +110,9 @@ lnet_peers_start_down(void)
 }
 
 void
-lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
+lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, cfs_time_t when)
 {
-        if (when < lp->lp_timestamp) {          /* out of date information */
+        if (cfs_time_before(when, lp->lp_timestamp)) { /* out of date information */
                 CDEBUG(D_NET, "Out of date\n");
                 return;
         }
@@ -118,27 +132,24 @@ lnet_notify_locked(lnet_peer_t *lp, int notifylnd, int alive, time_t when)
         lp->lp_alive = !(!alive);               /* 1 bit! */
         lp->lp_notify = 1;
         lp->lp_notifylnd |= notifylnd;
+       if (lp->lp_alive)
+               lp->lp_ping_feats = LNET_PING_FEAT_INVAL; /* reset */
 
-        CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
+       CDEBUG(D_NET, "set %s %d\n", libcfs_nid2str(lp->lp_nid), alive);
 }
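The comparison above moves from a plain "<" on time_t seconds to cfs_time_before() on cfs_time_t (jiffies), which has to tolerate counter wraparound. A minimal userspace sketch of the same signed-subtraction idiom (it mirrors the kernel's time_before(); it is not the libcfs implementation):

#include <stdio.h>

typedef unsigned long cfs_time_t;

/* wraparound-safe "a happened before b" */
static int time_before_sketch(cfs_time_t a, cfs_time_t b)
{
        return (long)(a - b) < 0;
}

int main(void)
{
        cfs_time_t lp_timestamp = (cfs_time_t)-5;  /* recorded just before wrap */
        cfs_time_t when         = 10;              /* notification just after wrap */

        /* a naive "when < lp_timestamp" would wrongly discard the fresh update */
        printf("naive stale: %d, wrap-safe stale: %d\n",
               when < lp_timestamp, time_before_sketch(when, lp_timestamp));
        return 0;
}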
 
 void
-lnet_do_notify (lnet_peer_t *lp)
+lnet_ni_notify_locked(lnet_ni_t *ni, lnet_peer_t *lp)
 {
-        lnet_ni_t *ni = lp->lp_ni;
         int        alive;
         int        notifylnd;
 
-        LNET_LOCK();
-
         /* Notify only in 1 thread at any time to ensure ordered notification.
          * NB individual events can be missed; the only guarantee is that you
          * always get the most recent news */
 
-        if (lp->lp_notifying) {
-                LNET_UNLOCK();
+       if (lp->lp_notifying)
                 return;
-        }
 
         lp->lp_notifying = 1;
 
@@ -150,311 +161,390 @@ lnet_do_notify (lnet_peer_t *lp)
                 lp->lp_notify    = 0;
 
                 if (notifylnd && ni->ni_lnd->lnd_notify != NULL) {
-                        LNET_UNLOCK();
+                       lnet_net_unlock(lp->lp_cpt);
 
-                        /* A new notification could happen now; I'll handle it
-                         * when control returns to me */
+                       /* A new notification could happen now; I'll handle it
+                        * when control returns to me */
 
-                        (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
+                       (ni->ni_lnd->lnd_notify)(ni, lp->lp_nid, alive);
 
-                        LNET_LOCK();
-                }
-        }
+                       lnet_net_lock(lp->lp_cpt);
+               }
+       }
 
-        lp->lp_notifying = 0;
-
-        LNET_UNLOCK();
+       lp->lp_notifying = 0;
 }
 
 
 static void
 lnet_rtr_addref_locked(lnet_peer_t *lp)
 {
-        LASSERT (lp->lp_refcount > 0);
-        LASSERT (lp->lp_rtr_refcount >= 0);
-
-        lp->lp_rtr_refcount++;
-        if (lp->lp_rtr_refcount == 1) {
-                struct list_head *pos;
-
-                /* a simple insertion sort */
-                list_for_each_prev(pos, &the_lnet.ln_routers) {
-                        lnet_peer_t *rtr = list_entry(pos, lnet_peer_t, 
-                                                      lp_rtr_list);
-
-                        if (rtr->lp_nid < lp->lp_nid)
-                                break;
-                }
-
-                list_add(&lp->lp_rtr_list, pos);
-                /* addref for the_lnet.ln_routers */
-                lnet_peer_addref_locked(lp);
-                the_lnet.ln_routers_version++;
-        }
+       LASSERT(lp->lp_refcount > 0);
+       LASSERT(lp->lp_rtr_refcount >= 0);
+
+       /* lnet_net_lock must be exclusively locked */
+       lp->lp_rtr_refcount++;
+       if (lp->lp_rtr_refcount == 1) {
+               struct list_head *pos;
+
+               /* a simple insertion sort */
+               list_for_each_prev(pos, &the_lnet.ln_routers) {
+                       lnet_peer_t *rtr = list_entry(pos, lnet_peer_t,
+                                                     lp_rtr_list);
+
+                       if (rtr->lp_nid < lp->lp_nid)
+                               break;
+               }
+
+               list_add(&lp->lp_rtr_list, pos);
+               /* addref for the_lnet.ln_routers */
+               lnet_peer_addref_locked(lp);
+               the_lnet.ln_routers_version++;
+       }
 }
 
 static void
 lnet_rtr_decref_locked(lnet_peer_t *lp)
 {
-        LASSERT (lp->lp_refcount > 0);
-        LASSERT (lp->lp_rtr_refcount > 0);
-
-        lp->lp_rtr_refcount--;
-        if (lp->lp_rtr_refcount == 0) {
-                list_del(&lp->lp_rtr_list);
-                /* decref for the_lnet.ln_routers */
-                lnet_peer_decref_locked(lp);
-                the_lnet.ln_routers_version++;
-        }
+       LASSERT(lp->lp_refcount > 0);
+       LASSERT(lp->lp_rtr_refcount > 0);
+
+       /* lnet_net_lock must be exclusively locked */
+       lp->lp_rtr_refcount--;
+       if (lp->lp_rtr_refcount == 0) {
+               LASSERT(list_empty(&lp->lp_routes));
+
+               if (lp->lp_rcd != NULL) {
+                       list_add(&lp->lp_rcd->rcd_list,
+                                &the_lnet.ln_rcd_deathrow);
+                       lp->lp_rcd = NULL;
+               }
+
+               list_del(&lp->lp_rtr_list);
+               /* decref for the_lnet.ln_routers */
+               lnet_peer_decref_locked(lp);
+               the_lnet.ln_routers_version++;
+       }
 }
 
 lnet_remotenet_t *
 lnet_find_net_locked (__u32 net)
 {
-        lnet_remotenet_t *rnet;
-        struct list_head *tmp;
+       lnet_remotenet_t *rnet;
+       struct list_head *tmp;
+       struct list_head *rn_list;
 
-        LASSERT (!the_lnet.ln_shutdown);
+       LASSERT(!the_lnet.ln_shutdown);
 
-        list_for_each (tmp, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
+       rn_list = lnet_net2rnethash(net);
+       list_for_each(tmp, rn_list) {
+               rnet = list_entry(tmp, lnet_remotenet_t, lrn_list);
 
-                if (rnet->lrn_net == net)
-                        return rnet;
-        }
-        return NULL;
+               if (rnet->lrn_net == net)
+                       return rnet;
+       }
+       return NULL;
 }
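lnet_find_net_locked() now searches a single bucket returned by lnet_net2rnethash() instead of walking one global ln_remote_nets list. The hash itself lives in the LNet headers, not in this diff; the sketch below only illustrates the general bucket-selection idea, with an assumed power-of-two table size and an assumed (not the real) hash:

#include <stdio.h>
#include <stdint.h>

#define REMOTE_NETS_HASH_SIZE 128   /* assumption: power-of-two bucket count */

/* illustrative only: the real lnet_net2rnethash() is defined elsewhere */
static unsigned int net2rnethash_sketch(uint32_t net)
{
        return net & (REMOTE_NETS_HASH_SIZE - 1);
}

int main(void)
{
        uint32_t nets[] = { 0x10001, 0x10002, 0x20001 };
        unsigned int i;

        for (i = 0; i < sizeof(nets) / sizeof(nets[0]); i++)
                printf("net 0x%x -> bucket %u\n", nets[i],
                       net2rnethash_sketch(nets[i]));
        return 0;
}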
 
-int
-lnet_add_route (__u32 net, unsigned int hops, lnet_nid_t gateway)
+static void lnet_shuffle_seed(void)
 {
-        struct list_head     zombies;
-        struct list_head    *e;
-        lnet_remotenet_t    *rnet;
-        lnet_remotenet_t    *rnet2;
-        lnet_route_t        *route;
-        lnet_route_t        *route2;
-        lnet_ni_t           *ni;
-        int                  add_route;
-        int                  rc;
-
-        CDEBUG(D_NET, "Add route: net %s hops %u gw %s\n",
-               libcfs_net2str(net), hops, libcfs_nid2str(gateway));
-
-        if (gateway == LNET_NID_ANY ||
-            LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
-            net == LNET_NIDNET(LNET_NID_ANY) ||
-            LNET_NETTYP(net) == LOLND ||
-            LNET_NIDNET(gateway) == net ||
-            hops < 1 || hops > 255)
-                return (-EINVAL);
-
-        if (lnet_islocalnet(net))               /* it's a local network */
-                return 0;                       /* ignore the route entry */
-
-        /* Assume net, route, all new */
-        LIBCFS_ALLOC(route, sizeof(*route));
-        LIBCFS_ALLOC(rnet, sizeof(*rnet));
-        if (route == NULL || rnet == NULL) {
-                CERROR("Out of memory creating route %s %d %s\n",
-                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
-                if (route != NULL)
-                        LIBCFS_FREE(route, sizeof(*route));
-                if (rnet != NULL)
-                        LIBCFS_FREE(rnet, sizeof(*rnet));
-                return -ENOMEM;
-        }
-
-        CFS_INIT_LIST_HEAD(&rnet->lrn_routes);
-        rnet->lrn_net = net;
-        rnet->lrn_hops = hops;
-
-        LNET_LOCK();
-
-        rc = lnet_nid2peer_locked(&route->lr_gateway, gateway);
-        if (rc != 0) {
-                LNET_UNLOCK();
-
-                LIBCFS_FREE(route, sizeof(*route));
-                LIBCFS_FREE(rnet, sizeof(*rnet));
-
-                if (rc == -EHOSTUNREACH)        /* gateway is not on a local net */
-                        return 0;               /* ignore the route entry */
-
-                CERROR("Error %d creating route %s %d %s\n", rc,
-                       libcfs_net2str(net), hops, libcfs_nid2str(gateway));
-                return rc;
-        }
-
-        LASSERT (!the_lnet.ln_shutdown);
-        CFS_INIT_LIST_HEAD(&zombies);
-
-        rnet2 = lnet_find_net_locked(net);
-        if (rnet2 == NULL) {
-                /* new network */
-                list_add_tail(&rnet->lrn_list, &the_lnet.ln_remote_nets);
-                rnet2 = rnet;
-        }
+        static int seeded = 0;
+        int lnd_type, seed[2];
+        struct timeval tv;
+        lnet_ni_t *ni;
+       struct list_head *tmp;
 
-        if (hops > rnet2->lrn_hops) {
-                /* New route is longer; ignore it */
-                add_route = 0;
-        } else if (hops < rnet2->lrn_hops) {
-                /* new route supercedes all currently known routes to this
-                 * net */
-                list_add(&zombies, &rnet2->lrn_routes);
-                list_del_init(&rnet2->lrn_routes);
-                add_route = 1;
-        } else {
-                add_route = 1;
-                /* New route has the same hopcount as existing routes; search
-                 * for a duplicate route (it's a NOOP if it is) */
-                list_for_each (e, &rnet2->lrn_routes) {
-                        route2 = list_entry(e, lnet_route_t, lr_list);
-
-                        if (route2->lr_gateway == route->lr_gateway) {
-                                add_route = 0;
-                                break;
-                        }
-
-                        /* our loopups must be true */
-                        LASSERT (route2->lr_gateway->lp_nid != gateway);
-                }
-        }
-
-        if (add_route) {
-                ni = route->lr_gateway->lp_ni;
-                lnet_ni_addref_locked(ni);
-
-                LASSERT (rc == 0);
-                list_add_tail(&route->lr_list, &rnet2->lrn_routes);
-                the_lnet.ln_remote_nets_version++;
-
-                lnet_rtr_addref_locked(route->lr_gateway);
-
-                LNET_UNLOCK();
-
-                /* XXX Assume alive */
-                if (ni->ni_lnd->lnd_notify != NULL)
-                        (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
+        if (seeded)
+                return;
 
-                lnet_ni_decref(ni);
-        } else {
-                lnet_peer_decref_locked(route->lr_gateway);
-                LNET_UNLOCK();
-                LIBCFS_FREE(route, sizeof(*route));
-        }
+        cfs_get_random_bytes(seed, sizeof(seed));
 
-        if (rnet != rnet2)
-                LIBCFS_FREE(rnet, sizeof(*rnet));
+       /* Nodes with small feet have little entropy
+        * the NID for this node gives the most entropy in the low bits */
+       list_for_each(tmp, &the_lnet.ln_nis) {
+               ni = list_entry(tmp, lnet_ni_t, ni_list);
+               lnd_type = LNET_NETTYP(LNET_NIDNET(ni->ni_nid));
 
-        while (!list_empty(&zombies)) {
-                route = list_entry(zombies.next, lnet_route_t, lr_list);
-                list_del(&route->lr_list);
+               if (lnd_type != LOLND)
+                       seed[0] ^= (LNET_NIDADDR(ni->ni_nid) | lnd_type);
+       }
 
-                LNET_LOCK();
-                lnet_rtr_decref_locked(route->lr_gateway);
-                lnet_peer_decref_locked(route->lr_gateway);
-                LNET_UNLOCK();
-                LIBCFS_FREE(route, sizeof(*route));
-        }
+       do_gettimeofday(&tv);
+       cfs_srand(tv.tv_sec ^ seed[0], tv.tv_usec ^ seed[1]);
+       seeded = 1;
+       return;
+}
 
-        return rc;
+/* NB expects LNET_LOCK held */
+void
+lnet_add_route_to_rnet (lnet_remotenet_t *rnet, lnet_route_t *route)
+{
+       unsigned int      len = 0;
+       unsigned int      offset = 0;
+       struct list_head *e;
+
+       lnet_shuffle_seed();
+
+       list_for_each(e, &rnet->lrn_routes) {
+               len++;
+       }
+
+       /* len+1 positions to add a new entry, also prevents division by 0 */
+       offset = cfs_rand() % (len + 1);
+       list_for_each(e, &rnet->lrn_routes) {
+               if (offset == 0)
+                       break;
+               offset--;
+       }
+       list_add(&route->lr_list, e);
+       list_add(&route->lr_gwlist, &route->lr_gateway->lp_routes);
+
+       the_lnet.ln_remote_nets_version++;
+       lnet_rtr_addref_locked(route->lr_gateway);
 }
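lnet_add_route_to_rnet() inserts the new route at a random position among the existing routes for the remote net (seeded by lnet_shuffle_seed() above), so route selection does not always favour the order in which routes were configured. A standalone sketch of the same count / pick an offset in [0, len] / walk / insert pattern on a plain circular list (hypothetical helpers, standard rand(), not the kernel list API):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct node { struct node *next, *prev; int id; };

static struct node head = { &head, &head, -1 };   /* circular list head */

static void list_add_after(struct node *pos, struct node *n)
{
        n->next = pos->next;
        n->prev = pos;
        pos->next->prev = n;
        pos->next = n;
}

/* count the existing entries, pick a random offset in [0, len], insert there */
static void insert_at_random_offset(struct node *n)
{
        struct node *pos = &head;
        struct node *e;
        unsigned int len = 0;
        unsigned int offset;

        for (e = head.next; e != &head; e = e->next)
                len++;

        offset = (unsigned int)rand() % (len + 1);  /* len+1 slots, no div by 0 */
        for (e = head.next; offset != 0 && e != &head; e = e->next) {
                pos = e;
                offset--;
        }
        list_add_after(pos, n);
}

int main(void)
{
        struct node nodes[5];
        struct node *e;
        int i;

        srand((unsigned int)time(NULL));
        for (i = 0; i < 5; i++) {
                nodes[i].id = i;
                insert_at_random_offset(&nodes[i]);
        }
        for (e = head.next; e != &head; e = e->next)
                printf("%d ", e->id);
        printf("\n");
        return 0;
}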
 
 int
-lnet_check_routes (void)
+lnet_add_route(__u32 net, unsigned int hops, lnet_nid_t gateway,
+              unsigned int priority)
 {
-        lnet_remotenet_t    *rnet;
-        lnet_route_t        *route;
-        lnet_route_t        *route2;
-        struct list_head    *e1;
-        struct list_head    *e2;
-
-        LNET_LOCK();
-
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
-                route2 = NULL;
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
-
-                        if (route2 == NULL)
-                                route2 = route;
-                        else if (route->lr_gateway->lp_ni !=
-                                 route2->lr_gateway->lp_ni) {
-                                LNET_UNLOCK();
-
-                                CERROR("Routes to %s via %s and %s not supported\n",
-                                       libcfs_net2str(rnet->lrn_net),
-                                       libcfs_nid2str(route->lr_gateway->lp_nid),
-                                       libcfs_nid2str(route2->lr_gateway->lp_nid));
-                                return -EINVAL;
-                        }
-                }
-        }
-
-        LNET_UNLOCK();
-        return 0;
+       struct list_head        *e;
+       lnet_remotenet_t        *rnet;
+       lnet_remotenet_t        *rnet2;
+       lnet_route_t            *route;
+       lnet_ni_t               *ni;
+       int                     add_route;
+       int                     rc;
+
+       CDEBUG(D_NET, "Add route: net %s hops %u priority %u gw %s\n",
+              libcfs_net2str(net), hops, priority, libcfs_nid2str(gateway));
+
+       if (gateway == LNET_NID_ANY ||
+           LNET_NETTYP(LNET_NIDNET(gateway)) == LOLND ||
+           net == LNET_NIDNET(LNET_NID_ANY) ||
+           LNET_NETTYP(net) == LOLND ||
+           LNET_NIDNET(gateway) == net ||
+           hops < 1 || hops > 255)
+               return -EINVAL;
+
+       if (lnet_islocalnet(net))       /* it's a local network */
+               return 0;               /* ignore the route entry */
+
+       /* Assume net, route, all new */
+       LIBCFS_ALLOC(route, sizeof(*route));
+       LIBCFS_ALLOC(rnet, sizeof(*rnet));
+       if (route == NULL || rnet == NULL) {
+               CERROR("Out of memory creating route %s %d %s\n",
+                      libcfs_net2str(net), hops, libcfs_nid2str(gateway));
+               if (route != NULL)
+                       LIBCFS_FREE(route, sizeof(*route));
+               if (rnet != NULL)
+                       LIBCFS_FREE(rnet, sizeof(*rnet));
+               return -ENOMEM;
+       }
+
+       INIT_LIST_HEAD(&rnet->lrn_routes);
+       rnet->lrn_net = net;
+       route->lr_hops = hops;
+       route->lr_net = net;
+       route->lr_priority = priority;
+
+       lnet_net_lock(LNET_LOCK_EX);
+
+       rc = lnet_nid2peer_locked(&route->lr_gateway, gateway, LNET_LOCK_EX);
+       if (rc != 0) {
+               lnet_net_unlock(LNET_LOCK_EX);
+
+               LIBCFS_FREE(route, sizeof(*route));
+               LIBCFS_FREE(rnet, sizeof(*rnet));
+
+               if (rc == -EHOSTUNREACH) /* gateway is not on a local net. */
+                       return 0;        /* ignore the route entry */
+               CERROR("Error %d creating route %s %d %s\n", rc,
+                       libcfs_net2str(net), hops,
+                       libcfs_nid2str(gateway));
+               return rc;
+       }
+
+       LASSERT(!the_lnet.ln_shutdown);
+
+       rnet2 = lnet_find_net_locked(net);
+       if (rnet2 == NULL) {
+               /* new network */
+               list_add_tail(&rnet->lrn_list, lnet_net2rnethash(net));
+               rnet2 = rnet;
+       }
+
+       /* Search for a duplicate route (it's a NOOP if it is) */
+       add_route = 1;
+       list_for_each(e, &rnet2->lrn_routes) {
+               lnet_route_t *route2 = list_entry(e, lnet_route_t, lr_list);
+
+               if (route2->lr_gateway == route->lr_gateway) {
+                       add_route = 0;
+                       break;
+               }
+
+               /* our lookups must be true */
+               LASSERT(route2->lr_gateway->lp_nid != gateway);
+       }
+
+       if (add_route) {
+               lnet_peer_addref_locked(route->lr_gateway); /* +1 for notify */
+               lnet_add_route_to_rnet(rnet2, route);
+
+               ni = route->lr_gateway->lp_ni;
+               lnet_net_unlock(LNET_LOCK_EX);
+
+               /* XXX Assume alive */
+               if (ni->ni_lnd->lnd_notify != NULL)
+                       (ni->ni_lnd->lnd_notify)(ni, gateway, 1);
+
+               lnet_net_lock(LNET_LOCK_EX);
+       }
+
+       /* -1 for notify or !add_route */
+       lnet_peer_decref_locked(route->lr_gateway);
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       if (!add_route)
+               LIBCFS_FREE(route, sizeof(*route));
+
+       if (rnet != rnet2)
+               LIBCFS_FREE(rnet, sizeof(*rnet));
+
+       return 0;
 }
 
 int
-lnet_del_route (__u32 net, lnet_nid_t gw_nid)
+lnet_check_routes(void)
 {
-        lnet_remotenet_t    *rnet;
-        lnet_route_t        *route;
-        struct list_head    *e1;
-        struct list_head    *e2;
-        int                  rc = -ENOENT;
-
-        CDEBUG(D_NET, "Del route: net %s : gw %s\n",
-               libcfs_net2str(net), libcfs_nid2str(gw_nid));
-
-        /* NB Caller may specify either all routes via the given gateway
-         * or a specific route entry actual NIDs) */
-
- again:
-        LNET_LOCK();
-
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
-                if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
-                      net == rnet->lrn_net))
-                        continue;
-
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
-
-                        if (!(gw_nid == LNET_NID_ANY ||
-                              gw_nid == route->lr_gateway->lp_nid))
-                                continue;
-
-                        list_del(&route->lr_list);
-                        the_lnet.ln_remote_nets_version++;
-
-                        if (list_empty(&rnet->lrn_routes))
-                                list_del(&rnet->lrn_list);
-                        else
-                                rnet = NULL;
-
-                        lnet_rtr_decref_locked(route->lr_gateway);
-                        lnet_peer_decref_locked(route->lr_gateway);
-                        LNET_UNLOCK();
-
-                        LIBCFS_FREE(route, sizeof (*route));
-
-                        if (rnet != NULL)
-                                LIBCFS_FREE(rnet, sizeof(*rnet));
-
-                        rc = 0;
-                        goto again;
-                }
-        }
+       lnet_remotenet_t *rnet;
+       lnet_route_t     *route;
+       lnet_route_t     *route2;
+       struct list_head *e1;
+       struct list_head *e2;
+       int               cpt;
+       struct list_head *rn_list;
+       int               i;
+
+       cpt = lnet_net_lock_current();
+
+       for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
+               rn_list = &the_lnet.ln_remote_nets_hash[i];
+               list_for_each(e1, rn_list) {
+                       rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+                       route2 = NULL;
+                       list_for_each(e2, &rnet->lrn_routes) {
+                               lnet_nid_t      nid1;
+                               lnet_nid_t      nid2;
+                               int             net;
+
+                               route = list_entry(e2, lnet_route_t,
+                                                  lr_list);
+
+                               if (route2 == NULL) {
+                                       route2 = route;
+                                       continue;
+                               }
+
+                               if (route->lr_gateway->lp_ni ==
+                                   route2->lr_gateway->lp_ni)
+                                       continue;
+
+                               nid1 = route->lr_gateway->lp_nid;
+                               nid2 = route2->lr_gateway->lp_nid;
+                               net = rnet->lrn_net;
+
+                               lnet_net_unlock(cpt);
+
+                               CERROR("Routes to %s via %s and %s not "
+                                      "supported\n",
+                                      libcfs_net2str(net),
+                                      libcfs_nid2str(nid1),
+                                      libcfs_nid2str(nid2));
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       lnet_net_unlock(cpt);
+       return 0;
+}
 
-        LNET_UNLOCK();
-        return rc;
+int
+lnet_del_route(__u32 net, lnet_nid_t gw_nid)
+{
+       struct lnet_peer        *gateway;
+       lnet_remotenet_t        *rnet;
+       lnet_route_t            *route;
+       struct list_head        *e1;
+       struct list_head        *e2;
+       int                     rc = -ENOENT;
+       struct list_head        *rn_list;
+       int                     idx = 0;
+
+       CDEBUG(D_NET, "Del route: net %s : gw %s\n",
+              libcfs_net2str(net), libcfs_nid2str(gw_nid));
+
+       /* NB Caller may specify either all routes via the given gateway
+        * or a specific route entry actual NIDs) */
+
+       lnet_net_lock(LNET_LOCK_EX);
+       if (net == LNET_NIDNET(LNET_NID_ANY))
+               rn_list = &the_lnet.ln_remote_nets_hash[0];
+       else
+               rn_list = lnet_net2rnethash(net);
+
+again:
+       list_for_each(e1, rn_list) {
+               rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+               if (!(net == LNET_NIDNET(LNET_NID_ANY) ||
+                       net == rnet->lrn_net))
+                       continue;
+
+               list_for_each(e2, &rnet->lrn_routes) {
+                       route = list_entry(e2, lnet_route_t, lr_list);
+
+                       gateway = route->lr_gateway;
+                       if (!(gw_nid == LNET_NID_ANY ||
+                             gw_nid == gateway->lp_nid))
+                               continue;
+
+                       list_del(&route->lr_list);
+                       list_del(&route->lr_gwlist);
+                       the_lnet.ln_remote_nets_version++;
+
+                       if (list_empty(&rnet->lrn_routes))
+                               list_del(&rnet->lrn_list);
+                       else
+                               rnet = NULL;
+
+                       lnet_rtr_decref_locked(gateway);
+                       lnet_peer_decref_locked(gateway);
+
+                       lnet_net_unlock(LNET_LOCK_EX);
+
+                       LIBCFS_FREE(route, sizeof(*route));
+
+                       if (rnet != NULL)
+                               LIBCFS_FREE(rnet, sizeof(*rnet));
+
+                       rc = 0;
+                       lnet_net_lock(LNET_LOCK_EX);
+                       goto again;
+               }
+       }
+
+       if (net == LNET_NIDNET(LNET_NID_ANY) &&
+           ++idx < LNET_REMOTE_NETS_HASH_SIZE) {
+               rn_list = &the_lnet.ln_remote_nets_hash[idx];
+               goto again;
+       }
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       return rc;
 }
 
 void
@@ -463,63 +553,266 @@ lnet_destroy_routes (void)
         lnet_del_route(LNET_NIDNET(LNET_NID_ANY), LNET_NID_ANY);
 }
 
+int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg)
+{
+       int i, rc = -ENOENT, lidx, j;
+
+       if (the_lnet.ln_rtrpools == NULL)
+               return rc;
+
+       for (i = 0; i < LNET_NRBPOOLS; i++) {
+               lnet_rtrbufpool_t *rbp;
+
+               lnet_net_lock(LNET_LOCK_EX);
+               lidx = idx;
+               cfs_percpt_for_each(rbp, j, the_lnet.ln_rtrpools) {
+                       if (lidx-- == 0) {
+                               rc = 0;
+                               pool_cfg->pl_pools[i].pl_npages =
+                                       rbp[i].rbp_npages;
+                               pool_cfg->pl_pools[i].pl_nbuffers =
+                                       rbp[i].rbp_nbuffers;
+                               pool_cfg->pl_pools[i].pl_credits =
+                                       rbp[i].rbp_credits;
+                               pool_cfg->pl_pools[i].pl_mincredits =
+                                       rbp[i].rbp_mincredits;
+                               break;
+                       }
+               }
+               lnet_net_unlock(LNET_LOCK_EX);
+       }
+
+       lnet_net_lock(LNET_LOCK_EX);
+       pool_cfg->pl_routing = the_lnet.ln_routing;
+       lnet_net_unlock(LNET_LOCK_EX);
+
+       return rc;
+}
+
 int
-lnet_get_route (int idx, __u32 *net, __u32 *hops,
-               lnet_nid_t *gateway, __u32 *alive)
+lnet_get_route(int idx, __u32 *net, __u32 *hops,
+              lnet_nid_t *gateway, __u32 *alive, __u32 *priority)
 {
-        struct list_head    *e1;
-        struct list_head    *e2;
-        lnet_remotenet_t    *rnet;
-        lnet_route_t        *route;
-
-        LNET_LOCK();
-
-        list_for_each (e1, &the_lnet.ln_remote_nets) {
-                rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
-
-                list_for_each (e2, &rnet->lrn_routes) {
-                        route = list_entry(e2, lnet_route_t, lr_list);
-
-                        if (idx-- == 0) {
-                                *net     = rnet->lrn_net;
-                                *hops    = rnet->lrn_hops;
-                                *gateway = route->lr_gateway->lp_nid;
-                                *alive   = route->lr_gateway->lp_alive;
-                                LNET_UNLOCK();
-                                return 0;
-                        }
-                }
-        }
+       struct list_head *e1;
+       struct list_head *e2;
+       lnet_remotenet_t *rnet;
+       lnet_route_t     *route;
+       int               cpt;
+       int               i;
+       struct list_head *rn_list;
+
+       cpt = lnet_net_lock_current();
+
+       for (i = 0; i < LNET_REMOTE_NETS_HASH_SIZE; i++) {
+               rn_list = &the_lnet.ln_remote_nets_hash[i];
+               list_for_each(e1, rn_list) {
+                       rnet = list_entry(e1, lnet_remotenet_t, lrn_list);
+
+                       list_for_each(e2, &rnet->lrn_routes) {
+                               route = list_entry(e2, lnet_route_t,
+                                                  lr_list);
+
+                               if (idx-- == 0) {
+                                       *net      = rnet->lrn_net;
+                                       *hops     = route->lr_hops;
+                                       *priority = route->lr_priority;
+                                       *gateway  = route->lr_gateway->lp_nid;
+                                       *alive    =
+                                               route->lr_gateway->lp_alive &&
+                                                       !route->lr_downis;
+                                       lnet_net_unlock(cpt);
+                                       return 0;
+                               }
+                       }
+               }
+       }
+
+       lnet_net_unlock(cpt);
+       return -ENOENT;
+}
+
+void
+lnet_swap_pinginfo(lnet_ping_info_t *info)
+{
+       int               i;
+       lnet_ni_status_t *stat;
+
+       __swab32s(&info->pi_magic);
+       __swab32s(&info->pi_features);
+       __swab32s(&info->pi_pid);
+       __swab32s(&info->pi_nnis);
+       for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
+               stat = &info->pi_ni[i];
+               __swab64s(&stat->ns_nid);
+               __swab32s(&stat->ns_status);
+       }
+       return;
+}
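lnet_swap_pinginfo() is invoked when the ping reply's magic arrives byte-swapped, i.e. the router runs with the opposite endianness, and it swabs every multi-byte field in place. A self-contained sketch of that detect-and-swab pattern using GCC/Clang __builtin_bswap helpers (the magic value and struct here are illustrative, not LNet's):

#include <stdio.h>
#include <stdint.h>

#define PING_MAGIC 0x70696e67u   /* assumption: illustrative magic only */

struct ping_hdr {
        uint32_t magic;
        uint32_t nnis;
        uint64_t nid;
};

static void swab_ping_hdr(struct ping_hdr *h)
{
        /* stand-ins for the libcfs __swab32s()/__swab64s() in-place swabs */
        h->magic = __builtin_bswap32(h->magic);
        h->nnis  = __builtin_bswap32(h->nnis);
        h->nid   = __builtin_bswap64(h->nid);
}

int main(void)
{
        /* pretend the reply came from a router of the opposite endianness */
        struct ping_hdr h = {
                __builtin_bswap32(PING_MAGIC),
                __builtin_bswap32(3),
                __builtin_bswap64(0x1234ULL),
        };

        if (h.magic == __builtin_bswap32(PING_MAGIC))  /* same test as above */
                swab_ping_hdr(&h);

        printf("magic ok: %d, nnis %u\n", h.magic == PING_MAGIC, h.nnis);
        return 0;
}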
+
+/**
+ * parse router-checker pinginfo, record number of down NIs for remote
+ * networks on that router.
+ */
+static void
+lnet_parse_rc_info(lnet_rc_data_t *rcd)
+{
+       lnet_ping_info_t        *info = rcd->rcd_pinginfo;
+       struct lnet_peer        *gw   = rcd->rcd_gateway;
+       lnet_route_t            *rte;
+
+       if (!gw->lp_alive)
+               return;
+
+       if (info->pi_magic == __swab32(LNET_PROTO_PING_MAGIC))
+               lnet_swap_pinginfo(info);
+
+       /* NB always racing with network! */
+       if (info->pi_magic != LNET_PROTO_PING_MAGIC) {
+               CDEBUG(D_NET, "%s: Unexpected magic %08x\n",
+                      libcfs_nid2str(gw->lp_nid), info->pi_magic);
+               gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+               return;
+       }
+
+       gw->lp_ping_feats = info->pi_features;
+       if ((gw->lp_ping_feats & LNET_PING_FEAT_MASK) == 0) {
+               CDEBUG(D_NET, "%s: Unexpected features 0x%x\n",
+                      libcfs_nid2str(gw->lp_nid), gw->lp_ping_feats);
+               return; /* nothing I can understand */
+       }
+
+       if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) == 0)
+               return; /* can't carry NI status info */
+
+       list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
+               int     down = 0;
+               int     up = 0;
+               int     i;
+
+               if ((gw->lp_ping_feats & LNET_PING_FEAT_RTE_DISABLED) != 0) {
+                       rte->lr_downis = 1;
+                       continue;
+               }
+
+               for (i = 0; i < info->pi_nnis && i < LNET_MAX_RTR_NIS; i++) {
+                       lnet_ni_status_t *stat = &info->pi_ni[i];
+                       lnet_nid_t       nid = stat->ns_nid;
+
+                       if (nid == LNET_NID_ANY) {
+                               CDEBUG(D_NET, "%s: unexpected LNET_NID_ANY\n",
+                                      libcfs_nid2str(gw->lp_nid));
+                               gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+                               return;
+                       }
+
+                       if (LNET_NETTYP(LNET_NIDNET(nid)) == LOLND)
+                               continue;
+
+                       if (stat->ns_status == LNET_NI_STATUS_DOWN) {
+                               down++;
+                               continue;
+                       }
+
+                       if (stat->ns_status == LNET_NI_STATUS_UP) {
+                               if (LNET_NIDNET(nid) == rte->lr_net) {
+                                       up = 1;
+                                       break;
+                               }
+                               continue;
+                       }
+
+                       CDEBUG(D_NET, "%s: Unexpected status 0x%x\n",
+                              libcfs_nid2str(gw->lp_nid), stat->ns_status);
+                       gw->lp_ping_feats = LNET_PING_FEAT_INVAL;
+                       return;
+               }
+
+               if (up) { /* ignore downed NIs if NI for dest network is up */
+                       rte->lr_downis = 0;
+                       continue;
+               }
+               rte->lr_downis = down;
+       }
+}
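The loop above boils down to one rule per route: if the gateway reports any NI up on the route's destination network, the route is usable (lr_downis = 0); otherwise lr_downis records how many NIs the gateway reported down. A compact sketch of that decision with hypothetical types:

#include <stdio.h>

#define NI_UP   1
#define NI_DOWN 2

struct ni_stat { unsigned int net; int status; };

/* returns 0 if some NI on 'route_net' is up, else the number of down NIs */
static int route_downis(const struct ni_stat *stats, int n, unsigned int route_net)
{
        int i, down = 0;

        for (i = 0; i < n; i++) {
                if (stats[i].status == NI_DOWN) {
                        down++;
                        continue;
                }
                if (stats[i].status == NI_UP && stats[i].net == route_net)
                        return 0;   /* dest net reachable: ignore other downs */
        }
        return down;
}

int main(void)
{
        struct ni_stat stats[] = {
                { 1, NI_DOWN }, { 2, NI_UP }, { 3, NI_DOWN },
        };

        printf("downis(net 2) = %d\n", route_downis(stats, 3, 2)); /* 0 */
        printf("downis(net 4) = %d\n", route_downis(stats, 3, 4)); /* 2 */
        return 0;
}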
 
-        LNET_UNLOCK();
-        return -ENOENT;
+static void
+lnet_router_checker_event(lnet_event_t *event)
+{
+       lnet_rc_data_t          *rcd = event->md.user_ptr;
+       struct lnet_peer        *lp;
+
+       LASSERT(rcd != NULL);
+
+       if (event->unlinked) {
+               LNetInvalidateHandle(&rcd->rcd_mdh);
+               return;
+       }
+
+       LASSERT(event->type == LNET_EVENT_SEND ||
+               event->type == LNET_EVENT_REPLY);
+
+       lp = rcd->rcd_gateway;
+       LASSERT(lp != NULL);
+
+        /* NB: it's called with holding lnet_res_lock, we have a few
+         * places need to hold both locks at the same time, please take
+         * care of lock ordering */
+       lnet_net_lock(lp->lp_cpt);
+       if (!lnet_isrouter(lp) || lp->lp_rcd != rcd) {
+               /* ignore if no longer a router or rcd is replaced */
+               goto out;
+       }
+
+       if (event->type == LNET_EVENT_SEND) {
+               lp->lp_ping_notsent = 0;
+               if (event->status == 0)
+                       goto out;
+       }
+
+       /* LNET_EVENT_REPLY */
+       /* A successful REPLY means the router is up.  If _any_ comms
+        * to the router fail I assume it's down (this will happen if
+        * we ping alive routers to try to detect router death before
+        * apps get burned). */
+
+       lnet_notify_locked(lp, 1, (event->status == 0), cfs_time_current());
+       /* The router checker will wake up very shortly and do the
+        * actual notification.
+        * XXX If 'lp' stops being a router before then, it will still
+        * have the notification pending!!! */
+
+       if (avoid_asym_router_failure && event->status == 0)
+               lnet_parse_rc_info(rcd);
+
+ out:
+       lnet_net_unlock(lp->lp_cpt);
 }
 
 void
 lnet_wait_known_routerstate(void)
 {
-        lnet_peer_t         *rtr;
-        struct list_head    *entry;
-        int                  all_known;
+       lnet_peer_t      *rtr;
+       struct list_head *entry;
+       int               all_known;
 
-        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
+       LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
-        for (;;) {
-                LNET_LOCK();
+       for (;;) {
+               int cpt = lnet_net_lock_current();
 
-                all_known = 1;
-                list_for_each (entry, &the_lnet.ln_routers) {
-                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+               all_known = 1;
+               list_for_each(entry, &the_lnet.ln_routers) {
+                       rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
 
-                        if (rtr->lp_alive_count == 0) {
-                                all_known = 0;
-                                break;
-                        }
-                }
+                       if (rtr->lp_alive_count == 0) {
+                               all_known = 0;
+                               break;
+                       }
+               }
 
-                LNET_UNLOCK();
+               lnet_net_unlock(cpt);
 
-                if (all_known)
+               if (all_known)
                         return;
 
 #ifndef __KERNEL__
@@ -529,66 +822,149 @@ lnet_wait_known_routerstate(void)
         }
 }
 
-static void
-lnet_router_checker_event (lnet_event_t *event)
+void
+lnet_router_ni_update_locked(lnet_peer_t *gw, __u32 net)
 {
-        /* CAVEAT EMPTOR: I'm called with LNET_LOCKed and I'm not allowed to
-         * drop it (that's how come I see _every_ event, even ones that would
-         * overflow my EQ) */
-        lnet_peer_t   *lp;
-        lnet_nid_t     nid;
-
-        if (event->unlinked) {
-                /* The router checker thread has unlinked the rc_md
-                 * and exited. */
-                LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING);
-                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKED;
-#ifdef __KERNEL__
-                mutex_up(&the_lnet.ln_rc_signal);
-#endif
-                return;
-        }
+       lnet_route_t *rte;
+
+       if ((gw->lp_ping_feats & LNET_PING_FEAT_NI_STATUS) != 0) {
+               list_for_each_entry(rte, &gw->lp_routes, lr_gwlist) {
+                       if (rte->lr_net == net) {
+                               rte->lr_downis = 0;
+                               break;
+                       }
+               }
+       }
+}
 
-        LASSERT (event->type == LNET_EVENT_SEND ||
-                 event->type == LNET_EVENT_REPLY);
+void
+lnet_update_ni_status_locked(void)
+{
+       lnet_ni_t       *ni;
+       long            now;
+       int             timeout;
+
+       LASSERT(the_lnet.ln_routing);
+
+       timeout = router_ping_timeout +
+                 MAX(live_router_check_interval, dead_router_check_interval);
+
+       now = cfs_time_current_sec();
+       list_for_each_entry(ni, &the_lnet.ln_nis, ni_list) {
+               if (ni->ni_lnd->lnd_type == LOLND)
+                       continue;
+
+               if (now < ni->ni_last_alive + timeout)
+                       continue;
+
+               lnet_ni_lock(ni);
+               /* re-check with lock */
+               if (now < ni->ni_last_alive + timeout) {
+                       lnet_ni_unlock(ni);
+                       continue;
+               }
+
+               LASSERT(ni->ni_status != NULL);
+
+               if (ni->ni_status->ns_status != LNET_NI_STATUS_DOWN) {
+                       CDEBUG(D_NET, "NI(%s:%d) status changed to down\n",
+                              libcfs_nid2str(ni->ni_nid), timeout);
+                       /* NB: so far, this is the only place to set
+                        * NI status to "down" */
+                       ni->ni_status->ns_status = LNET_NI_STATUS_DOWN;
+               }
+               lnet_ni_unlock(ni);
+       }
+}
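An NI is only flagged down after it has been silent for router_ping_timeout plus the larger of the two check intervals, which gives at least one full ping cycle a chance to refresh ni_last_alive. The arithmetic with the defaults set earlier in this file, as a sketch (the timestamps are illustrative):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        int router_ping_timeout        = 50;    /* defaults from this patch */
        int live_router_check_interval = 60;
        int dead_router_check_interval = 60;
        long now = 1000;                        /* illustrative seconds */
        long ni_last_alive = 880;

        int timeout = router_ping_timeout +
                      MAX(live_router_check_interval, dead_router_check_interval);

        printf("timeout = %ds, mark NI down: %s\n", timeout,
               now < ni_last_alive + timeout ? "no" : "yes");
        return 0;
}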
 
-        nid = (event->type == LNET_EVENT_SEND) ?
-              event->target.nid : event->initiator.nid;
+void
+lnet_destroy_rc_data(lnet_rc_data_t *rcd)
+{
+       LASSERT(list_empty(&rcd->rcd_list));
+       /* detached from network */
+       LASSERT(LNetHandleIsInvalid(rcd->rcd_mdh));
 
-        lp = lnet_find_peer_locked(nid);
-        if (lp == NULL) {
-                /* router may have been removed */
-                CDEBUG(D_NET, "Router %s not found\n", libcfs_nid2str(nid));
-                return;
-        }
+       if (rcd->rcd_gateway != NULL) {
+               int cpt = rcd->rcd_gateway->lp_cpt;
 
-        if (event->type == LNET_EVENT_SEND)     /* re-enable another ping */
-                lp->lp_ping_notsent = 0;
+               lnet_net_lock(cpt);
+               lnet_peer_decref_locked(rcd->rcd_gateway);
+               lnet_net_unlock(cpt);
+       }
 
-        if (lnet_isrouter(lp) &&                /* ignore if no longer a router */
-            (event->status != 0 ||
-             event->type == LNET_EVENT_REPLY)) {
+       if (rcd->rcd_pinginfo != NULL)
+               LIBCFS_FREE(rcd->rcd_pinginfo, LNET_PINGINFO_SIZE);
 
-                /* A successful REPLY means the router is up.  If _any_ comms
-                 * to the router fail I assume it's down (this will happen if
-                 * we ping alive routers to try to detect router death before
-                 * apps get burned). */
+       LIBCFS_FREE(rcd, sizeof(*rcd));
+}
+
+lnet_rc_data_t *
+lnet_create_rc_data_locked(lnet_peer_t *gateway)
+{
+       lnet_rc_data_t          *rcd = NULL;
+       lnet_ping_info_t        *pi;
+       int                     rc;
+       int                     i;
 
-                lnet_notify_locked(lp, 1, (event->status == 0),
-                                   cfs_time_current_sec());
+       lnet_net_unlock(gateway->lp_cpt);
 
-                /* The router checker will wake up very shortly and do the
-                 * actual notification.  
-                 * XXX If 'lp' stops being a router before then, it will still
-                 * have the notification pending!!! */
-        }
+       LIBCFS_ALLOC(rcd, sizeof(*rcd));
+       if (rcd == NULL)
+               goto out;
 
-        /* This decref will NOT drop LNET_LOCK (it had to have 1 ref when it
-         * was in the peer table and I've not dropped the lock, so no-one else
-         * can have reduced the refcount) */
-        LASSERT(lp->lp_refcount > 1);
+       LNetInvalidateHandle(&rcd->rcd_mdh);
+       INIT_LIST_HEAD(&rcd->rcd_list);
 
-        lnet_peer_decref_locked(lp);
+       LIBCFS_ALLOC(pi, LNET_PINGINFO_SIZE);
+       if (pi == NULL)
+               goto out;
+
+        for (i = 0; i < LNET_MAX_RTR_NIS; i++) {
+                pi->pi_ni[i].ns_nid = LNET_NID_ANY;
+                pi->pi_ni[i].ns_status = LNET_NI_STATUS_INVALID;
+        }
+        rcd->rcd_pinginfo = pi;
+
+        LASSERT (!LNetHandleIsInvalid(the_lnet.ln_rc_eqh));
+        rc = LNetMDBind((lnet_md_t){.start     = pi,
+                                    .user_ptr  = rcd,
+                                    .length    = LNET_PINGINFO_SIZE,
+                                    .threshold = LNET_MD_THRESH_INF,
+                                    .options   = LNET_MD_TRUNCATE,
+                                    .eq_handle = the_lnet.ln_rc_eqh},
+                        LNET_UNLINK,
+                        &rcd->rcd_mdh);
+        if (rc < 0) {
+                CERROR("Can't bind MD: %d\n", rc);
+               goto out;
+       }
+       LASSERT(rc == 0);
+
+       lnet_net_lock(gateway->lp_cpt);
+       /* router table changed or someone has created rcd for this gateway */
+       if (!lnet_isrouter(gateway) || gateway->lp_rcd != NULL) {
+               lnet_net_unlock(gateway->lp_cpt);
+               goto out;
+       }
+
+       lnet_peer_addref_locked(gateway);
+       rcd->rcd_gateway = gateway;
+       gateway->lp_rcd = rcd;
+       gateway->lp_ping_notsent = 0;
+
+       return rcd;
+
+ out:
+       if (rcd != NULL) {
+               if (!LNetHandleIsInvalid(rcd->rcd_mdh)) {
+                       rc = LNetMDUnlink(rcd->rcd_mdh);
+                       LASSERT(rc == 0);
+               }
+               lnet_destroy_rc_data(rcd);
+       }
+
+       lnet_net_lock(gateway->lp_cpt);
+       return gateway->lp_rcd;
 }
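lnet_create_rc_data_locked() uses the classic drop-lock / allocate / retake-lock / re-validate pattern: the per-CPT net lock is released around the blocking allocations and LNetMDBind(), then reacquired, and the gateway is re-checked (still a router, still without an rcd) before the new rcd is installed; on a lost race the freshly built rcd is torn down and the winner's lp_rcd is returned. A minimal userspace sketch of the same pattern with a pthread mutex (hypothetical names, not the LNet locking API; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct gateway {
        pthread_mutex_t lock;
        int             is_router;
        void           *rcd;            /* router-checker data, if any */
};

/* called with gw->lock held; returns with gw->lock held */
static void *create_rcd_locked(struct gateway *gw)
{
        void *rcd;

        pthread_mutex_unlock(&gw->lock);        /* don't allocate under the lock */
        rcd = malloc(64);
        pthread_mutex_lock(&gw->lock);

        /* re-validate: the world may have changed while we were unlocked */
        if (rcd == NULL || !gw->is_router || gw->rcd != NULL) {
                free(rcd);                      /* lost the race (or ENOMEM) */
                return gw->rcd;
        }

        gw->rcd = rcd;
        return rcd;
}

int main(void)
{
        struct gateway gw = { PTHREAD_MUTEX_INITIALIZER, 1, NULL };

        pthread_mutex_lock(&gw.lock);
        printf("rcd = %p\n", create_rcd_locked(&gw));
        pthread_mutex_unlock(&gw.lock);
        free(gw.rcd);
        return 0;
}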
 
 static int
@@ -607,22 +983,31 @@ lnet_router_check_interval (lnet_peer_t *rtr)
 static void
 lnet_ping_router_locked (lnet_peer_t *rtr)
 {
-        lnet_process_id_t id;
-        int               secs;
-        time_t            now = cfs_time_current_sec();
+        lnet_rc_data_t *rcd = NULL;
+        cfs_time_t      now = cfs_time_current();
+        int             secs;
 
         lnet_peer_addref_locked(rtr);
 
         if (rtr->lp_ping_deadline != 0 && /* ping timed out? */
-            now > rtr->lp_ping_deadline)
+            cfs_time_after(now, rtr->lp_ping_deadline))
                 lnet_notify_locked(rtr, 1, 0, now);
 
-        LNET_UNLOCK();
+       /* Run any outstanding notifications */
+       lnet_ni_notify_locked(rtr->lp_ni, rtr);
 
-        /* Run any outstanding notifications */
-        lnet_do_notify(rtr);
+       if (!lnet_isrouter(rtr) ||
+           the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
+               /* router table changed or router checker is shutting down */
+               lnet_peer_decref_locked(rtr);
+               return;
+       }
 
-        LNET_LOCK();
+       rcd = rtr->lp_rcd != NULL ?
+             rtr->lp_rcd : lnet_create_rc_data_locked(rtr);
+
+       if (rcd == NULL)
+               return;
 
         secs = lnet_router_check_interval(rtr);
 
@@ -634,23 +1019,34 @@ lnet_ping_router_locked (lnet_peer_t *rtr)
                rtr->lp_alive, rtr->lp_alive_count, rtr->lp_ping_timestamp);
 
         if (secs != 0 && !rtr->lp_ping_notsent &&
-            now > rtr->lp_ping_timestamp + secs) {
+            cfs_time_after(now, cfs_time_add(rtr->lp_ping_timestamp,
+                                             cfs_time_seconds(secs)))) {
+                int               rc;
+                lnet_process_id_t id;
+                lnet_handle_md_t  mdh;
+
                 id.nid = rtr->lp_nid;
-                id.pid = LUSTRE_SRV_LNET_PID;
+               id.pid = LNET_PID_LUSTRE;
                 CDEBUG(D_NET, "Check: %s\n", libcfs_id2str(id));
 
                 rtr->lp_ping_notsent   = 1;
                 rtr->lp_ping_timestamp = now;
 
-                if (rtr->lp_ping_deadline == 0)
-                        rtr->lp_ping_deadline = now + router_ping_timeout;
+               mdh = rcd->rcd_mdh;
 
-                LNET_UNLOCK();
+               if (rtr->lp_ping_deadline == 0) {
+                       rtr->lp_ping_deadline =
+                               cfs_time_shift(router_ping_timeout);
+               }
 
-                LNetGet(LNET_NID_ANY, the_lnet.ln_rc_mdh, id,
-                        LNET_RESERVED_PORTAL, LNET_PROTO_PING_MATCHBITS, 0);
+               lnet_net_unlock(rtr->lp_cpt);
 
-                LNET_LOCK();
+               rc = LNetGet(LNET_NID_ANY, mdh, id, LNET_RESERVED_PORTAL,
+                            LNET_PROTO_PING_MATCHBITS, 0);
+
+               lnet_net_lock(rtr->lp_cpt);
+                if (rc != 0)
+                        rtr->lp_ping_notsent = 0; /* no event pending */
         }
 
         lnet_peer_decref_locked(rtr);
@@ -660,27 +1056,26 @@ lnet_ping_router_locked (lnet_peer_t *rtr)
 int
 lnet_router_checker_start(void)
 {
-        static lnet_ping_info_t pinginfo;
-
-        lnet_md_t    md;
-        int          rc;
-        int          eqsz;
-#ifndef __KERNEL__
-        lnet_peer_t *rtr;
-        __u64        version;
-        int          nrtr = 0;
-        int          router_checker_max_eqsize = 10240;
+       int                     rc;
+       int                     eqsz;
+#ifdef __KERNEL__
+       struct task_struct     *task;
+#else /* __KERNEL__ */
+       lnet_peer_t            *rtr;
+       __u64                   version;
+       int                     nrtr = 0;
+       int                     router_checker_max_eqsize = 10240;
 
         LASSERT (check_routers_before_use);
         LASSERT (dead_router_check_interval > 0);
 
-        LNET_LOCK();
+       lnet_net_lock(0);
 
         /* As an approximation, allow each router the same number of
          * outstanding events as it is allowed outstanding sends */
         eqsz = 0;
         version = the_lnet.ln_routers_version;
-        list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+       list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
                 lnet_ni_t         *ni = rtr->lp_ni;
                 lnet_process_id_t  id;
 
@@ -689,9 +1084,9 @@ lnet_router_checker_start(void)
 
                 /* one async ping reply per router */
                 id.nid = rtr->lp_nid;
-                id.pid = LUSTRE_SRV_LNET_PID;
+               id.pid = LNET_PID_LUSTRE;
 
-                LNET_UNLOCK();
+               lnet_net_unlock(0);
 
                 rc = LNetSetAsync(id, 1);
                 if (rc != 0) {
@@ -700,12 +1095,12 @@ lnet_router_checker_start(void)
                         return rc;
                 }
 
-                LNET_LOCK();
-                /* NB router list doesn't change in userspace */
-                LASSERT (version == the_lnet.ln_routers_version);
-        }
+               lnet_net_lock(0);
+               /* NB router list doesn't change in userspace */
+               LASSERT(version == the_lnet.ln_routers_version);
+       }
 
-        LNET_UNLOCK();
+       lnet_net_unlock(0);
 
         if (nrtr == 0) {
                 CDEBUG(D_NET,
@@ -732,15 +1127,16 @@ lnet_router_checker_start(void)
                 return -EINVAL;
         }
 
-        if (live_router_check_interval <= 0 &&
+        if (!the_lnet.ln_routing &&
+            live_router_check_interval <= 0 &&
             dead_router_check_interval <= 0)
                 return 0;
 
 #ifdef __KERNEL__
-        init_mutex_locked(&the_lnet.ln_rc_signal);
+       sema_init(&the_lnet.ln_rc_signal, 0);
         /* EQ size doesn't matter; the callback is guaranteed to get every
          * event */
-        eqsz = 1;
+       eqsz = 0;
         rc = LNetEQAlloc(eqsz, lnet_router_checker_event,
                          &the_lnet.ln_rc_eqh);
 #else
@@ -752,36 +1148,19 @@ lnet_router_checker_start(void)
                 return -ENOMEM;
         }
 
-        memset(&md, 0, sizeof(md));
-        md.start     = &pinginfo;
-        md.length    = sizeof(pinginfo);
-        md.options   = LNET_MD_TRUNCATE;
-        md.threshold = LNET_MD_THRESH_INF;
-        md.eq_handle = the_lnet.ln_rc_eqh;
-        rc = LNetMDBind(md, LNET_UNLINK, &the_lnet.ln_rc_mdh);
-        if (rc < 0) {
-                CERROR("Can't bind MD: %d\n", rc);
-                rc = LNetEQFree(the_lnet.ln_rc_eqh);
-                LASSERT (rc == 0);
-                return -ENOMEM;
-        }
-        LASSERT (rc == 0);
-
         the_lnet.ln_rc_state = LNET_RC_STATE_RUNNING;
 #ifdef __KERNEL__
-        rc = (int)cfs_kernel_thread(lnet_router_checker, NULL, 0);
-        if (rc < 0) {
-                CERROR("Can't start router checker thread: %d\n", rc);
-                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
-                rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
-                LASSERT (rc == 0);
-                /* block until event callback signals exit */
-                mutex_down(&the_lnet.ln_rc_signal);
-                rc = LNetEQFree(the_lnet.ln_rc_eqh);
-                LASSERT (rc == 0);
-                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
-                return -ENOMEM;
-        }
+       task = kthread_run(lnet_router_checker, NULL, "router_checker");
+       if (IS_ERR(task)) {
+               rc = PTR_ERR(task);
+               CERROR("Can't start router checker thread: %d\n", rc);
+               /* block until event callback signals exit */
+               down(&the_lnet.ln_rc_signal);
+               rc = LNetEQFree(the_lnet.ln_rc_eqh);
+               LASSERT(rc == 0);
+               the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+               return -ENOMEM;
+       }
 #endif
 
         if (check_routers_before_use) {
@@ -803,51 +1182,136 @@ lnet_router_checker_stop (void)
                 return;
 
         LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
-        the_lnet.ln_rc_state = LNET_RC_STATE_STOPTHREAD;
+       the_lnet.ln_rc_state = LNET_RC_STATE_STOPPING;
 
 #ifdef __KERNEL__
-        /* block until event callback signals exit */
-        mutex_down(&the_lnet.ln_rc_signal);
+       /* block until event callback signals exit */
+       down(&the_lnet.ln_rc_signal);
 #else
-        while (the_lnet.ln_rc_state != LNET_RC_STATE_UNLINKED) {
-                lnet_router_checker();
-                cfs_pause(cfs_time_seconds(1));
-        }
+       lnet_router_checker();
 #endif
-        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED);
+       LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
         rc = LNetEQFree(the_lnet.ln_rc_eqh);
         LASSERT (rc == 0);
-        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
         return;
 }
 
+static void
+lnet_prune_rc_data(int wait_unlink)
+{
+       lnet_rc_data_t          *rcd;
+       lnet_rc_data_t          *tmp;
+       lnet_peer_t             *lp;
+       struct list_head         head;
+       int                      i = 2;
+
+       if (likely(the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING &&
+                  list_empty(&the_lnet.ln_rcd_deathrow) &&
+                  list_empty(&the_lnet.ln_rcd_zombie)))
+               return;
+
+       INIT_LIST_HEAD(&head);
+
+       lnet_net_lock(LNET_LOCK_EX);
+
+       if (the_lnet.ln_rc_state != LNET_RC_STATE_RUNNING) {
+               /* router checker is stopping, prune all */
+               list_for_each_entry(lp, &the_lnet.ln_routers,
+                                   lp_rtr_list) {
+                       if (lp->lp_rcd == NULL)
+                               continue;
+
+                       LASSERT(list_empty(&lp->lp_rcd->rcd_list));
+                       list_add(&lp->lp_rcd->rcd_list,
+                                &the_lnet.ln_rcd_deathrow);
+                       lp->lp_rcd = NULL;
+               }
+       }
+
+       /* unlink all RCDs on deathrow list */
+       list_splice_init(&the_lnet.ln_rcd_deathrow, &head);
+
+       if (!list_empty(&head)) {
+               lnet_net_unlock(LNET_LOCK_EX);
+
+               list_for_each_entry(rcd, &head, rcd_list)
+                       LNetMDUnlink(rcd->rcd_mdh);
+
+               lnet_net_lock(LNET_LOCK_EX);
+       }
+
+       list_splice_init(&head, &the_lnet.ln_rcd_zombie);
+
+       /* release all zombie RCDs */
+       while (!list_empty(&the_lnet.ln_rcd_zombie)) {
+               list_for_each_entry_safe(rcd, tmp, &the_lnet.ln_rcd_zombie,
+                                        rcd_list) {
+                       if (LNetHandleIsInvalid(rcd->rcd_mdh))
+                               list_move(&rcd->rcd_list, &head);
+               }
+
+               wait_unlink = wait_unlink &&
+                             !list_empty(&the_lnet.ln_rcd_zombie);
+
+               lnet_net_unlock(LNET_LOCK_EX);
+
+               while (!list_empty(&head)) {
+                       rcd = list_entry(head.next,
+                                        lnet_rc_data_t, rcd_list);
+                       list_del_init(&rcd->rcd_list);
+                       lnet_destroy_rc_data(rcd);
+               }
+
+               if (!wait_unlink)
+                       return;
+
+               i++;
+               CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+                      "Waiting for rc buffers to unlink\n");
+               cfs_pause(cfs_time_seconds(1) / 4);
+
+               lnet_net_lock(LNET_LOCK_EX);
+       }
+
+       lnet_net_unlock(LNET_LOCK_EX);
+}
+
+
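
lnet_prune_rc_data() above tears RCDs down in two stages: entries are moved to
a deathrow list so their MDs can be unlinked with the net lock dropped, then
parked on a zombie list and freed only once the unlink event has invalidated
the handle, with the "still waiting" message throttled to power-of-two passes
by the (i & (-i)) == i test.  The following is a compact user-space sketch of
that deferred-free shape; the structures and the simulated completion are
illustrative, not LNet code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rcd {
        struct rcd *next;
        bool        unlink_done;   /* set by the (simulated) event callback */
        int         id;
};

static struct rcd *zombies;

/* Stand-in for the MD unlink event callback completing one entry. */
static void complete_one(void)
{
        for (struct rcd *r = zombies; r != NULL; r = r->next) {
                if (!r->unlink_done) {
                        r->unlink_done = true;
                        return;
                }
        }
}

static void reap_zombies(void)
{
        int i = 2;

        while (zombies != NULL) {
                struct rcd **pp = &zombies;

                /* free every entry whose unlink has already completed */
                while (*pp != NULL) {
                        struct rcd *r = *pp;

                        if (r->unlink_done) {
                                *pp = r->next;
                                printf("freed rcd %d\n", r->id);
                                free(r);
                        } else {
                                pp = &r->next;
                        }
                }

                if (zombies == NULL)
                        break;

                i++;
                if ((i & (-i)) == i)    /* true only at powers of two */
                        printf("still waiting for unlinks (pass %d)\n", i);

                complete_one();         /* stand-in for the async event */
        }
}

int main(void)
{
        for (int id = 0; id < 3; id++) {
                struct rcd *r = calloc(1, sizeof(*r));

                if (r == NULL)
                        break;
                r->id = id;
                r->next = zombies;
                zombies = r;
        }
        reap_zombies();
        return 0;
}
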
 #if defined(__KERNEL__) && defined(LNET_ROUTER)
 
 static int
 lnet_router_checker(void *arg)
 {
-        int                rc;
         lnet_peer_t       *rtr;
-        struct list_head  *entry;
-        lnet_process_id_t  rtr_id;
+       struct list_head  *entry;
 
-        cfs_daemonize("router_checker");
         cfs_block_allsigs();
 
-        rtr_id.pid = LUSTRE_SRV_LNET_PID;
-
         LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
         while (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING) {
-                __u64 version;
+               __u64   version;
+               int     cpt;
+               int     cpt2;
 
-                LNET_LOCK();
+               cpt = lnet_net_lock_current();
 rescan:
-                version = the_lnet.ln_routers_version;
+               version = the_lnet.ln_routers_version;
+
+               list_for_each(entry, &the_lnet.ln_routers) {
+                       rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
+
+                       cpt2 = lnet_cpt_of_nid_locked(rtr->lp_nid);
+                       if (cpt != cpt2) {
+                               lnet_net_unlock(cpt);
+                               cpt = cpt2;
+                               lnet_net_lock(cpt);
+                               /* the routers list has changed */
+                               if (version != the_lnet.ln_routers_version)
+                                       goto rescan;
+                       }
 
-                list_for_each (entry, &the_lnet.ln_routers) {
-                        rtr = list_entry(entry, lnet_peer_t, lp_rtr_list);
                         lnet_ping_router_locked(rtr);
 
                         /* NB dropped lock */
@@ -857,23 +1321,28 @@ rescan:
                         }
                 }
 
-                LNET_UNLOCK();
+               if (the_lnet.ln_routing)
+                       lnet_update_ni_status_locked();
 
-                /* Call cfs_pause() here always adds 1 to load average 
-                 * because kernel counts # active tasks as nr_running 
-                 * + nr_uninterruptible. */
-                cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE,
-                                     cfs_time_seconds(1));
-        }
+               lnet_net_unlock(cpt);
 
-        LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD);
-        the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
+               lnet_prune_rc_data(0); /* don't wait for UNLINK */
 
-        rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
-        LASSERT (rc == 0);
+               /* Calling cfs_pause() here would always add 1 to the load
+                * average because the kernel counts # active tasks as
+                * nr_running + nr_uninterruptible. */
+               schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
+                                              cfs_time_seconds(1));
+       }
 
-        /* The unlink event callback will signal final completion */
-        return 0;
+       LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING);
+
+       lnet_prune_rc_data(1); /* wait for UNLINK */
+
+       the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
+       up(&the_lnet.ln_rc_signal);
+       /* The unlink event callback will signal final completion */
+       return 0;
 }
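
The kernel checker loop above no longer takes one global LNET_LOCK: it holds
the net lock of the CPT the current router hashes to, switches locks when the
next router belongs to a different CPT, and rescans from the top if
ln_routers_version shows the list changed while it was unlocked.  Here is a
user-space sketch of that lock-switch-and-rescan idea; the partitioning,
locking and names are assumed for illustration only.

#include <pthread.h>
#include <stdio.h>

#define NPART 2

struct router {
        struct router *next;
        int            part;    /* which partition's lock protects it */
        int            nid;
};

static pthread_mutex_t part_lock[NPART] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
};
static struct router *routers;
static unsigned long routers_version;

static void ping_router_locked(struct router *r)
{
        printf("ping router %d (partition %d)\n", r->nid, r->part);
}

static void check_routers(void)
{
        unsigned long version;
        struct router *r;
        int cpt = 0;

        pthread_mutex_lock(&part_lock[cpt]);
rescan:
        version = routers_version;

        for (r = routers; r != NULL; r = r->next) {
                if (r->part != cpt) {
                        pthread_mutex_unlock(&part_lock[cpt]);
                        cpt = r->part;
                        pthread_mutex_lock(&part_lock[cpt]);
                        /* the list may have changed while unlocked */
                        if (version != routers_version)
                                goto rescan;
                }
                ping_router_locked(r);
        }
        pthread_mutex_unlock(&part_lock[cpt]);
}

int main(void)
{
        struct router a = { NULL, 1, 102 };
        struct router b = { &a, 0, 101 };

        routers = &b;
        check_routers();
        return 0;
}
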
 
 void
@@ -882,37 +1351,38 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
         int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
 
         while (--npages >= 0)
-                cfs_free_page(rb->rb_kiov[npages].kiov_page);
+               __free_page(rb->rb_kiov[npages].kiov_page);
 
         LIBCFS_FREE(rb, sz);
 }
 
 lnet_rtrbuf_t *
-lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
+lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 {
-        int            npages = rbp->rbp_npages;
-        int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
-        struct page   *page;
-        lnet_rtrbuf_t *rb;
-        int            i;
+       int            npages = rbp->rbp_npages;
+       int            sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
+       struct page   *page;
+       lnet_rtrbuf_t *rb;
+       int            i;
 
-        LIBCFS_ALLOC(rb, sz);
-        if (rb == NULL)
-                return NULL;
+       LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz);
+       if (rb == NULL)
+               return NULL;
 
-        rb->rb_pool = rbp;
+       rb->rb_pool = rbp;
 
-        for (i = 0; i < npages; i++) {
-                page = cfs_alloc_page(CFS_ALLOC_ZERO | CFS_ALLOC_STD);
+       for (i = 0; i < npages; i++) {
+               page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
+                                         __GFP_ZERO | GFP_IOFS);
                 if (page == NULL) {
                         while (--i >= 0)
-                                cfs_free_page(rb->rb_kiov[i].kiov_page);
+                               __free_page(rb->rb_kiov[i].kiov_page);
 
                         LIBCFS_FREE(rb, sz);
                         return NULL;
                 }
 
-                rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
+               rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
                 rb->rb_kiov[i].kiov_offset = 0;
                 rb->rb_kiov[i].kiov_page = page;
         }
@@ -921,175 +1391,381 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp)
 }
 
 void
-lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp)
+lnet_rtrpool_free_bufs(lnet_rtrbufpool_t *rbp, int cpt)
 {
-        int            npages = rbp->rbp_npages;
-        int            nbuffers = 0;
-        lnet_rtrbuf_t *rb;
-
-        LASSERT (list_empty(&rbp->rbp_msgs));
-        LASSERT (rbp->rbp_credits == rbp->rbp_nbuffers);
+       int              npages = rbp->rbp_npages;
+       lnet_rtrbuf_t    *rb;
+       struct list_head tmp;
+
+       if (rbp->rbp_nbuffers == 0) /* not initialized or already freed */
+               return;
+
+       INIT_LIST_HEAD(&tmp);
+
+       lnet_net_lock(cpt);
+       lnet_drop_routed_msgs_locked(&rbp->rbp_msgs, cpt);
+       list_splice_init(&rbp->rbp_bufs, &tmp);
+       rbp->rbp_nbuffers = rbp->rbp_credits = 0;
+       rbp->rbp_mincredits = 0;
+       lnet_net_unlock(cpt);
+
+       /* Free buffers on the free list. */
+       while (!list_empty(&tmp)) {
+               rb = list_entry(tmp.next, lnet_rtrbuf_t, rb_list);
+               list_del(&rb->rb_list);
+               lnet_destroy_rtrbuf(rb, npages);
+       }
+}
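
lnet_rtrpool_free_bufs() drains the pool by detaching the whole buffer list
and zeroing the counters inside one short critical section, then destroying
the detached buffers with the lock dropped.  A minimal user-space sketch of
that "splice out under the lock, free unlocked" shape follows; the types and
names are illustrative.

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

struct pool {
        pthread_mutex_t lock;
        struct node    *bufs;
        int             nbuffers;
};

static void pool_free_bufs(struct pool *p)
{
        struct node *tmp;

        /* detach the whole list and reset the counters under the lock */
        pthread_mutex_lock(&p->lock);
        tmp = p->bufs;
        p->bufs = NULL;
        p->nbuffers = 0;
        pthread_mutex_unlock(&p->lock);

        /* free the detached buffers without holding the lock */
        while (tmp != NULL) {
                struct node *n = tmp;

                tmp = n->next;
                free(n);
        }
}

int main(void)
{
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (n == NULL)
                        break;
                n->next = p.bufs;
                p.bufs = n;
                p.nbuffers++;
        }
        pool_free_bufs(&p);
        return 0;
}
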
 
-        while (!list_empty(&rbp->rbp_bufs)) {
-                LASSERT (rbp->rbp_credits > 0);
+static int
+lnet_rtrpool_adjust_bufs(lnet_rtrbufpool_t *rbp, int nbufs, int cpt)
+{
+       struct list_head rb_list;
+       lnet_rtrbuf_t   *rb;
+       int             num_rb;
+       int             num_buffers = 0;
+       int             npages = rbp->rbp_npages;
+
+       /* If we are called for fewer buffers than are already in the pool,
+        * we just lower the nbuffers number and excess buffers will be
+        * thrown away as they are returned to the free list.  Credits
+        * then get adjusted as well. */
+       if (nbufs <= rbp->rbp_nbuffers) {
+               lnet_net_lock(cpt);
+               rbp->rbp_nbuffers = nbufs;
+               lnet_net_unlock(cpt);
+               return 0;
+       }
+
+       INIT_LIST_HEAD(&rb_list);
+
+       /* allocate the buffers on a local list first.  If all buffers are
+        * allocated successfully then join this list to the rbp buffer
+        * list.  If not then free all allocated buffers. */
+       num_rb = rbp->rbp_nbuffers;
+
+       while (num_rb < nbufs) {
+               rb = lnet_new_rtrbuf(rbp, cpt);
+               if (rb == NULL) {
+                       CERROR("Failed to allocate %d route bufs of %d pages\n",
+                              nbufs, npages);
+                       goto failed;
+               }
+
+               list_add(&rb->rb_list, &rb_list);
+               num_buffers++;
+               num_rb++;
+       }
+
+       lnet_net_lock(cpt);
+
+       list_splice_tail(&rb_list, &rbp->rbp_bufs);
+       rbp->rbp_nbuffers += num_buffers;
+       rbp->rbp_credits += num_buffers;
+       rbp->rbp_mincredits = rbp->rbp_credits;
+       /* We need to schedule the blocked msgs using the newly
+        * added buffers. */
+       while (!list_empty(&rbp->rbp_bufs) &&
+              !list_empty(&rbp->rbp_msgs))
+               lnet_schedule_blocked_locked(rbp);
+
+       lnet_net_unlock(cpt);
+
+       return 0;
+
+failed:
+       while (!list_empty(&rb_list)) {
+               rb = list_entry(rb_list.next, lnet_rtrbuf_t, rb_list);
+               list_del(&rb->rb_list);
+               lnet_destroy_rtrbuf(rb, npages);
+       }
+
+       return -ENOMEM;
+}
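
lnet_rtrpool_adjust_bufs() grows a pool by building every new buffer on a
private list with no lock held and splicing the whole batch into the shared
pool, credits included, only once all allocations have succeeded; a failure
frees the private list and leaves the pool exactly as it was.  Below is a
user-space sketch of that all-or-nothing growth pattern; the pool structure is
an illustrative stand-in, not lnet_rtrbufpool_t.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
        struct buf *next;
};

struct pool {
        pthread_mutex_t lock;
        struct buf     *free_list;
        int             nbuffers;
        int             credits;
};

static int pool_grow(struct pool *p, int target)
{
        struct buf *local = NULL;
        int added = 0;

        /* allocate on a private list first, no lock held */
        for (int n = p->nbuffers; n < target; n++) {
                struct buf *b = malloc(sizeof(*b));

                if (b == NULL) {
                        while (local != NULL) {         /* roll back */
                                struct buf *t = local;

                                local = t->next;
                                free(t);
                        }
                        return -1;
                }
                b->next = local;
                local = b;
                added++;
        }

        /* splice into the shared pool in one short critical section */
        pthread_mutex_lock(&p->lock);
        while (local != NULL) {
                struct buf *t = local;

                local = t->next;
                t->next = p->free_list;
                p->free_list = t;
        }
        p->nbuffers += added;
        p->credits  += added;
        pthread_mutex_unlock(&p->lock);
        return 0;
}

int main(void)
{
        struct pool p = { PTHREAD_MUTEX_INITIALIZER, NULL, 0, 0 };

        if (pool_grow(&p, 4) == 0)
                printf("pool now has %d buffers, %d credits\n",
                       p.nbuffers, p.credits);
        return 0;
}
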
 
-                rb = list_entry(rbp->rbp_bufs.next,
-                                lnet_rtrbuf_t, rb_list);
-                list_del(&rb->rb_list);
-                lnet_destroy_rtrbuf(rb, npages);
-                nbuffers++;
-        }
+void
+lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
+{
+       INIT_LIST_HEAD(&rbp->rbp_msgs);
+       INIT_LIST_HEAD(&rbp->rbp_bufs);
 
-        LASSERT (rbp->rbp_nbuffers == nbuffers);
-        LASSERT (rbp->rbp_credits == nbuffers);
+       rbp->rbp_npages = npages;
+       rbp->rbp_credits = 0;
+       rbp->rbp_mincredits = 0;
+}
 
-        rbp->rbp_nbuffers = rbp->rbp_credits = 0;
+void
+lnet_rtrpools_free(int keep_pools)
+{
+       lnet_rtrbufpool_t *rtrp;
+       int               i;
+
+       if (the_lnet.ln_rtrpools == NULL) /* uninitialized or freed */
+               return;
+
+       cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+               lnet_rtrpool_free_bufs(&rtrp[LNET_TINY_BUF_IDX], i);
+               lnet_rtrpool_free_bufs(&rtrp[LNET_SMALL_BUF_IDX], i);
+               lnet_rtrpool_free_bufs(&rtrp[LNET_LARGE_BUF_IDX], i);
+       }
+
+       if (!keep_pools) {
+               cfs_percpt_free(the_lnet.ln_rtrpools);
+               the_lnet.ln_rtrpools = NULL;
+       }
 }
 
-int
-lnet_rtrpool_alloc_bufs(lnet_rtrbufpool_t *rbp, int nbufs)
+static int
+lnet_nrb_tiny_calculate(void)
 {
-        lnet_rtrbuf_t *rb;
-        int            i;
+       int     nrbs = LNET_NRB_TINY;
 
-        if (rbp->rbp_nbuffers != 0) {
-                LASSERT (rbp->rbp_nbuffers == nbufs);
-                return 0;
-        }
+       if (tiny_router_buffers < 0) {
+               LCONSOLE_ERROR_MSG(0x10c,
+                                  "tiny_router_buffers=%d invalid when "
+                                  "routing enabled\n", tiny_router_buffers);
+               return -1;
+       }
 
-        for (i = 0; i < nbufs; i++) {
-                rb = lnet_new_rtrbuf(rbp);
+       if (tiny_router_buffers > 0)
+               nrbs = tiny_router_buffers;
 
-                if (rb == NULL) {
-                        CERROR("Failed to allocate %d router bufs of %d pages\n",
-                               nbufs, rbp->rbp_npages);
-                        return -ENOMEM;
-                }
+       nrbs /= LNET_CPT_NUMBER;
+       return max(nrbs, LNET_NRB_TINY_MIN);
+}
 
-                rbp->rbp_nbuffers++;
-                rbp->rbp_credits++;
-                rbp->rbp_mincredits++;
-                list_add(&rb->rb_list, &rbp->rbp_bufs);
+static int
+lnet_nrb_small_calculate(void)
+{
+       int     nrbs = LNET_NRB_SMALL;
 
-                /* No allocation "under fire" */
-                /* Otherwise we'd need code to schedule blocked msgs etc */
-                LASSERT (!the_lnet.ln_routing);
-        }
+       if (small_router_buffers < 0) {
+               LCONSOLE_ERROR_MSG(0x10c,
+                                  "small_router_buffers=%d invalid when "
+                                  "routing enabled\n", small_router_buffers);
+               return -1;
+       }
 
-        LASSERT (rbp->rbp_credits == nbufs);
-        return 0;
+       if (small_router_buffers > 0)
+               nrbs = small_router_buffers;
+
+       nrbs /= LNET_CPT_NUMBER;
+       return max(nrbs, LNET_NRB_SMALL_MIN);
 }
 
-void
-lnet_rtrpool_init(lnet_rtrbufpool_t *rbp, int npages)
+static int
+lnet_nrb_large_calculate(void)
 {
-        CFS_INIT_LIST_HEAD(&rbp->rbp_msgs);
-        CFS_INIT_LIST_HEAD(&rbp->rbp_bufs);
+       int     nrbs = LNET_NRB_LARGE;
 
-        rbp->rbp_npages = npages;
-        rbp->rbp_credits = 0;
-        rbp->rbp_mincredits = 0;
-}
+       if (large_router_buffers < 0) {
+               LCONSOLE_ERROR_MSG(0x10c,
+                                  "large_router_buffers=%d invalid when "
+                                  "routing enabled\n", large_router_buffers);
+               return -1;
+       }
 
-void
-lnet_free_rtrpools(void)
-{
-        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[0]);
-        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[1]);
-        lnet_rtrpool_free_bufs(&the_lnet.ln_rtrpools[2]);
+       if (large_router_buffers > 0)
+               nrbs = large_router_buffers;
+
+       nrbs /= LNET_CPT_NUMBER;
+       return max(nrbs, LNET_NRB_LARGE_MIN);
 }
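
The three *_calculate() helpers above share one sizing rule: a negative module
parameter is rejected, zero falls back to the compiled-in default, a positive
value overrides it, and the total is then divided across CPTs but clamped to a
per-CPT floor.  A small standalone sketch of that arithmetic follows; the
default, floor and CPT count used in main() are made-up illustration values,
not the LNET_NRB_* constants.

#include <stdio.h>

static int nrb_calculate(int tunable, int def_total, int per_cpt_min,
                         int ncpts)
{
        int nrbs = def_total;

        if (tunable < 0)
                return -1;              /* invalid when routing enabled */
        if (tunable > 0)
                nrbs = tunable;

        nrbs /= ncpts;
        return nrbs > per_cpt_min ? nrbs : per_cpt_min;
}

int main(void)
{
        /* 4096 / 8 = 512 is below the 1024 floor, so each CPT gets 1024 */
        printf("%d\n", nrb_calculate(0, 4096, 1024, 8));
        /* an explicit 16384 gives 16384 / 8 = 2048 per CPT */
        printf("%d\n", nrb_calculate(16384, 4096, 1024, 8));
        return 0;
}
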
 
-void
-lnet_init_rtrpools(void)
+int
+lnet_rtrpools_alloc(int im_a_router)
 {
-        int small_pages = 1;
-        int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+       lnet_rtrbufpool_t *rtrp;
+       int     nrb_tiny;
+       int     nrb_small;
+       int     nrb_large;
+       int     rc;
+       int     i;
+
+       if (!strcmp(forwarding, "")) {
+               /* not set either way */
+               if (!im_a_router)
+                       return 0;
+       } else if (!strcmp(forwarding, "disabled")) {
+               /* explicitly disabled */
+               return 0;
+       } else if (!strcmp(forwarding, "enabled")) {
+               /* explicitly enabled */
+       } else {
+               LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
+                                  "'enabled' or 'disabled'\n");
+               return -EINVAL;
+       }
+
+       nrb_tiny = lnet_nrb_tiny_calculate();
+       if (nrb_tiny < 0)
+               return -EINVAL;
+
+       nrb_small = lnet_nrb_small_calculate();
+       if (nrb_small < 0)
+               return -EINVAL;
+
+       nrb_large = lnet_nrb_large_calculate();
+       if (nrb_large < 0)
+               return -EINVAL;
+
+       the_lnet.ln_rtrpools = cfs_percpt_alloc(lnet_cpt_table(),
+                                               LNET_NRBPOOLS *
+                                               sizeof(lnet_rtrbufpool_t));
+       if (the_lnet.ln_rtrpools == NULL) {
+               LCONSOLE_ERROR_MSG(0x10c,
+                                  "Failed to initialize router buffer pool\n");
+               return -ENOMEM;
+       }
+
+       cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+               lnet_rtrpool_init(&rtrp[LNET_TINY_BUF_IDX], 0);
+               rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+                                             nrb_tiny, i);
+               if (rc != 0)
+                       goto failed;
+
+               lnet_rtrpool_init(&rtrp[LNET_SMALL_BUF_IDX],
+                                 LNET_NRB_SMALL_PAGES);
+               rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+                                             nrb_small, i);
+               if (rc != 0)
+                       goto failed;
+
+               lnet_rtrpool_init(&rtrp[LNET_LARGE_BUF_IDX],
+                                 LNET_NRB_LARGE_PAGES);
+               rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+                                             nrb_large, i);
+               if (rc != 0)
+                       goto failed;
+       }
+
+       lnet_net_lock(LNET_LOCK_EX);
+       the_lnet.ln_routing = 1;
+       lnet_net_unlock(LNET_LOCK_EX);
+       return 0;
 
-        lnet_rtrpool_init(&the_lnet.ln_rtrpools[0], 0);
-        lnet_rtrpool_init(&the_lnet.ln_rtrpools[1], small_pages);
-        lnet_rtrpool_init(&the_lnet.ln_rtrpools[2], large_pages);
+ failed:
+       lnet_rtrpools_free(0);
+       return rc;
 }
 
+static int
+lnet_rtrpools_adjust_helper(int tiny, int small, int large)
+{
+       int nrb = 0;
+       int rc = 0;
+       int i;
+       lnet_rtrbufpool_t *rtrp;
+
+       /* If the provided values for each buffer pool are different from
+        * the configured values, we need to take action. */
+       if (tiny >= 0) {
+               tiny_router_buffers = tiny;
+               nrb = lnet_nrb_tiny_calculate();
+               cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+                       rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_TINY_BUF_IDX],
+                                                     nrb, i);
+                       if (rc != 0)
+                               return rc;
+               }
+       }
+       if (small >= 0) {
+               small_router_buffers = small;
+               nrb = lnet_nrb_small_calculate();
+               cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+                       rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_SMALL_BUF_IDX],
+                                                     nrb, i);
+                       if (rc != 0)
+                               return rc;
+               }
+       }
+       if (large >= 0) {
+               large_router_buffers = large;
+               nrb = lnet_nrb_large_calculate();
+               cfs_percpt_for_each(rtrp, i, the_lnet.ln_rtrpools) {
+                       rc = lnet_rtrpool_adjust_bufs(&rtrp[LNET_LARGE_BUF_IDX],
+                                                     nrb, i);
+                       if (rc != 0)
+                               return rc;
+               }
+       }
+
+       return 0;
+}
 
 int
-lnet_alloc_rtrpools(int im_a_router)
+lnet_rtrpools_adjust(int tiny, int small, int large)
 {
-        int       rc;
-
-        if (!strcmp(forwarding, "")) {
-                /* not set either way */
-                if (!im_a_router)
-                        return 0;
-        } else if (!strcmp(forwarding, "disabled")) {
-                /* explicitly disabled */
-                return 0;
-        } else if (!strcmp(forwarding, "enabled")) {
-                /* explicitly enabled */
-        } else {
-                LCONSOLE_ERROR_MSG(0x10b, "'forwarding' not set to either "
-                                   "'enabled' or 'disabled'\n");
-                return -EINVAL;
-        }
+       /* This function does not revert the changes if adding new
+        * buffers fails; it is up to the user-space caller to roll
+        * them back. */
 
-        if (tiny_router_buffers <= 0) {
-                LCONSOLE_ERROR_MSG(0x10c, "tiny_router_buffers=%d invalid when "
-                                   "routing enabled\n", tiny_router_buffers);
-                rc = -EINVAL;
-                goto failed;
-        }
+       if (!the_lnet.ln_routing)
+               return 0;
 
-        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[0],
-                                     tiny_router_buffers);
-        if (rc != 0)
-                goto failed;
+       return lnet_rtrpools_adjust_helper(tiny, small, large);
+}
 
-        if (small_router_buffers <= 0) {
-                LCONSOLE_ERROR_MSG(0x10d, "small_router_buffers=%d invalid when"
-                                   " routing enabled\n", small_router_buffers);
-                rc = -EINVAL;
-                goto failed;
-        }
+int
+lnet_rtrpools_enable(void)
+{
+       int rc;
 
-        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[1],
-                                     small_router_buffers);
-        if (rc != 0)
-                goto failed;
+       if (the_lnet.ln_routing)
+               return 0;
 
-        if (large_router_buffers <= 0) {
-                LCONSOLE_ERROR_MSG(0x10e, "large_router_buffers=%d invalid when"
-                                   " routing enabled\n", large_router_buffers);
-                rc = -EINVAL;
-                goto failed;
-        }
+       if (the_lnet.ln_rtrpools == NULL)
+               /* If routing is turned off, and we have never
+                * initialized the pools before, just call the
+                * standard buffer pool allocation routine as
+                * if we are just configuring this for the first
+                * time. */
+               return lnet_rtrpools_alloc(1);
 
-        rc = lnet_rtrpool_alloc_bufs(&the_lnet.ln_rtrpools[2],
-                                     large_router_buffers);
-        if (rc != 0)
-                goto failed;
+       rc = lnet_rtrpools_adjust_helper(0, 0, 0);
+       if (rc != 0)
+               return rc;
 
-        LNET_LOCK();
-        the_lnet.ln_routing = 1;
-        LNET_UNLOCK();
+       lnet_net_lock(LNET_LOCK_EX);
+       the_lnet.ln_routing = 1;
 
-        return 0;
+       the_lnet.ln_ping_info->pi_features &= ~LNET_PING_FEAT_RTE_DISABLED;
+       lnet_net_unlock(LNET_LOCK_EX);
 
- failed:
-        lnet_free_rtrpools();
-        return rc;
+       return 0;
+}
+
+void
+lnet_rtrpools_disable(void)
+{
+       if (!the_lnet.ln_routing)
+               return;
+
+       lnet_net_lock(LNET_LOCK_EX);
+       the_lnet.ln_routing = 0;
+       the_lnet.ln_ping_info->pi_features |= LNET_PING_FEAT_RTE_DISABLED;
+
+       tiny_router_buffers = 0;
+       small_router_buffers = 0;
+       large_router_buffers = 0;
+       lnet_net_unlock(LNET_LOCK_EX);
+       lnet_rtrpools_free(1);
 }
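
lnet_rtrpools_enable() and lnet_rtrpools_disable() also advertise the routing
state to peers by clearing or setting a feature bit in the ping info under the
exclusive lock.  A trivial sketch of that flag handling follows; the bit value
is assumed for illustration, not the real LNET_PING_FEAT_RTE_DISABLED
definition.

#include <stdio.h>

#define PING_FEAT_RTE_DISABLED (1 << 2)         /* assumed bit position */

int main(void)
{
        unsigned int pi_features = 0;

        pi_features |= PING_FEAT_RTE_DISABLED;          /* routing off */
        printf("rte disabled? %d\n",
               !!(pi_features & PING_FEAT_RTE_DISABLED));

        pi_features &= ~PING_FEAT_RTE_DISABLED;         /* routing on */
        printf("rte disabled? %d\n",
               !!(pi_features & PING_FEAT_RTE_DISABLED));
        return 0;
}
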
 
 int
-lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
+lnet_notify(lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
 {
-        lnet_peer_t         *lp = NULL;
-        time_t               now = cfs_time_current_sec();
+       struct lnet_peer        *lp = NULL;
+       cfs_time_t              now = cfs_time_current();
+       int                     cpt = lnet_cpt_of_nid(nid);
 
-        LASSERT (!in_interrupt ());
+       LASSERT(!in_interrupt());
 
-        CDEBUG (D_NET, "%s notifying %s: %s\n",
-                (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
-                libcfs_nid2str(nid),
-                alive ? "up" : "down");
+       CDEBUG (D_NET, "%s notifying %s: %s\n",
+               (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
+               libcfs_nid2str(nid),
+               alive ? "up" : "down");
 
         if (ni != NULL &&
             LNET_NIDNET(ni->ni_nid) != LNET_NIDNET(nid)) {
@@ -1100,12 +1776,12 @@ lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
         }
 
         /* can't do predictions... */
-        if (when > now) {
+        if (cfs_time_after(when, now)) {
                 CWARN ("Ignoring prediction from %s of %s %s "
                        "%ld seconds in the future\n",
                        (ni == NULL) ? "userspace" : libcfs_nid2str(ni->ni_nid),
                        libcfs_nid2str(nid), alive ? "up" : "down",
-                       when - now);
+                       cfs_duration_sec(cfs_time_sub(when, now)));
                 return -EINVAL;
         }
 
@@ -1115,12 +1791,17 @@ lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
                 return 0;
         }
 
-        LNET_LOCK();
+       lnet_net_lock(cpt);
+
+       if (the_lnet.ln_shutdown) {
+               lnet_net_unlock(cpt);
+               return -ESHUTDOWN;
+       }
 
-        lp = lnet_find_peer_locked(nid);
-        if (lp == NULL) {
-                /* nid not found */
-                LNET_UNLOCK();
+       lp = lnet_find_peer_locked(the_lnet.ln_peer_tables[cpt], nid);
+       if (lp == NULL) {
+               /* nid not found */
+               lnet_net_unlock(cpt);
                 CDEBUG(D_NET, "%s not found\n", libcfs_nid2str(nid));
                 return 0;
         }
@@ -1134,16 +1815,13 @@ lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
 
         lnet_notify_locked(lp, ni == NULL, alive, when);
 
-        LNET_UNLOCK();
-
-        lnet_do_notify(lp);
-
-        LNET_LOCK();
+       if (ni != NULL)
+               lnet_ni_notify_locked(ni, lp);
 
-        lnet_peer_decref_locked(lp);
+       lnet_peer_decref_locked(lp);
 
-        LNET_UNLOCK();
-        return 0;
+       lnet_net_unlock(cpt);
+       return 0;
 }
 EXPORT_SYMBOL(lnet_notify);
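
lnet_notify() now takes a cfs_time_t and rejects future predictions with
cfs_time_after() instead of a plain '>': with jiffies-style counters that can
wrap, the robust comparison is the signed-subtraction idiom used by the
kernel's time_after().  A minimal demonstration of why that matters; the type
and helper below are illustrative, not the libcfs API.

#include <stdio.h>

typedef unsigned long my_time_t;

/* true if a is later than b, even across counter wraparound */
static int my_time_after(my_time_t a, my_time_t b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        my_time_t now  = (my_time_t)-10;    /* counter about to wrap */
        my_time_t when = 5;                 /* 15 ticks later, post-wrap */

        printf("naive '>' : %d\n", when > now);                /* 0: wrong */
        printf("time_after: %d\n", my_time_after(when, now));  /* 1: right */
        return 0;
}
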
 
@@ -1156,7 +1834,7 @@ lnet_get_tunables (void)
 #else
 
 int
-lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, time_t when)
+lnet_notify (lnet_ni_t *ni, lnet_nid_t nid, int alive, cfs_time_t when)
 {
         return -EOPNOTSUPP;
 }
@@ -1181,22 +1859,21 @@ lnet_router_checker (void)
         if (last != 0 &&
             interval > MAX(live_router_check_interval,
                            dead_router_check_interval))
-                CDEBUG(D_NETERROR, "Checker(%d/%d) not called for %d seconds\n",
-                       live_router_check_interval, dead_router_check_interval,
-                       interval);
+                CNETERR("Checker(%d/%d) not called for %d seconds\n",
+                        live_router_check_interval, dead_router_check_interval,
+                        interval);
 
-        LNET_LOCK();
-        LASSERT (!running); /* recursion check */
-        running = 1;
-        LNET_UNLOCK();
+       LASSERT(LNET_CPT_NUMBER == 1);
 
-        last = now;
+       lnet_net_lock(0);
+       LASSERT(!running); /* recursion check */
+       running = 1;
+       lnet_net_unlock(0);
 
-        if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPTHREAD) {
-                the_lnet.ln_rc_state = LNET_RC_STATE_UNLINKING;
-                rc = LNetMDUnlink(the_lnet.ln_rc_mdh);
-                LASSERT (rc == 0);
-        }
+       last = now;
+
+       if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING)
+               lnet_prune_rc_data(0); /* unlink all rcd and nowait */
 
         /* consume all pending events */
         while (1) {
@@ -1217,31 +1894,30 @@ lnet_router_checker (void)
 
                 LASSERT (rc == 1);
 
-                LNET_LOCK();
                 lnet_router_checker_event(&ev);
-                LNET_UNLOCK();
         }
 
-        if (the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKED ||
-            the_lnet.ln_rc_state == LNET_RC_STATE_UNLINKING) {
+       if (the_lnet.ln_rc_state == LNET_RC_STATE_STOPPING) {
+               lnet_prune_rc_data(1); /* release rcd */
+               the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
                 running = 0;
                 return;
         }
 
         LASSERT (the_lnet.ln_rc_state == LNET_RC_STATE_RUNNING);
 
-        LNET_LOCK();
+       lnet_net_lock(0);
 
-        version = the_lnet.ln_routers_version;
-        list_for_each_entry (rtr, &the_lnet.ln_routers, lp_rtr_list) {
-                lnet_ping_router_locked(rtr);
-                LASSERT (version == the_lnet.ln_routers_version);
-        }
+       version = the_lnet.ln_routers_version;
+       list_for_each_entry(rtr, &the_lnet.ln_routers, lp_rtr_list) {
+               lnet_ping_router_locked(rtr);
+               LASSERT(version == the_lnet.ln_routers_version);
+       }
 
-        LNET_UNLOCK();
+       lnet_net_unlock(0);
 
-        running = 0; /* lock only needed for the recursion check */
-        return;
+       running = 0; /* lock only needed for the recursion check */
+       return;
 }
 
 /* NB lnet_peers_start_down depends on me,
@@ -1267,17 +1943,12 @@ lnet_get_tunables (void)
 }
 
 void
-lnet_free_rtrpools (void)
-{
-}
-
-void
-lnet_init_rtrpools (void)
+lnet_rtrpools_free(int keep_pools)
 {
 }
 
 int
-lnet_alloc_rtrpools (int im_a_arouter)
+lnet_rtrpools_alloc(int im_a_router)
 {
         return 0;
 }