LU-12739 lnet: Don't queue msg when discovery has completed
[fs/lustre-release.git] / lnet/lnet/lib-move.c
index a079c86..88f20e7 100644
@@ -42,6 +42,8 @@
 #include <linux/nsproxy.h>
 #include <net/net_namespace.h>
 
+extern unsigned int lnet_current_net_count;
+
 static int local_nid_dist_zero = 1;
 module_param(local_nid_dist_zero, int, 0444);
 MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
@@ -794,12 +796,32 @@ lnet_ni_eager_recv(struct lnet_ni *ni, struct lnet_msg *msg)
        return rc;
 }
 
+static bool
+lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
+{
+       time64_t deadline;
+
+       deadline = lpni->lpni_last_alive +
+                  lpni->lpni_net->net_tunables.lct_peer_timeout;
+
+       /*
+        * assume peer_ni is alive as long as we're within the configured
+        * peer timeout
+        */
+       if (deadline > now)
+               return false;
+
+       return true;
+}
+
 /* NB: returns 1 when alive, 0 when dead, negative when error;
  *     may drop the lnet_net_lock */
 static int
 lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
                       struct lnet_msg *msg)
 {
+       time64_t now = ktime_get_seconds();
+
        if (!lnet_peer_aliveness_enabled(lpni))
                return -ENODEV;
 
@@ -819,6 +841,9 @@ lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
            msg->msg_type == LNET_MSG_REPLY)
                return 1;
 
+       if (!lnet_is_peer_deadline_passed(lpni, now))
+               return true;
+
        return lnet_is_peer_ni_alive(lpni);
 }
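
The hunk above makes lnet_peer_alive_locked() short-circuit while the peer is still inside its configured timeout window: lnet_is_peer_deadline_passed() only returns true once lpni_last_alive plus lct_peer_timeout is in the past, and only then is lnet_is_peer_ni_alive() consulted. A minimal standalone sketch of the deadline arithmetic (not LNet code; the values are assumed for illustration):

    #include <assert.h>
    #include <stdbool.h>

    /* Models lnet_is_peer_deadline_passed(): the peer must be re-checked
     * once last_alive + timeout is no longer in the future. */
    static bool deadline_passed(long long last_alive, long long timeout,
                                long long now)
    {
            return last_alive + timeout <= now;
    }

    int main(void)
    {
            long long now = 1000;

            assert(!deadline_passed(900, 180, now)); /* 1080 > 1000: assume alive */
            assert(deadline_passed(700, 180, now));  /* 880 <= 1000: query health */
            return 0;
    }
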
 
@@ -1294,7 +1319,6 @@ routing_off:
        }
 }
 
-#if 0
 static int
 lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
 {
@@ -1312,53 +1336,197 @@ lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
 
        return 0;
 }
-#endif
+
+static struct lnet_peer_ni *
+lnet_select_peer_ni(struct lnet_ni *best_ni, lnet_nid_t dst_nid,
+                   struct lnet_peer *peer,
+                   struct lnet_peer_net *peer_net)
+{
+       /*
+        * Look at the peer NIs for the destination peer that connect
+        * to the chosen net. If a peer_ni is preferred when using the
+        * best_ni to communicate, we use that one. If there is no
+        * preferred peer_ni, or there are multiple preferred peer_ni,
+        * the available transmit credits are used. If the transmit
+        * credits are equal, we round-robin over the peer_ni.
+        */
+       struct lnet_peer_ni *lpni = NULL;
+       struct lnet_peer_ni *best_lpni = NULL;
+       int best_lpni_credits = INT_MIN;
+       bool preferred = false;
+       bool ni_is_pref;
+       int best_lpni_healthv = 0;
+       int lpni_healthv;
+
+       while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
+               /*
+                * if the best_ni we've chosen already has this lpni
+                * preferred, then let's use it
+                */
+               if (best_ni) {
+                       ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
+                                                               best_ni->ni_nid);
+                       CDEBUG(D_NET, "%s ni_is_pref = %d\n",
+                              libcfs_nid2str(best_ni->ni_nid), ni_is_pref);
+               } else {
+                       ni_is_pref = false;
+               }
+
+               lpni_healthv = atomic_read(&lpni->lpni_healthv);
+
+               if (best_lpni)
+                       CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n",
+                               libcfs_nid2str(lpni->lpni_nid),
+                               lpni->lpni_txcredits, best_lpni_credits,
+                               lpni->lpni_seq, best_lpni->lpni_seq);
+
+               /* pick the healthiest peer ni */
+               if (lpni_healthv < best_lpni_healthv) {
+                       continue;
+               } else if (lpni_healthv > best_lpni_healthv) {
+                       best_lpni_healthv = lpni_healthv;
+               /* if this is a preferred peer use it */
+               } else if (!preferred && ni_is_pref) {
+                       preferred = true;
+               } else if (preferred && !ni_is_pref) {
+                       /*
+                        * this is not the preferred peer so let's ignore
+                        * it.
+                        */
+                       continue;
+               } else if (lpni->lpni_txcredits < best_lpni_credits) {
+                       /*
+                        * We already have a peer that has more credits
+                        * available than this one. No need to consider
+                        * this peer further.
+                        */
+                       continue;
+               } else if (lpni->lpni_txcredits == best_lpni_credits) {
+                       /*
+                        * The best peer found so far and the current peer
+                        * have the same number of available credits, so
+                        * select between them using round robin.
+                        */
+                       if (best_lpni) {
+                               if (best_lpni->lpni_seq <= lpni->lpni_seq)
+                                       continue;
+                       }
+               }
+
+               best_lpni = lpni;
+               best_lpni_credits = lpni->lpni_txcredits;
+       }
+
+       /* if we still can't find a peer ni then we can't reach it */
+       if (!best_lpni) {
+               __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
+                       LNET_NIDNET(dst_nid);
+               CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
+                               libcfs_net2str(net_id));
+               return NULL;
+       }
+
+       CDEBUG(D_NET, "sd_best_lpni = %s\n",
+              libcfs_nid2str(best_lpni->lpni_nid));
+
+       return best_lpni;
+}
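
As the comment at the top of lnet_select_peer_ni() describes, candidates are ranked by health, then by whether the peer NI is preferred for the chosen local NI, then by available transmit credits, with the sequence number as the round-robin tie-breaker. A compact standalone model of that ordering (hypothetical struct and values, not the LNet data structures, and the preferred-NI handling is simplified):

    #include <assert.h>
    #include <stdbool.h>

    struct cand {
            int  healthv;    /* larger is healthier */
            bool preferred;  /* preferred for the chosen local NI */
            int  credits;    /* available tx credits */
            long seq;        /* smaller means selected less recently */
    };

    /* Returns true if b should replace the current best candidate a. */
    static bool better(struct cand a, struct cand b)
    {
            if (b.healthv != a.healthv)
                    return b.healthv > a.healthv;
            if (b.preferred != a.preferred)
                    return b.preferred;
            if (b.credits != a.credits)
                    return b.credits > a.credits;
            return b.seq < a.seq;  /* round robin: strictly lower seq wins */
    }

    int main(void)
    {
            struct cand a = { 100, false, 8, 5 };
            struct cand b = { 100, true,  2, 9 };  /* preference beats credits */
            struct cand c = { 100, true,  2, 3 };  /* same tier: lower seq wins */

            assert(better(a, b));
            assert(better(b, c));
            return 0;
    }
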
+
+/*
+ * Prerequisite: the best_ni should already be set in the sd
+ */
+static inline struct lnet_peer_ni *
+lnet_find_best_lpni_on_net(struct lnet_send_data *sd, struct lnet_peer *peer,
+                          __u32 net_id)
+{
+       struct lnet_peer_net *peer_net;
+
+       /*
+        * The gateway is Multi-Rail capable so now we must select the
+        * proper peer_ni
+        */
+       peer_net = lnet_peer_get_net_locked(peer, net_id);
+
+       if (!peer_net) {
+               CERROR("gateway peer %s has no NI on net %s\n",
+                      libcfs_nid2str(peer->lp_primary_nid),
+                      libcfs_net2str(net_id));
+               return NULL;
+       }
+
+       return lnet_select_peer_ni(sd->sd_best_ni, sd->sd_dst_nid,
+                                  peer, peer_net);
+}
 
 static int
-lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
+lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2,
+                   struct lnet_peer_ni **best_lpni)
 {
-       /* TODO re-implement gateway comparison
-       struct lnet_peer_ni *p1 = r1->lr_gateway;
-       struct lnet_peer_ni *p2 = r2->lr_gateway;
-       */
        int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
        int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
-       /*int rc;*/
+       struct lnet_peer *lp1 = r1->lr_gateway;
+       struct lnet_peer *lp2 = r2->lr_gateway;
+       struct lnet_peer_ni *lpni1;
+       struct lnet_peer_ni *lpni2;
+       struct lnet_send_data sd;
+       int rc;
+
+       sd.sd_best_ni = NULL;
+       sd.sd_dst_nid = LNET_NID_ANY;
+       lpni1 = lnet_find_best_lpni_on_net(&sd, lp1, r1->lr_lnet);
+       lpni2 = lnet_find_best_lpni_on_net(&sd, lp2, r2->lr_lnet);
+       LASSERT(lpni1 && lpni2);
 
-       if (r1->lr_priority < r2->lr_priority)
+       if (r1->lr_priority < r2->lr_priority) {
+               *best_lpni = lpni1;
                return 1;
+       }
 
-       if (r1->lr_priority > r2->lr_priority)
+       if (r1->lr_priority > r2->lr_priority) {
+               *best_lpni = lpni2;
                return -1;
+       }
 
-       if (r1_hops < r2_hops)
+       if (r1_hops < r2_hops) {
+               *best_lpni = lpni1;
                return 1;
+       }
 
-       if (r1_hops > r2_hops)
+       if (r1_hops > r2_hops) {
+               *best_lpni = lpni2;
                return -1;
+       }
 
-       /*
-       rc = lnet_compare_peers(p1, p2);
-       if (rc)
+       rc = lnet_compare_peers(lpni1, lpni2);
+       if (rc == 1) {
+               *best_lpni = lpni1;
                return rc;
-       */
+       } else if (rc == -1) {
+               *best_lpni = lpni2;
+               return rc;
+       }
 
-       if (r1->lr_seq - r2->lr_seq <= 0)
+       if (r1->lr_seq - r2->lr_seq <= 0) {
+               *best_lpni = lpni1;
                return 1;
+       }
 
+       *best_lpni = lpni2;
        return -1;
 }
 
-/* TODO: lnet_find_route_locked() needs to be reimplemented */
 static struct lnet_route *
 lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
-                      lnet_nid_t rtr_nid, struct lnet_route **prev_route)
+                      lnet_nid_t rtr_nid, struct lnet_route **prev_route,
+                      struct lnet_peer_ni **gwni)
 {
-       struct lnet_remotenet *rnet;
-       struct lnet_route *route;
+       struct lnet_peer_ni *best_gw_ni = NULL;
        struct lnet_route *best_route;
        struct lnet_route *last_route;
+       struct lnet_remotenet *rnet;
        struct lnet_peer *lp_best;
+       struct lnet_route *route;
        struct lnet_peer *lp;
        int rc;
 
@@ -1380,14 +1548,13 @@ lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
                if (lp_best == NULL) {
                        best_route = last_route = route;
                        lp_best = lp;
-                       continue;
                }
 
                /* no protection on below fields, but it's harmless */
                if (last_route->lr_seq - route->lr_seq < 0)
                        last_route = route;
 
-               rc = lnet_compare_routes(route, best_route);
+               rc = lnet_compare_routes(route, best_route, &best_gw_ni);
                if (rc < 0)
                        continue;
 
@@ -1396,6 +1563,7 @@ lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
        }
 
        *prev_route = last_route;
+       *gwni = best_gw_ni;
 
        return best_route;
 }
@@ -1693,125 +1861,6 @@ lnet_handle_send(struct lnet_send_data *sd)
        return rc;
 }
 
-static struct lnet_peer_ni *
-lnet_select_peer_ni(struct lnet_send_data *sd, struct lnet_peer *peer,
-                   struct lnet_peer_net *peer_net)
-{
-       /*
-        * Look at the peer NIs for the destination peer that connect
-        * to the chosen net. If a peer_ni is preferred when using the
-        * best_ni to communicate, we use that one. If there is no
-        * preferred peer_ni, or there are multiple preferred peer_ni,
-        * the available transmit credits are used. If the transmit
-        * credits are equal, we round-robin over the peer_ni.
-        */
-       struct lnet_peer_ni *lpni = NULL;
-       struct lnet_peer_ni *best_lpni = NULL;
-       struct lnet_ni *best_ni = sd->sd_best_ni;
-       lnet_nid_t dst_nid = sd->sd_dst_nid;
-       int best_lpni_credits = INT_MIN;
-       bool preferred = false;
-       bool ni_is_pref;
-       int best_lpni_healthv = 0;
-       int lpni_healthv;
-
-       while ((lpni = lnet_get_next_peer_ni_locked(peer, peer_net, lpni))) {
-               /*
-                * if the best_ni we've chosen aleady has this lpni
-                * preferred, then let's use it
-                */
-               ni_is_pref = lnet_peer_is_pref_nid_locked(lpni,
-                                                         best_ni->ni_nid);
-
-               lpni_healthv = atomic_read(&lpni->lpni_healthv);
-
-               CDEBUG(D_NET, "%s ni_is_pref = %d\n",
-                      libcfs_nid2str(best_ni->ni_nid), ni_is_pref);
-
-               if (best_lpni)
-                       CDEBUG(D_NET, "%s c:[%d, %d], s:[%d, %d]\n",
-                               libcfs_nid2str(lpni->lpni_nid),
-                               lpni->lpni_txcredits, best_lpni_credits,
-                               lpni->lpni_seq, best_lpni->lpni_seq);
-
-               /* pick the healthiest peer ni */
-               if (lpni_healthv < best_lpni_healthv) {
-                       continue;
-               } else if (lpni_healthv > best_lpni_healthv) {
-                       best_lpni_healthv = lpni_healthv;
-               /* if this is a preferred peer use it */
-               } else if (!preferred && ni_is_pref) {
-                       preferred = true;
-               } else if (preferred && !ni_is_pref) {
-                       /*
-                        * this is not the preferred peer so let's ignore
-                        * it.
-                        */
-                       continue;
-               } else if (lpni->lpni_txcredits < best_lpni_credits) {
-                       /*
-                        * We already have a peer that has more credits
-                        * available than this one. No need to consider
-                        * this peer further.
-                        */
-                       continue;
-               } else if (lpni->lpni_txcredits == best_lpni_credits) {
-                       /*
-                        * The best peer found so far and the current peer
-                        * have the same number of available credits let's
-                        * make sure to select between them using Round
-                        * Robin
-                        */
-                       if (best_lpni) {
-                               if (best_lpni->lpni_seq <= lpni->lpni_seq)
-                                       continue;
-                       }
-               }
-
-               best_lpni = lpni;
-               best_lpni_credits = lpni->lpni_txcredits;
-       }
-
-       /* if we still can't find a peer ni then we can't reach it */
-       if (!best_lpni) {
-               __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
-                       LNET_NIDNET(dst_nid);
-               CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
-                               libcfs_net2str(net_id));
-               return NULL;
-       }
-
-       CDEBUG(D_NET, "sd_best_lpni = %s\n",
-              libcfs_nid2str(best_lpni->lpni_nid));
-
-       return best_lpni;
-}
-
-/*
- * Prerequisite: the best_ni should already be set in the sd
- */
-static inline struct lnet_peer_ni *
-lnet_find_best_lpni_on_net(struct lnet_send_data *sd, struct lnet_peer *peer,
-                          __u32 net_id)
-{
-       struct lnet_peer_net *peer_net;
-
-       /*
-        * The gateway is Multi-Rail capable so now we must select the
-        * proper peer_ni
-        */
-       peer_net = lnet_peer_get_net_locked(peer, net_id);
-
-       if (!peer_net) {
-               CERROR("gateway peer %s has no NI on net %s\n",
-                      libcfs_nid2str(peer->lp_primary_nid),
-                      libcfs_net2str(net_id));
-               return NULL;
-       }
-
-       return lnet_select_peer_ni(sd, peer, peer_net);
-}
-
 static inline void
 lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
 {
@@ -1948,6 +1997,11 @@ lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
                return 0;
        }
 
+       if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
+               lnet_peer_ni_decref_locked(lpni);
+               return 0;
+       }
+
        rc = lnet_discover_peer_locked(lpni, cpt, false);
        if (rc) {
                lnet_peer_ni_decref_locked(lpni);
@@ -1955,15 +2009,21 @@ lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
        }
        /* The peer may have changed. */
        peer = lpni->lpni_peer_net->lpn_peer;
+       spin_lock(&peer->lp_lock);
+       if (lnet_peer_is_uptodate_locked(peer)) {
+               spin_unlock(&peer->lp_lock);
+               lnet_peer_ni_decref_locked(lpni);
+               return 0;
+       }
        /* queue message and return */
        msg->msg_rtr_nid_param = rtr_nid;
        msg->msg_sending = 0;
        msg->msg_txpeer = NULL;
-       spin_lock(&peer->lp_lock);
        list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
+       primary_nid = peer->lp_primary_nid;
        spin_unlock(&peer->lp_lock);
+
        lnet_peer_ni_decref_locked(lpni);
-       primary_nid = peer->lp_primary_nid;
 
        CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
                msg, libcfs_nid2str(primary_nid));
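
The two new up-to-date checks are the point of LU-12739: the message is parked on lp_dc_pendq only if the peer still needs discovery, and the second check plus the list_add_tail() happen under the same lp_lock, so a discovery that completes concurrently can no longer strand the message on the queue. A standalone sketch of that check-then-queue-under-lock pattern (pthread mutex and made-up fields, purely illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct peer {
            pthread_mutex_t lock;      /* stands in for lp_lock */
            bool            uptodate;  /* discovery completed */
            int             pending;   /* messages parked for discovery */
    };

    /* Returns 0 if the caller may send now, 1 if the message was queued. */
    static int queue_if_discovering(struct peer *p)
    {
            pthread_mutex_lock(&p->lock);
            if (p->uptodate) {
                    pthread_mutex_unlock(&p->lock);
                    return 0;           /* do not queue: nothing would drain it */
            }
            p->pending++;               /* list_add_tail(&msg->msg_list, ...) */
            pthread_mutex_unlock(&p->lock);
            return 1;                   /* delayed until discovery completes */
    }

    int main(void)
    {
            struct peer p = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

            printf("queued: %d\n", queue_if_discovering(&p)); /* 0 */
            p.uptodate = false;
            printf("queued: %d\n", queue_if_discovering(&p)); /* 1 */
            return 0;
    }
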
@@ -1977,37 +2037,80 @@ lnet_handle_find_routed_path(struct lnet_send_data *sd,
                             struct lnet_peer_ni **gw_lpni,
                             struct lnet_peer **gw_peer)
 {
+       int rc;
        struct lnet_peer *gw;
+       struct lnet_peer *lp;
+       struct lnet_peer_net *lpn;
+       struct lnet_peer_net *best_lpn = NULL;
+       struct lnet_remotenet *rnet;
        struct lnet_route *best_route;
        struct lnet_route *last_route;
        struct lnet_peer_ni *lpni = NULL;
+       struct lnet_peer_ni *gwni = NULL;
        lnet_nid_t src_nid = sd->sd_src_nid;
 
-       best_route = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid),
-                                           sd->sd_rtr_nid, &last_route);
+       /* we've already looked up the initial lpni using dst_nid */
+       lpni = sd->sd_best_lpni;
+       /* the peer tree must be in existence */
+       LASSERT(lpni && lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
+       lp = lpni->lpni_peer_net->lpn_peer;
+
+       list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
+               /* is this remote network reachable?  */
+               rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
+               if (!rnet)
+                       continue;
+
+               if (!best_lpn)
+                       best_lpn = lpn;
+
+               if (best_lpn->lpn_seq <= lpn->lpn_seq)
+                       continue;
+
+               best_lpn = lpn;
+       }
+
+       if (!best_lpn) {
+               CERROR("peer %s has no available nets\n",
+                      libcfs_nid2str(sd->sd_dst_nid));
+               return -EHOSTUNREACH;
+       }
+
+       sd->sd_best_lpni = lnet_find_best_lpni_on_net(sd, lp, best_lpn->lpn_net_id);
+       if (!sd->sd_best_lpni) {
+               CERROR("peer %s down\n", libcfs_nid2str(sd->sd_dst_nid));
+               return -EHOSTUNREACH;
+       }
+
+       best_route = lnet_find_route_locked(NULL, best_lpn->lpn_net_id,
+                                           sd->sd_rtr_nid, &last_route,
+                                           &gwni);
        if (!best_route) {
                CERROR("no route to %s from %s\n",
                       libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
                return -EHOSTUNREACH;
        }
 
+       if (!gwni) {
+               CERROR("Internal Error. Route expected to %s from %s\n",
+                       libcfs_nid2str(dst_nid),
+                       libcfs_nid2str(src_nid));
+               return -EFAULT;
+       }
+
        gw = best_route->lr_gateway;
-       *gw_peer = gw;
+       LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
 
        /*
         * Discover this gateway if it hasn't already been discovered.
         * This means we might delay the message until discovery has
         * completed
         */
-#if 0
-       /* TODO: disable discovey for now */
-       if (lnet_msg_discovery(sd->sd_msg) &&
-           !lnet_peer_is_uptodate(*gw_peer)) {
-               sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
-               return lnet_initiate_peer_discovery(gw, sd->sd_msg,
-                                                   sd->sd_rtr_nid, sd->sd_cpt);
-       }
-#endif
+       sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
+       rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
+                                         sd->sd_cpt);
+       if (rc)
+               return rc;
 
        if (!sd->sd_best_ni)
                sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
@@ -2024,50 +2127,16 @@ lnet_handle_find_routed_path(struct lnet_send_data *sd,
                return -EFAULT;
        }
 
-       /*
-        * if gw is MR let's find its best peer_ni
-        */
-       if (lnet_peer_is_multi_rail(gw)) {
-               lpni = lnet_find_best_lpni_on_net(sd, gw,
-                               sd->sd_best_ni->ni_net->net_id);
-               /*
-                * We've already verified that the gw has an NI on that
-                * desired net, but we're not finding it. Something is
-                * wrong.
-                */
-               if (!lpni) {
-                       CERROR("Internal Error. Route expected to %s from %s\n",
-                               libcfs_nid2str(dst_nid),
-                               libcfs_nid2str(src_nid));
-                       return -EFAULT;
-               }
-       } else {
-               struct lnet_peer_net *lpn;
-               lpn = lnet_peer_get_net_locked(gw, best_route->lr_lnet);
-               if (!lpn) {
-                       CERROR("Internal Error. Route expected to %s from %s\n",
-                               libcfs_nid2str(dst_nid),
-                               libcfs_nid2str(src_nid));
-                       return -EFAULT;
-               }
-               lpni = list_entry(lpn->lpn_peer_nis.next, struct lnet_peer_ni,
-                                 lpni_peer_nis);
-               if (!lpni) {
-                       CERROR("Internal Error. Route expected to %s from %s\n",
-                               libcfs_nid2str(dst_nid),
-                               libcfs_nid2str(src_nid));
-                       return -EFAULT;
-               }
-       }
-
-       *gw_lpni = lpni;
+       *gw_lpni = gwni;
+       *gw_peer = gw;
 
        /*
-        * increment the route sequence number since now we're sure we're
-        * going to use it
+        * increment the sequence numbers since now we're sure we're
+        * going to use this path
         */
        LASSERT(best_route && last_route);
        best_route->lr_seq = last_route->lr_seq + 1;
+       best_lpn->lpn_seq++;
 
        return 0;
 }
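
Both the best_lpn loop at the top of this function and the lr_seq/lpn_seq increments at the bottom implement the same rotation: pick the candidate with the lowest sequence number, then bump the winner so the next send moves on. A tiny standalone sketch of that round robin (plain array, assumed values):

    #include <assert.h>
    #include <stddef.h>

    /* Pick the entry with the lowest sequence number and bump it, the way
     * best_lpn->lpn_seq++ and lr_seq rotate selections. Ties keep the
     * earlier entry, as in the <= comparison above. */
    static size_t pick_round_robin(unsigned long *seq, size_t n)
    {
            size_t best = 0;
            size_t i;

            for (i = 1; i < n; i++)
                    if (seq[i] < seq[best])
                            best = i;
            seq[best]++;
            return best;
    }

    int main(void)
    {
            unsigned long seq[3] = { 4, 2, 2 };

            assert(pick_round_robin(seq, 3) == 1); /* lowest seq wins, tie -> first */
            assert(pick_round_robin(seq, 3) == 2); /* previous winner was bumped */
            assert(pick_round_robin(seq, 3) == 1);
            return 0;
    }
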
@@ -2127,7 +2196,8 @@ lnet_handle_spec_router_dst(struct lnet_send_data *sd)
 }
 
 struct lnet_ni *
-lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt)
+lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
+                              bool discovery)
 {
        struct lnet_peer_net *peer_net = NULL;
        struct lnet_ni *best_ni = NULL;
@@ -2149,6 +2219,14 @@ lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt)
                        continue;
                best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
                                                   peer_net, md_cpt, false);
+
+               /*
+                * if this is a discovery message and lp_disc_net_id is
+                * specified then use that net to send the discovery on.
+                */
+               if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
+                   discovery)
+                       break;
        }
 
        if (best_ni)
@@ -2318,7 +2396,8 @@ lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
         * networks.
         */
        sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
-                                                       sd->sd_md_cpt);
+                                       sd->sd_md_cpt,
+                                       lnet_msg_discovery(sd->sd_msg));
        if (sd->sd_best_ni) {
                sd->sd_best_lpni =
                  lnet_find_best_lpni_on_net(sd, sd->sd_peer,
@@ -2428,11 +2507,11 @@ lnet_handle_any_mr_dst(struct lnet_send_data *sd)
                return rc;
 
        /*
-        * TODO; One possible enhancement is to run the selection
-        * algorithm on the peer. However for remote peers the credits are
-        * not decremented, so we'll be basically going over the peer NIs
-        * in round robin. An MR router will run the selection algorithm
-        * on the next-hop interfaces.
+        * Now that we must route to the destination, we must consider the
+        * MR case, where the destination has multiple interfaces, some of
+        * which we can route to and others we cannot. For this reason we
+        * need to select a destination NI that we can route to, and if
+        * there are multiple, round robin over them.
         */
        rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
                                          &gw_peer);
@@ -2598,19 +2677,20 @@ again:
        msg->msg_src_nid_param = src_nid;
 
        /*
-        * Now that we have a peer_ni, check if we want to discover
-        * the peer. Traffic to the LNET_RESERVED_PORTAL should not
-        * trigger discovery.
+        * If necessary, perform discovery on the peer that owns this peer_ni.
+        * Note, this can result in the ownership of this peer_ni changing
+        * to another peer object.
         */
-       peer = lpni->lpni_peer_net->lpn_peer;
-       if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) {
-               rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+       rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+       if (rc) {
                lnet_peer_ni_decref_locked(lpni);
                lnet_net_unlock(cpt);
                return rc;
        }
        lnet_peer_ni_decref_locked(lpni);
 
+       peer = lpni->lpni_peer_net->lpn_peer;
+
        /*
         * Identify the different send cases
         */
@@ -2691,8 +2771,13 @@ lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
        LASSERT(!msg->msg_tx_committed);
 
        rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
-       if (rc < 0)
+       if (rc < 0) {
+               if (rc == -EHOSTUNREACH)
+                       msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
+               else
+                       msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
                return rc;
+       }
 
        if (rc == LNET_CREDIT_OK)
                lnet_ni_send(msg->msg_txni, msg);
@@ -2726,25 +2811,57 @@ lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
                return;
 
        rspt = md->md_rspt_ptr;
-       md->md_rspt_ptr = NULL;
 
        /* debug code */
        LASSERT(rspt->rspt_cpt == cpt);
 
-       /*
-        * invalidate the handle to indicate that a response has been
-        * received, which will then lead the monitor thread to clean up
-        * the rspt block.
-        */
-       LNetInvalidateMDHandle(&rspt->rspt_mdh);
+       md->md_rspt_ptr = NULL;
+
+       if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
+               /*
+                * The monitor thread has invalidated this handle because the
+                * response timed out, but it failed to lookup the MD. That
+                * means this response tracker is on the zombie list. We can
+                * safely remove it under the resource lock (held by caller) and
+                * free the response tracker block.
+                */
+               list_del(&rspt->rspt_on_list);
+               lnet_rspt_free(rspt, cpt);
+       } else {
+               /*
+                * invalidate the handle to indicate that a response has been
+                * received, which will then lead the monitor thread to clean up
+                * the rspt block.
+                */
+               LNetInvalidateMDHandle(&rspt->rspt_mdh);
+       }
+}
+
+void
+lnet_clean_zombie_rstqs(void)
+{
+       struct lnet_rsp_tracker *rspt, *tmp;
+       int i;
+
+       cfs_cpt_for_each(i, lnet_cpt_table()) {
+               list_for_each_entry_safe(rspt, tmp,
+                                        the_lnet.ln_mt_zombie_rstqs[i],
+                                        rspt_on_list) {
+                       list_del(&rspt->rspt_on_list);
+                       lnet_rspt_free(rspt, i);
+               }
+       }
+
+       cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
 }
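
lnet_detach_rsp_tracker() and the timeout path in lnet_finalize_expired_responses() now hand a response tracker back and forth, using the validity of rspt_mdh as the signal: whichever side finds the handle already invalidated knows the other has let go and may free the block (via the zombie list drained by lnet_clean_zombie_rstqs() above). A standalone sketch of that ownership handshake (single flag, locking omitted, not the real LNet types):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct tracker {
            bool handle_valid;    /* stands in for rspt_mdh */
            bool on_zombie_list;
    };

    /* Timeout path when the MD cannot be looked up: invalidate the handle
     * as a signal and park the tracker on the zombie list. */
    static void monitor_timeout(struct tracker *t)
    {
            t->handle_valid = false;
            t->on_zombie_list = true;
    }

    /* Final operation on the MD: if the monitor already invalidated the
     * handle the tracker is ours to free, otherwise invalidate it so the
     * monitor cleans it up later. */
    static void detach(struct tracker **tp)
    {
            if (!(*tp)->handle_valid) {
                    free(*tp);        /* like lnet_rspt_free() */
                    *tp = NULL;
            } else {
                    (*tp)->handle_valid = false;
            }
    }

    int main(void)
    {
            struct tracker *t = calloc(1, sizeof(*t));

            t->handle_valid = true;
            monitor_timeout(t);  /* response timed out but the MD is still busy */
            detach(&t);          /* last MD operation frees the zombie tracker */
            printf("freed: %s\n", t == NULL ? "yes" : "no");
            return 0;
    }
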
 
 static void
-lnet_finalize_expired_responses(bool force)
+lnet_finalize_expired_responses(void)
 {
        struct lnet_libmd *md;
        struct list_head local_queue;
        struct lnet_rsp_tracker *rspt, *tmp;
+       ktime_t now;
        int i;
 
        if (the_lnet.ln_mt_rstq == NULL)
@@ -2761,6 +2878,8 @@ lnet_finalize_expired_responses(bool force)
                list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
                lnet_net_unlock(i);
 
+               now = ktime_get();
+
                list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
                        /*
                         * The rspt mdh will be invalidated when a response
@@ -2776,41 +2895,74 @@ lnet_finalize_expired_responses(bool force)
                        lnet_res_lock(i);
                        if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
                                lnet_res_unlock(i);
-                               list_del_init(&rspt->rspt_on_list);
+                               list_del(&rspt->rspt_on_list);
                                lnet_rspt_free(rspt, i);
                                continue;
                        }
 
-                       if (ktime_compare(ktime_get(), rspt->rspt_deadline) >= 0 ||
-                           force) {
+                       if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
+                           the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
                                struct lnet_peer_ni *lpni;
                                lnet_nid_t nid;
 
                                md = lnet_handle2md(&rspt->rspt_mdh);
                                if (!md) {
+                                       /* MD has been queued for unlink, but
+                                        * rspt hasn't been detached (Note we've
+                                        * checked above that the rspt_mdh is
+                                        * valid). Since we cannot lookup the MD
+                                        * we're unable to detach the rspt
+                                        * ourselves. Thus, move the rspt to the
+                                        * zombie list where we'll wait for
+                                        * either:
+                                        *   1. The remaining operations on the
+                                        *   MD to complete. In this case the
+                                        *   final operation will result in
+                                        *   lnet_msg_detach_md()->
+                                        *   lnet_detach_rsp_tracker() where
+                                        *   we will clean up this response
+                                        *   tracker.
+                                        *   2. LNet to shutdown. In this case
+                                        *   we'll wait until after all LND Nets
+                                        *   have shutdown and then we can
+                                        *   safely free any remaining response
+                                        *   tracker blocks on the zombie list.
+                                        * Note: We need to hold the resource
+                                        * lock when adding to the zombie list
+                                        * because we may have concurrent access
+                                        * with lnet_detach_rsp_tracker().
+                                        */
                                        LNetInvalidateMDHandle(&rspt->rspt_mdh);
+                                       list_move(&rspt->rspt_on_list,
+                                                 the_lnet.ln_mt_zombie_rstqs[i]);
                                        lnet_res_unlock(i);
-                                       list_del_init(&rspt->rspt_on_list);
-                                       lnet_rspt_free(rspt, i);
                                        continue;
                                }
                                LASSERT(md->md_rspt_ptr == rspt);
                                md->md_rspt_ptr = NULL;
                                lnet_res_unlock(i);
 
-                               lnet_net_lock(i);
-                               the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
-                               lnet_net_unlock(i);
-
-                               list_del_init(&rspt->rspt_on_list);
+                               LNetMDUnlink(rspt->rspt_mdh);
 
                                nid = rspt->rspt_next_hop_nid;
 
-                               CNETERR("Response timed out: md = %p: nid = %s\n",
-                                       md, libcfs_nid2str(nid));
-                               LNetMDUnlink(rspt->rspt_mdh);
+                               list_del(&rspt->rspt_on_list);
                                lnet_rspt_free(rspt, i);
 
+                               /* If we're shutting down we just want to clean
+                                * up the rspt blocks
+                                */
+                               if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
+                                       continue;
+
+                               lnet_net_lock(i);
+                               the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
+                               lnet_net_unlock(i);
+
+                               CDEBUG(D_NET,
+                                      "Response timeout: md = %p: nid = %s\n",
+                                      md, libcfs_nid2str(nid));
+
                                /*
                                 * If there is a timeout on the response
                                 * from the next hop decrement its health
@@ -2829,10 +2981,11 @@ lnet_finalize_expired_responses(bool force)
                        }
                }
 
-               lnet_net_lock(i);
-               if (!list_empty(&local_queue))
+               if (!list_empty(&local_queue)) {
+                       lnet_net_lock(i);
                        list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
-               lnet_net_unlock(i);
+                       lnet_net_unlock(i);
+               }
        }
 }
 
@@ -3105,26 +3258,6 @@ lnet_recover_local_nis(void)
        lnet_net_unlock(0);
 }
 
-static struct list_head **
-lnet_create_array_of_queues(void)
-{
-       struct list_head **qs;
-       struct list_head *q;
-       int i;
-
-       qs = cfs_percpt_alloc(lnet_cpt_table(),
-                             sizeof(struct list_head));
-       if (!qs) {
-               CERROR("Failed to allocate queues\n");
-               return NULL;
-       }
-
-       cfs_percpt_for_each(q, i, qs)
-               INIT_LIST_HEAD(q);
-
-       return qs;
-}
-
 static int
 lnet_resendqs_create(void)
 {
@@ -3367,6 +3500,7 @@ lnet_monitor_thread(void *arg)
        int interval;
        time64_t now;
 
+       wait_for_completion(&the_lnet.ln_started);
        /*
         * The monitor thread takes care of the following:
         *  1. Checks the aliveness of routers
@@ -3388,7 +3522,7 @@ lnet_monitor_thread(void *arg)
                lnet_resend_pending_msgs();
 
                if (now >= rsp_timeout) {
-                       lnet_finalize_expired_responses(false);
+                       lnet_finalize_expired_responses();
                        rsp_timeout = now + (lnet_transaction_timeout / 2);
                }
 
@@ -3406,12 +3540,22 @@ lnet_monitor_thread(void *arg)
                 * if we wake up every 1 second? Although, we've seen
                 * cases where we get a complaint that an idle thread
                 * is waking up unnecessarily.
+                *
+                * Take the current net count into account when waking up
+                * for the alive router check, since we may need to check
+                * as many networks as are configured.
                 */
                interval = min(lnet_recovery_interval,
-                              lnet_transaction_timeout / 2);
-               wait_event_interruptible_timeout(the_lnet.ln_mt_waitq,
-                                               false,
-                                               cfs_time_seconds(interval));
+                              min((unsigned int) alive_router_check_interval /
+                                       lnet_current_net_count,
+                                  lnet_transaction_timeout / 2));
+               wait_for_completion_interruptible_timeout(
+                       &the_lnet.ln_mt_wait_complete,
+                       cfs_time_seconds(interval));
+               /* Must re-init the completion before testing anything,
+                * including ln_mt_state.
+                */
+               reinit_completion(&the_lnet.ln_mt_wait_complete);
        }
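
The wakeup interval above is the minimum of three bounds: the recovery interval, the router-check interval spread across however many networks are configured, and half the transaction timeout. A one-line worked sketch (the values are assumed for illustration, not necessarily the module defaults):

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
            unsigned int recovery = 1, router_check = 60, nets = 4, txn = 50;

            /* min(1, min(60 / 4, 50 / 2)) = 1 second between wakeups */
            printf("%u\n", MIN(recovery, MIN(router_check / nets, txn / 2)));
            return 0;
    }
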
 
        /* Shutting down */
@@ -3566,6 +3710,7 @@ lnet_mt_event_handler(struct lnet_event *event)
        case LNET_EVENT_UNLINK:
                CDEBUG(D_NET, "%s recovery ping unlinked\n",
                       libcfs_nid2str(ev_info->mt_nid));
+               /* fallthrough */
        case LNET_EVENT_REPLY:
                lnet_handle_recovery_reply(ev_info, event->status,
                                           event->type == LNET_EVENT_UNLINK);
@@ -3604,7 +3749,7 @@ lnet_rsp_tracker_create(void)
 static void
 lnet_rsp_tracker_clean(void)
 {
-       lnet_finalize_expired_responses(true);
+       lnet_finalize_expired_responses();
 
        cfs_percpt_free(the_lnet.ln_mt_rstq);
        the_lnet.ln_mt_rstq = NULL;
@@ -3675,7 +3820,7 @@ void lnet_monitor_thr_stop(void)
        lnet_net_unlock(LNET_LOCK_EX);
 
        /* tell the monitor thread that we're shutting down */
-       wake_up(&the_lnet.ln_mt_waitq);
+       complete(&the_lnet.ln_mt_wait_complete);
 
        /* block until monitor thread signals that it's done */
        down(&the_lnet.ln_mt_signal);
@@ -4109,16 +4254,17 @@ int
 lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
           void *private, int rdma_req)
 {
-       int             rc = 0;
-       int             cpt;
-       int             for_me;
-       struct lnet_msg *msg;
-       lnet_pid_t     dest_pid;
-       lnet_nid_t     dest_nid;
-       lnet_nid_t     src_nid;
        struct lnet_peer_ni *lpni;
-       __u32          payload_length;
-       __u32          type;
+       struct lnet_msg *msg;
+       __u32 payload_length;
+       lnet_pid_t dest_pid;
+       lnet_nid_t dest_nid;
+       lnet_nid_t src_nid;
+       bool push = false;
+       int for_me;
+       __u32 type;
+       int rc = 0;
+       int cpt;
 
        LASSERT (!in_interrupt ());
 
@@ -4173,16 +4319,22 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
        }
 
        if (the_lnet.ln_routing &&
-           ni->ni_last_alive != ktime_get_real_seconds()) {
-               /* NB: so far here is the only place to set NI status to "up */
+           ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
                lnet_ni_lock(ni);
-               ni->ni_last_alive = ktime_get_real_seconds();
+               spin_lock(&ni->ni_net->net_lock);
+               ni->ni_net->net_last_alive = ktime_get_real_seconds();
+               spin_unlock(&ni->ni_net->net_lock);
                if (ni->ni_status != NULL &&
-                   ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
+                   ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
                        ni->ni_status->ns_status = LNET_NI_STATUS_UP;
+                       push = true;
+               }
                lnet_ni_unlock(ni);
        }
 
+       if (push)
+               lnet_push_update_to_peers(1);
+
        /* Regard a bad destination NID as a protocol error.  Senders should
         * know what they're doing; if they don't they're misconfigured, buggy
         * or malicious so we chop them off at the knees :) */
@@ -4240,7 +4392,7 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
        }
 
        if (!list_empty(&the_lnet.ln_drop_rules) &&
-           lnet_drop_rule_match(hdr, NULL)) {
+           lnet_drop_rule_match(hdr, ni->ni_nid, NULL)) {
                CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate"
                              "silent message loss\n",
                       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
@@ -4352,6 +4504,10 @@ lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
                        return 0;
                goto drop;
        }
+
+       if (the_lnet.ln_routing)
+               lpni->lpni_last_alive = ktime_get_seconds();
+
        msg->msg_rxpeer = lpni;
        msg->msg_rxni = ni;
        lnet_ni_addref_locked(ni, cpt);