+ best_lpni = lpni;
+ best_lpni_credits = lpni->lpni_txcredits;
+ }
+
+ /* if we still can't find a peer ni then we can't reach it */
+ if (!best_lpni) {
+ __u32 net_id = (peer_net) ? peer_net->lpn_net_id :
+ LNET_NIDNET(dst_nid);
+ CDEBUG(D_NET, "no peer_ni found on peer net %s\n",
+ libcfs_net2str(net_id));
+ return NULL;
+ }
+
+ CDEBUG(D_NET, "sd_best_lpni = %s\n",
+ libcfs_nid2str(best_lpni->lpni_nid));
+
+ return best_lpni;
+}
+
+/*
+ * Select the best peer_ni of @peer on network @net_id for the send
+ * described by @sd, delegating the actual selection to
+ * lnet_select_peer_ni(). Returns NULL if the peer has no NI on that
+ * net.
+ *
+ * Prerequisite: the best_ni should already be set in the sd
+ */
+static inline struct lnet_peer_ni *
+lnet_find_best_lpni_on_net(struct lnet_send_data *sd, struct lnet_peer *peer,
+ __u32 net_id)
+{
+ struct lnet_peer_net *peer_net;
+
+ /*
+ * The gateway is Multi-Rail capable so now we must select the
+ * proper peer_ni
+ */
+ peer_net = lnet_peer_get_net_locked(peer, net_id);
+
+ if (!peer_net) {
+ CERROR("gateway peer %s has no NI on net %s\n",
+ libcfs_nid2str(peer->lp_primary_nid),
+ libcfs_net2str(net_id));
+ return NULL;
+ }
+
+ return lnet_select_peer_ni(sd, peer, peer_net);
+}
+
+/*
+ * Record sd_best_ni as the preferred source NID on sd_best_lpni.
+ * Only done for NMR destinations, only for fresh requests (not REPLY
+ * or ACK, which must reuse the path of the original message) and only
+ * if no preference has been recorded yet.
+ *
+ * Requires sd_best_ni and sd_best_lpni to be set by the caller.
+ */
+static inline void
+lnet_set_non_mr_pref_nid(struct lnet_send_data *sd)
+{
+ if (sd->sd_send_case & NMR_DST &&
+ sd->sd_msg->msg_type != LNET_MSG_REPLY &&
+ sd->sd_msg->msg_type != LNET_MSG_ACK &&
+ sd->sd_best_lpni->lpni_pref_nnids == 0) {
+ CDEBUG(D_NET, "Setting preferred local NID %s on NMR peer %s\n",
+ libcfs_nid2str(sd->sd_best_ni->ni_nid),
+ libcfs_nid2str(sd->sd_best_lpni->lpni_nid));
+ lnet_peer_ni_set_non_mr_pref_nid(sd->sd_best_lpni,
+ sd->sd_best_ni->ni_nid);
+ }
+}
+
+/*
+ * Source Specified
+ * Local Destination
+ * non-mr peer
+ *
+ * use the source and destination NIDs as the pathway
+ *
+ * Returns -EINVAL if the requested source NID is not a local NI,
+ * otherwise the result of lnet_handle_send().
+ */
+static int
+lnet_handle_spec_local_nmr_dst(struct lnet_send_data *sd)
+{
+ /* the destination lpni is set before we get here. */
+
+ /* find local NI */
+ sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
+ if (!sd->sd_best_ni) {
+ CERROR("Can't send to %s: src %s is not a "
+ "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
+ libcfs_nid2str(sd->sd_src_nid));
+ return -EINVAL;
+ }
+
+ /*
+ * the preferred NID will only be set for NMR peers
+ */
+ lnet_set_non_mr_pref_nid(sd);
+
+ return lnet_handle_send(sd);
+}
+
+/*
+ * Source Specified
+ * Local Destination
+ * MR Peer
+ *
+ * Run the selection algorithm on the peer NIs unless we're sending
+ * a response, in this case just send to the destination
+ *
+ * Returns -EINVAL if the source NID is not local, -EHOSTUNREACH if no
+ * peer_ni could be selected, otherwise the result of the send helper.
+ */
+static int
+lnet_handle_spec_local_mr_dst(struct lnet_send_data *sd)
+{
+ sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
+ if (!sd->sd_best_ni) {
+ CERROR("Can't send to %s: src %s is not a "
+ "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
+ libcfs_nid2str(sd->sd_src_nid));
+ return -EINVAL;
+ }
+
+ /*
+ * only run the selection algorithm to pick the peer_ni if we're
+ * sending a GET or a PUT. Responses are sent to the same
+ * destination NID provided.
+ */
+ if (!(sd->sd_send_case & SND_RESP)) {
+ sd->sd_best_lpni =
+ lnet_find_best_lpni_on_net(sd, sd->sd_peer,
+ sd->sd_best_ni->ni_net->net_id);
+ }
+
+ /* loopback peer_ni gets the lolnd shortcut */
+ if (sd->sd_best_lpni &&
+ sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
+ return lnet_handle_lo_send(sd);
+ else if (sd->sd_best_lpni)
+ return lnet_handle_send(sd);
+
+ CERROR("can't send to %s. no NI on %s\n",
+ libcfs_nid2str(sd->sd_dst_nid),
+ libcfs_net2str(sd->sd_best_ni->ni_net->net_id));
+
+ return -EHOSTUNREACH;
+}
+
+/*
+ * Pick the best local NI on the local net matching
+ * @peer_net->lpn_net_id, seeded with @cur_best_ni (may be NULL).
+ * When @incr_seq is true the winner's sequence number is bumped for
+ * round-robin. Returns NULL if that net is not configured locally.
+ */
+struct lnet_ni *
+lnet_find_best_ni_on_spec_net(struct lnet_ni *cur_best_ni,
+ struct lnet_peer *peer,
+ struct lnet_peer_net *peer_net,
+ int cpt,
+ bool incr_seq)
+{
+ struct lnet_net *local_net;
+ struct lnet_ni *best_ni;
+
+ local_net = lnet_get_net_locked(peer_net->lpn_net_id);
+ if (!local_net)
+ return NULL;
+
+ /*
+ * Iterate through the NIs in this local Net and select
+ * the NI to send from. The selection is determined by
+ * these 3 criterion in the following priority:
+ * 1. NUMA
+ * 2. NI available credits
+ * 3. Round Robin
+ */
+ best_ni = lnet_get_best_ni(local_net, cur_best_ni,
+ peer, peer_net, cpt);
+
+ if (incr_seq && best_ni)
+ best_ni->ni_seq++;
+
+ return best_ni;
+}
+
+/*
+ * Kick off (or continue) discovery of the peer behind @lpni and park
+ * @msg on the peer's discovery pending queue; the message is resent
+ * once discovery completes.
+ *
+ * Returns LNET_DC_WAIT when the message has been queued, or the
+ * error from lnet_discover_peer_locked().
+ *
+ * Called with the net lock for @cpt held.
+ */
+static int
+lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
+ struct lnet_msg *msg, lnet_nid_t rtr_nid,
+ int cpt)
+{
+ struct lnet_peer *peer;
+ lnet_nid_t primary_nid;
+ int rc;
+
+ /* hold lpni across the discovery call */
+ lnet_peer_ni_addref_locked(lpni);
+
+ rc = lnet_discover_peer_locked(lpni, cpt, false);
+ if (rc) {
+ lnet_peer_ni_decref_locked(lpni);
+ return rc;
+ }
+ /* The peer may have changed. */
+ peer = lpni->lpni_peer_net->lpn_peer;
+ /* queue message and return */
+ msg->msg_rtr_nid_param = rtr_nid;
+ msg->msg_sending = 0;
+ msg->msg_txpeer = NULL;
+ spin_lock(&peer->lp_lock);
+ list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
+ spin_unlock(&peer->lp_lock);
+ lnet_peer_ni_decref_locked(lpni);
+ primary_nid = peer->lp_primary_nid;
+
+ CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
+ msg, libcfs_nid2str(primary_nid));
+
+ return LNET_DC_WAIT;
+}
+
+/*
+ * Find a route to @dst_nid and select the gateway peer_ni to send to.
+ *
+ * On success *gw_lpni and *gw_peer are set, sd->sd_best_ni is filled
+ * in if the caller hadn't set it already, and the chosen route's
+ * sequence number is bumped for round-robin.
+ *
+ * Returns 0 on success, -EHOSTUNREACH when no route exists, -EFAULT
+ * on internal inconsistencies.
+ */
+static int
+lnet_handle_find_routed_path(struct lnet_send_data *sd,
+ lnet_nid_t dst_nid,
+ struct lnet_peer_ni **gw_lpni,
+ struct lnet_peer **gw_peer)
+{
+ struct lnet_peer *gw;
+ struct lnet_route *best_route;
+ struct lnet_route *last_route;
+ struct lnet_peer_ni *lpni = NULL;
+ lnet_nid_t src_nid = sd->sd_src_nid;
+
+ best_route = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid),
+ sd->sd_rtr_nid, &last_route);
+ if (!best_route) {
+ CERROR("no route to %s from %s\n",
+ libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
+ return -EHOSTUNREACH;
+ }
+
+ gw = best_route->lr_gateway;
+ *gw_peer = gw;
+
+ /*
+ * Discover this gateway if it hasn't already been discovered.
+ * This means we might delay the message until discovery has
+ * completed
+ */
+#if 0
+ /* TODO: disable discovery for now */
+ if (lnet_msg_discovery(sd->sd_msg) &&
+ !lnet_peer_is_uptodate(*gw_peer)) {
+ sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
+ return lnet_initiate_peer_discovery(gw, sd->sd_msg,
+ sd->sd_rtr_nid, sd->sd_cpt);
+ }
+#endif
+
+ if (!sd->sd_best_ni)
+ sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
+ lnet_peer_get_net_locked(gw,
+ best_route->lr_lnet),
+ sd->sd_md_cpt,
+ true);
+
+ if (!sd->sd_best_ni) {
+ CERROR("Internal Error. Expected local ni on %s "
+ "but none found: %s\n",
+ libcfs_net2str(best_route->lr_lnet),
+ libcfs_nid2str(sd->sd_src_nid));
+ return -EFAULT;
+ }
+
+ /*
+ * if gw is MR let's find its best peer_ni
+ */
+ if (lnet_peer_is_multi_rail(gw)) {
+ lpni = lnet_find_best_lpni_on_net(sd, gw,
+ sd->sd_best_ni->ni_net->net_id);
+ /*
+ * We've already verified that the gw has an NI on that
+ * desired net, but we're not finding it. Something is
+ * wrong.
+ */
+ if (!lpni) {
+ CERROR("Internal Error. Route expected to %s from %s\n",
+ libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EFAULT;
+ }
+ } else {
+ struct lnet_peer_net *lpn;
+
+ lpn = lnet_peer_get_net_locked(gw, best_route->lr_lnet);
+ /*
+ * list_entry() is container_of() and can never return
+ * NULL, so the old "if (!lpni)" check was dead code;
+ * an empty lpn_peer_nis list must be rejected before
+ * taking the first entry.
+ */
+ if (!lpn || list_empty(&lpn->lpn_peer_nis)) {
+ CERROR("Internal Error. Route expected to %s from %s\n",
+ libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EFAULT;
+ }
+ lpni = list_entry(lpn->lpn_peer_nis.next, struct lnet_peer_ni,
+ lpni_peer_nis);
+ }
+
+ *gw_lpni = lpni;
+
+ /*
+ * increment the route sequence number since now we're sure we're
+ * going to use it
+ */
+ LASSERT(best_route && last_route);
+ best_route->lr_seq = last_route->lr_seq + 1;
+
+ return 0;
+}
+
+/*
+ * Handle two cases:
+ *
+ * Case 1:
+ * Source specified
+ * Remote destination
+ * Non-MR destination
+ *
+ * Case 2:
+ * Source specified
+ * Remote destination
+ * MR destination
+ *
+ * The handling of these two cases is similar. Even though the destination
+ * can be MR or non-MR, we'll deal directly with the router.
+ *
+ * Returns -EINVAL if the source NID is not local, an error from
+ * lnet_handle_find_routed_path(), or the result of lnet_handle_send().
+ */
+static int
+lnet_handle_spec_router_dst(struct lnet_send_data *sd)
+{
+ int rc;
+ struct lnet_peer_ni *gw_lpni = NULL;
+ struct lnet_peer *gw_peer = NULL;
+
+ /* find local NI */
+ sd->sd_best_ni = lnet_nid2ni_locked(sd->sd_src_nid, sd->sd_cpt);
+ if (!sd->sd_best_ni) {
+ CERROR("Can't send to %s: src %s is not a "
+ "local nid\n", libcfs_nid2str(sd->sd_dst_nid),
+ libcfs_nid2str(sd->sd_src_nid));
+ return -EINVAL;
+ }
+
+ rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
+ &gw_peer);
+ if (rc)
+ return rc;
+
+ if (sd->sd_send_case & NMR_DST)
+ /*
+ * since the final destination is non-MR let's set its preferred
+ * NID before we send
+ */
+ lnet_set_non_mr_pref_nid(sd);
+
+ /*
+ * We're going to send to the gw found so let's set its
+ * info
+ */
+ sd->sd_peer = gw_peer;
+ sd->sd_best_lpni = gw_lpni;
+
+ return lnet_handle_send(sd);
+}
+
+/*
+ * Walk all of @peer's nets and return the best local NI on any net
+ * shared with the peer, or NULL if the peer has no NI on a local net.
+ * The winner's sequence number is bumped for round-robin.
+ */
+struct lnet_ni *
+lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt)
+{
+ struct lnet_peer_net *peer_net = NULL;
+ struct lnet_ni *best_ni = NULL;
+
+ /*
+ * The peer can have multiple interfaces, some of them can be on
+ * the local network and others on a routed network. We should
+ * prefer the local network. However if the local network is not
+ * available then we need to try the routed network
+ */
+
+ /* go through all the peer nets and find the best_ni */
+ list_for_each_entry(peer_net, &peer->lp_peer_nets, lpn_peer_nets) {
+ /*
+ * The peer's list of nets can contain non-local nets. We
+ * want to only examine the local ones.
+ */
+ if (!lnet_get_net_locked(peer_net->lpn_net_id))
+ continue;
+ best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
+ peer_net, md_cpt, false);
+ }
+
+ if (best_ni)
+ /* increment sequence number so we can round robin */
+ best_ni->ni_seq++;
+
+ return best_ni;
+}
+
+/*
+ * Return the local NI that sd_best_lpni's peer has previously recorded
+ * as its preferred source on that network, or NULL if no peer_ni on
+ * the net has a preference yet.
+ *
+ * Requires sd->sd_peer and sd->sd_best_lpni to be set.
+ */
+static struct lnet_ni *
+lnet_find_existing_preferred_best_ni(struct lnet_send_data *sd)
+{
+ struct lnet_ni *best_ni = NULL;
+ struct lnet_peer_net *peer_net;
+ struct lnet_peer *peer = sd->sd_peer;
+ struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
+ struct lnet_peer_ni *lpni;
+ int cpt = sd->sd_cpt;
+
+ /*
+ * We must use a consistent source address when sending to a
+ * non-MR peer. However, a non-MR peer can have multiple NIDs
+ * on multiple networks, and we may even need to talk to this
+ * peer on multiple networks -- certain types of
+ * load-balancing configuration do this.
+ *
+ * So we need to pick the NI the peer prefers for this
+ * particular network.
+ */
+
+ /* Get the target peer_ni */
+ peer_net = lnet_peer_get_net_locked(peer,
+ LNET_NIDNET(best_lpni->lpni_nid));
+ LASSERT(peer_net != NULL);
+ list_for_each_entry(lpni, &peer_net->lpn_peer_nis,
+ lpni_peer_nis) {
+ if (lpni->lpni_pref_nnids == 0)
+ continue;
+ /* an NMR peer_ni records at most one preferred NID */
+ LASSERT(lpni->lpni_pref_nnids == 1);
+ best_ni = lnet_nid2ni_locked(
+ lpni->lpni_pref.nid, cpt);
+ break;
+ }
+
+ return best_ni;
+}
+
+/*
+ * Set sd->sd_best_ni to the peer's preferred local NI for this net,
+ * falling back to a fresh selection when no preference exists, and
+ * record the choice on the peer_ni if needed.
+ *
+ * Returns 0 on success or -EHOSTUNREACH when no local NI can reach
+ * the destination net.
+ *
+ * Prerequisite: sd->sd_peer and sd->sd_best_lpni should be set
+ */
+static int
+lnet_select_preferred_best_ni(struct lnet_send_data *sd)
+{
+ struct lnet_ni *best_ni = NULL;
+ struct lnet_peer_ni *best_lpni = sd->sd_best_lpni;
+
+ /*
+ * We must use a consistent source address when sending to a
+ * non-MR peer. However, a non-MR peer can have multiple NIDs
+ * on multiple networks, and we may even need to talk to this
+ * peer on multiple networks -- certain types of
+ * load-balancing configuration do this.
+ *
+ * So we need to pick the NI the peer prefers for this
+ * particular network.
+ */
+
+ best_ni = lnet_find_existing_preferred_best_ni(sd);
+
+ /* if best_ni is still not set just pick one */
+ if (!best_ni) {
+ best_ni =
+ lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
+ sd->sd_best_lpni->lpni_peer_net,
+ sd->sd_md_cpt, true);
+ /* If there is no best_ni we don't have a route */
+ if (!best_ni) {
+ CERROR("no path to %s from net %s\n",
+ libcfs_nid2str(best_lpni->lpni_nid),
+ libcfs_net2str(best_lpni->lpni_net->net_id));
+ return -EHOSTUNREACH;
+ }
+ }
+
+ sd->sd_best_ni = best_ni;
+
+ /* Set preferred NI if necessary. */
+ lnet_set_non_mr_pref_nid(sd);
+
+ return 0;
+}
+
+
+/*
+ * Source not specified
+ * Local destination
+ * Non-MR Peer
+ *
+ * always use the same source NID for NMR peers
+ * If we've talked to that peer before then we already have a preferred
+ * source NI associated with it. Otherwise, we select a preferred local NI
+ * and store it in the peer
+ *
+ * Returns -EFAULT when no destination peer_ni is set, otherwise the
+ * result of the NI selection / send.
+ */
+static int
+lnet_handle_any_local_nmr_dst(struct lnet_send_data *sd)
+{
+ int rc;
+
+ /* sd->sd_best_lpni is already set to the final destination */
+
+ /*
+ * At this point we should've created the peer ni and peer. If we
+ * can't find it, then something went wrong. Instead of assert
+ * output a relevant message and fail the send
+ */
+ if (!sd->sd_best_lpni) {
+ CERROR("Internal fault. Unable to send msg %s to %s. "
+ "NID not known\n",
+ lnet_msgtyp2str(sd->sd_msg->msg_type),
+ libcfs_nid2str(sd->sd_dst_nid));
+ return -EFAULT;
+ }
+
+ rc = lnet_select_preferred_best_ni(sd);
+ if (!rc)
+ rc = lnet_handle_send(sd);
+
+ return rc;
+}
+
+/*
+ * Local-destination leg of the any-source MR case.
+ *
+ * Returns the result of the send on success, -EHOSTUNREACH/-EFAULT on
+ * failure, or PASS_THROUGH when the peer has no NI on a local net and
+ * the caller should fall back to a routed path.
+ */
+static int
+lnet_handle_any_mr_dsta(struct lnet_send_data *sd)
+{
+ /*
+ * NOTE we've already handled the remote peer case. So we only
+ * need to worry about the local case here.
+ *
+ * if we're sending a response, ACK or reply, we need to send it
+ * to the destination NID given to us. At this point we already
+ * have the peer_ni we're suppose to send to, so just find the
+ * best_ni on the peer net and use that. Since we're sending to an
+ * MR peer then we can just run the selection algorithm on our
+ * local NIs and pick the best one.
+ */
+ if (sd->sd_send_case & SND_RESP) {
+ sd->sd_best_ni =
+ lnet_find_best_ni_on_spec_net(NULL, sd->sd_peer,
+ sd->sd_best_lpni->lpni_peer_net,
+ sd->sd_md_cpt, true);
+
+ if (!sd->sd_best_ni) {
+ /*
+ * We're not going to deal with not able to send
+ * a response to the provided final destination
+ */
+ CERROR("Can't send response to %s. "
+ "No local NI available\n",
+ libcfs_nid2str(sd->sd_dst_nid));
+ return -EHOSTUNREACH;
+ }
+
+ return lnet_handle_send(sd);
+ }
+
+ /*
+ * If we get here that means we're sending a fresh request, PUT or
+ * GET, so we need to run our standard selection algorithm.
+ * First find the best local interface that's on any of the peer's
+ * networks.
+ */
+ sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
+ sd->sd_md_cpt);
+ if (sd->sd_best_ni) {
+ sd->sd_best_lpni =
+ lnet_find_best_lpni_on_net(sd, sd->sd_peer,
+ sd->sd_best_ni->ni_net->net_id);
+
+ /*
+ * if we're successful in selecting a peer_ni on the local
+ * network, then send to it. Otherwise fall through and
+ * try and see if we can reach it over another routed
+ * network
+ */
+ if (sd->sd_best_lpni &&
+ sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
+ /*
+ * in case we initially started with a routed
+ * destination, let's reset to local
+ */
+ sd->sd_send_case &= ~REMOTE_DST;
+ sd->sd_send_case |= LOCAL_DST;
+ return lnet_handle_lo_send(sd);
+ } else if (sd->sd_best_lpni) {
+ /*
+ * in case we initially started with a routed
+ * destination, let's reset to local
+ */
+ sd->sd_send_case &= ~REMOTE_DST;
+ sd->sd_send_case |= LOCAL_DST;
+ return lnet_handle_send(sd);
+ }
+
+ CERROR("Internal Error. Expected to have a best_lpni: "
+ "%s -> %s\n",
+ libcfs_nid2str(sd->sd_src_nid),
+ libcfs_nid2str(sd->sd_dst_nid));
+
+ return -EFAULT;
+ }
+
+ /*
+ * Peer doesn't have a local network. Let's see if there is
+ * a remote network we can reach it on.
+ */
+ return PASS_THROUGH;
+}
+
+/*
+ * Case 1:
+ * Source NID not specified
+ * Local destination
+ * MR peer
+ *
+ * Case 2:
+ * Source NID not specified
+ * Remote destination
+ * MR peer
+ *
+ * In both of these cases if we're sending a response, ACK or REPLY, then
+ * we need to send to the destination NID provided.
+ *
+ * In the remote case let's deal with MR routers.
+ *
+ */
+
+static int
+lnet_handle_any_mr_dst(struct lnet_send_data *sd)
+{
+ int rc = 0;
+ struct lnet_peer *gw_peer = NULL;
+ struct lnet_peer_ni *gw_lpni = NULL;
+
+ /*
+ * handle sending a response to a remote peer here so we don't
+ * have to worry about it if we hit lnet_handle_any_mr_dsta()
+ *
+ * The function-scope gw_lpni/gw_peer are reused here; the
+ * previous inner declarations shadowed them (-Wshadow) for no
+ * benefit.
+ */
+ if (sd->sd_send_case & REMOTE_DST &&
+ sd->sd_send_case & SND_RESP) {
+ rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid,
+ &gw_lpni, &gw_peer);
+ if (rc < 0) {
+ CERROR("Can't send response to %s. "
+ "No route available\n",
+ libcfs_nid2str(sd->sd_dst_nid));
+ return -EHOSTUNREACH;
+ } else if (rc > 0) {
+ /* message queued pending discovery */
+ return rc;
+ }
+
+ sd->sd_best_lpni = gw_lpni;
+ sd->sd_peer = gw_peer;
+
+ return lnet_handle_send(sd);
+ }
+
+ /*
+ * Even though the NID for the peer might not be on a local network,
+ * since the peer is MR there could be other interfaces on the
+ * local network. In that case we'd still like to prefer the local
+ * network over the routed network. If we're unable to do that
+ * then we select the best router among the different routed networks,
+ * and if the router is MR then we can deal with it as such.
+ */
+ rc = lnet_handle_any_mr_dsta(sd);
+ if (rc != PASS_THROUGH)
+ return rc;
+
+ /*
+ * TODO; One possible enhancement is to run the selection
+ * algorithm on the peer. However for remote peers the credits are
+ * not decremented, so we'll be basically going over the peer NIs
+ * in round robin. An MR router will run the selection algorithm
+ * on the next-hop interfaces.
+ */
+ rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
+ &gw_peer);
+ if (rc)
+ return rc;
+
+ sd->sd_send_case &= ~LOCAL_DST;
+ sd->sd_send_case |= REMOTE_DST;
+
+ sd->sd_peer = gw_peer;
+ sd->sd_best_lpni = gw_lpni;
+
+ return lnet_handle_send(sd);
+}
+
+/*
+ * Source not specified
+ * Remote destination
+ * Non-MR peer
+ *
+ * Must send to the specified peer NID using the same source NID that
+ * we've used before. If it's the first time to talk to that peer then
+ * find the source NI and assign it as preferred to that peer
+ */
+static int
+lnet_handle_any_router_nmr_dst(struct lnet_send_data *sd)
+{
+ int rc;
+ struct lnet_peer_ni *gw_lpni = NULL;
+ struct lnet_peer *gw_peer = NULL;
+
+ /*
+ * Let's set if we have a preferred NI to talk to this NMR peer
+ */
+ sd->sd_best_ni = lnet_find_existing_preferred_best_ni(sd);
+
+ /*
+ * find the router and that'll find the best NI if we didn't find
+ * it already.
+ */
+ rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
+ &gw_peer);
+ if (rc)
+ return rc;
+
+ /*
+ * set the best_ni we've chosen as the preferred one for
+ * this peer
+ * (sd_best_ni is guaranteed set here: either found above or
+ * selected inside lnet_handle_find_routed_path())
+ */
+ lnet_set_non_mr_pref_nid(sd);
+
+ /* we'll be sending to the gw */
+ sd->sd_best_lpni = gw_lpni;
+ sd->sd_peer = gw_peer;
+
+ return lnet_handle_send(sd);
+}
+
+/*
+ * Dispatch the send to the handler matching the {SRC, DST locality,
+ * MR-ness} combination encoded in sd_send_case.
+ */
+static int
+lnet_handle_send_case_locked(struct lnet_send_data *sd)
+{
+ /*
+ * turn off the SND_RESP bit.
+ * It will be checked in the case handling
+ *
+ * NOTE(review): the '&=' clears SND_RESP in sd->sd_send_case
+ * itself, yet several handlers later test
+ * sd->sd_send_case & SND_RESP -- confirm this is intended and
+ * not meant to be a plain '&' that only affects the local copy.
+ */
+ __u32 send_case = sd->sd_send_case &= ~SND_RESP ;
+
+ CDEBUG(D_NET, "Source %s%s to %s %s %s destination\n",
+ (send_case & SRC_SPEC) ? "Specified: " : "ANY",
+ (send_case & SRC_SPEC) ? libcfs_nid2str(sd->sd_src_nid) : "",
+ (send_case & MR_DST) ? "MR: " : "NMR: ",
+ libcfs_nid2str(sd->sd_dst_nid),
+ (send_case & LOCAL_DST) ? "local" : "routed");
+
+ switch (send_case) {
+ /*
+ * For all cases where the source is specified, we should always
+ * use the destination NID, whether it's an MR destination or not,
+ * since we're continuing a series of related messages for the
+ * same RPC
+ */
+ case SRC_SPEC_LOCAL_NMR_DST:
+ return lnet_handle_spec_local_nmr_dst(sd);
+ case SRC_SPEC_LOCAL_MR_DST:
+ return lnet_handle_spec_local_mr_dst(sd);
+ case SRC_SPEC_ROUTER_NMR_DST:
+ case SRC_SPEC_ROUTER_MR_DST:
+ return lnet_handle_spec_router_dst(sd);
+ case SRC_ANY_LOCAL_NMR_DST:
+ return lnet_handle_any_local_nmr_dst(sd);
+ case SRC_ANY_LOCAL_MR_DST:
+ case SRC_ANY_ROUTER_MR_DST:
+ return lnet_handle_any_mr_dst(sd);
+ case SRC_ANY_ROUTER_NMR_DST:
+ return lnet_handle_any_router_nmr_dst(sd);
+ default:
+ CERROR("Unknown send case\n");
+ return -1;
+ }
+}
+
+/*
+ * Core path-selection entry point: classify the send (source
+ * specified/any, local/routed destination, MR/NMR peer, response or
+ * not) and dispatch to the matching handler.
+ *
+ * Returns a LNET_CREDIT_* value, LNET_DC_WAIT when the message is
+ * parked pending peer discovery, or a negative errno.
+ */
+static int
+lnet_select_pathway(lnet_nid_t src_nid, lnet_nid_t dst_nid,
+ struct lnet_msg *msg, lnet_nid_t rtr_nid)
+{
+ struct lnet_peer_ni *lpni;
+ struct lnet_peer *peer;
+ struct lnet_send_data send_data;
+ int cpt, rc;
+ int md_cpt;
+ __u32 send_case = 0;
+
+ memset(&send_data, 0, sizeof(send_data));
+
+ /*
+ * get an initial CPT to use for locking. The idea here is not to
+ * serialize the calls to select_pathway, so that as many
+ * operations can run concurrently as possible. To do that we use
+ * the CPT where this call is being executed. Later on when we
+ * determine the CPT to use in lnet_message_commit, we switch the
+ * lock and check if there was any configuration change. If none,
+ * then we proceed, if there is, then we restart the operation.
+ */
+ cpt = lnet_net_lock_current();
+
+ md_cpt = lnet_cpt_of_md(msg->msg_md, msg->msg_offset);
+ if (md_cpt == CFS_CPT_ANY)
+ md_cpt = cpt;
+
+again:
+
+ /*
+ * If we're being asked to send to the loopback interface, there
+ * is no need to go through any selection. We can just shortcut
+ * the entire process and send over lolnd
+ */
+ send_data.sd_msg = msg;
+ send_data.sd_cpt = cpt;
+ if (LNET_NETTYP(LNET_NIDNET(dst_nid)) == LOLND) {
+ rc = lnet_handle_lo_send(&send_data);
+ lnet_net_unlock(cpt);
+ return rc;
+ }
+
+ /*
+ * find an existing peer_ni, or create one and mark it as having been
+ * created due to network traffic. This call will create the
+ * peer->peer_net->peer_ni tree.
+ */
+ lpni = lnet_nid2peerni_locked(dst_nid, LNET_NID_ANY, cpt);
+ if (IS_ERR(lpni)) {
+ lnet_net_unlock(cpt);
+ return PTR_ERR(lpni);
+ }
+
+ /*
+ * Cache the original src_nid. If we need to resend the message
+ * then we'll need to know whether the src_nid was originally
+ * specified for this message. If it was originally specified,
+ * then we need to keep using the same src_nid since it's
+ * continuing the same sequence of messages.
+ */
+ msg->msg_src_nid_param = src_nid;
+
+ /*
+ * Now that we have a peer_ni, check if we want to discover
+ * the peer. Traffic to the LNET_RESERVED_PORTAL should not
+ * trigger discovery.
+ */
+ peer = lpni->lpni_peer_net->lpn_peer;
+ if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) {
+ rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(cpt);
+ return rc;
+ }
+ lnet_peer_ni_decref_locked(lpni);
+
+ /*
+ * Identify the different send cases
+ */
+ if (src_nid == LNET_NID_ANY)
+ send_case |= SRC_ANY;
+ else
+ send_case |= SRC_SPEC;
+
+ if (lnet_get_net_locked(LNET_NIDNET(dst_nid)))
+ send_case |= LOCAL_DST;
+ else
+ send_case |= REMOTE_DST;
+
+ /*
+ * if this is a non-MR peer or if we're recovering a peer ni then
+ * let's consider this an NMR case so we can hit the destination
+ * NID.
+ */
+ if (!lnet_peer_is_multi_rail(peer) || msg->msg_recovery)
+ send_case |= NMR_DST;
+ else
+ send_case |= MR_DST;
+
+ if (msg->msg_type == LNET_MSG_REPLY ||
+ msg->msg_type == LNET_MSG_ACK)
+ send_case |= SND_RESP;
+
+ /* assign parameters to the send_data */
+ send_data.sd_rtr_nid = rtr_nid;
+ send_data.sd_src_nid = src_nid;
+ send_data.sd_dst_nid = dst_nid;
+ /* note: lpni is referenced here without a ref held; the peer
+ * table keeps it alive while the net lock is held */
+ send_data.sd_best_lpni = lpni;
+ /*
+ * keep a pointer to the final destination in case we're going to
+ * route, so we'll need to access it later
+ */
+ send_data.sd_final_dst_lpni = lpni;
+ send_data.sd_peer = peer;
+ send_data.sd_md_cpt = md_cpt;
+ send_data.sd_send_case = send_case;
+
+ rc = lnet_handle_send_case_locked(&send_data);
+
+ /*
+ * Update the local cpt since send_data.sd_cpt might've been
+ * updated as a result of calling lnet_handle_send_case_locked().
+ */
+ cpt = send_data.sd_cpt;
+
+ if (rc == REPEAT_SEND)
+ goto again;
+
+ lnet_net_unlock(cpt);
+
+ return rc;
+}
+
+/*
+ * Send @msg to its target, selecting the pathway via
+ * lnet_select_pathway(). Returns 0 when the message was sent or
+ * queued (for credits or discovery), or a negative errno on failure.
+ */
+int
+lnet_send(lnet_nid_t src_nid, struct lnet_msg *msg, lnet_nid_t rtr_nid)
+{
+ lnet_nid_t dst_nid = msg->msg_target.nid;
+ int rc;
+
+ /*
+ * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
+ * but we might want to use pre-determined router for ACK/REPLY
+ * in the future
+ */
+ /* NB: ni != NULL == interface pre-determined (ACK/REPLY) */
+ LASSERT(msg->msg_txpeer == NULL);
+ LASSERT(msg->msg_txni == NULL);
+ LASSERT(!msg->msg_sending);
+ LASSERT(!msg->msg_target_is_router);
+ LASSERT(!msg->msg_receiving);
+
+ msg->msg_sending = 1;
+
+ LASSERT(!msg->msg_tx_committed);
+
+ rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
+ if (rc < 0)
+ return rc;
+
+ if (rc == LNET_CREDIT_OK)
+ lnet_ni_send(msg->msg_txni, msg);
+
+ /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT or LNET_DC_WAIT */
+ return 0;
+}
+
+/* Kind of object a monitor-thread recovery ping event refers to */
+enum lnet_mt_event_type {
+ MT_TYPE_LOCAL_NI = 0,
+ MT_TYPE_PEER_NI
+};
+
+/* Context attached to a recovery ping so the event handler knows
+ * which NI (local or peer) the ping was sent for */
+struct lnet_mt_event_info {
+ enum lnet_mt_event_type mt_type;
+ lnet_nid_t mt_nid;
+};
+
+/*
+ * Detach @md from its response tracker (if any) and invalidate the
+ * tracker's MD handle; the monitor thread later reaps the tracker
+ * block from the cpt's rspt queue.
+ *
+ * called with res_lock held
+ */
+void
+lnet_detach_rsp_tracker(struct lnet_libmd *md, int cpt)
+{
+ struct lnet_rsp_tracker *rspt;
+
+ /*
+ * msg has a refcount on the MD so the MD is not going away.
+ * The rspt queue for the cpt is protected by
+ * the lnet_net_lock(cpt). cpt is the cpt of the MD cookie.
+ */
+ if (!md->md_rspt_ptr)
+ return;
+
+ rspt = md->md_rspt_ptr;
+ md->md_rspt_ptr = NULL;
+
+ /* debug code */
+ LASSERT(rspt->rspt_cpt == cpt);
+
+ /*
+ * invalidate the handle to indicate that a response has been
+ * received, which will then lead the monitor thread to clean up
+ * the rspt block.
+ */
+ LNetInvalidateMDHandle(&rspt->rspt_mdh);
+}
+
+/*
+ * Walk every cpt's response-tracker queue, reap trackers whose
+ * response has arrived (invalidated mdh) and time out those whose
+ * deadline passed (or all of them when @force), unlinking their MDs
+ * and penalizing the next hop's health. Runs on the monitor thread.
+ */
+static void
+lnet_finalize_expired_responses(bool force)
+{
+ struct lnet_libmd *md;
+ struct list_head local_queue;
+ struct lnet_rsp_tracker *rspt, *tmp;
+ int i;
+
+ if (the_lnet.ln_mt_rstq == NULL)
+ return;
+
+ cfs_cpt_for_each(i, lnet_cpt_table()) {
+ INIT_LIST_HEAD(&local_queue);
+
+ lnet_net_lock(i);
+ if (!the_lnet.ln_mt_rstq[i]) {
+ lnet_net_unlock(i);
+ continue;
+ }
+ /* work on a private copy of the queue so the net lock
+ * need not be held across the walk */
+ list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
+ lnet_net_unlock(i);
+
+ list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
+ /*
+ * The rspt mdh will be invalidated when a response
+ * is received or whenever we want to discard the
+ * block the monitor thread will walk the queue
+ * and clean up any rsts with an invalid mdh.
+ * The monitor thread will walk the queue until
+ * the first unexpired rspt block. This means that
+ * some rspt blocks which received their
+ * corresponding responses will linger in the
+ * queue until they are cleaned up eventually.
+ */
+ lnet_res_lock(i);
+ if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
+ lnet_res_unlock(i);
+ list_del_init(&rspt->rspt_on_list);
+ lnet_rspt_free(rspt, i);
+ continue;
+ }
+
+ if (ktime_compare(ktime_get(), rspt->rspt_deadline) >= 0 ||
+ force) {
+ struct lnet_peer_ni *lpni;
+ lnet_nid_t nid;
+
+ md = lnet_handle2md(&rspt->rspt_mdh);
+ if (!md) {
+ /* MD already gone; just reap the tracker */
+ LNetInvalidateMDHandle(&rspt->rspt_mdh);
+ lnet_res_unlock(i);
+ list_del_init(&rspt->rspt_on_list);
+ lnet_rspt_free(rspt, i);
+ continue;
+ }
+ LASSERT(md->md_rspt_ptr == rspt);
+ md->md_rspt_ptr = NULL;
+ lnet_res_unlock(i);
+
+ lnet_net_lock(i);
+ the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
+ lnet_net_unlock(i);
+
+ list_del_init(&rspt->rspt_on_list);
+
+ nid = rspt->rspt_next_hop_nid;
+
+ CNETERR("Response timed out: md = %p: nid = %s\n",
+ md, libcfs_nid2str(nid));
+ LNetMDUnlink(rspt->rspt_mdh);
+ lnet_rspt_free(rspt, i);
+
+ /*
+ * If there is a timeout on the response
+ * from the next hop decrement its health
+ * value so that we don't use it
+ */
+ lnet_net_lock(0);
+ lpni = lnet_find_peer_ni_locked(nid);
+ if (lpni) {
+ lnet_handle_remote_failure_locked(lpni);
+ lnet_peer_ni_decref_locked(lpni);
+ }
+ lnet_net_unlock(0);
+ } else {
+ /* queue is deadline-ordered; first unexpired
+ * entry ends the scan for this cpt */
+ lnet_res_unlock(i);
+ break;
+ }
+ }
+
+ lnet_net_lock(i);
+ if (!list_empty(&local_queue))
+ list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
+ lnet_net_unlock(i);
+ }
+}
+
+/*
+ * Drain @resendq, re-sending each queued message. Called with
+ * lnet_net_lock(cpt) held; the lock is dropped and re-taken around
+ * lnet_finalize() and lnet_send().
+ *
+ * NOTE(review): msg_hdr.src_nid is converted with le64_to_cpu()
+ * below, but msg_hdr.dest_nid is passed to
+ * lnet_find_peer_ni_locked() unconverted -- confirm the header is
+ * still in host byte order at this point.
+ */
+static void
+lnet_resend_pending_msgs_locked(struct list_head *resendq, int cpt)
+{
+ struct lnet_msg *msg;
+
+ while (!list_empty(resendq)) {
+ struct lnet_peer_ni *lpni;
+
+ msg = list_entry(resendq->next, struct lnet_msg,
+ msg_list);
+
+ list_del_init(&msg->msg_list);
+
+ lpni = lnet_find_peer_ni_locked(msg->msg_hdr.dest_nid);
+ if (!lpni) {
+ lnet_net_unlock(cpt);
+ CERROR("Expected that a peer is already created for %s\n",
+ libcfs_nid2str(msg->msg_hdr.dest_nid));
+ msg->msg_no_resend = true;
+ lnet_finalize(msg, -EFAULT);
+ lnet_net_lock(cpt);
+ } else {
+ struct lnet_peer *peer;
+ int rc;
+ lnet_nid_t src_nid = LNET_NID_ANY;
+
+ /*
+ * if this message is not being routed and the
+ * peer is non-MR then we must use the same
+ * src_nid that was used in the original send.
+ * Otherwise if we're routing the message (IE
+ * we're a router) then we can use any of our
+ * local interfaces. It doesn't matter to the
+ * final destination.
+ */
+ peer = lpni->lpni_peer_net->lpn_peer;
+ if (!msg->msg_routing &&
+ !lnet_peer_is_multi_rail(peer))
+ src_nid = le64_to_cpu(msg->msg_hdr.src_nid);
+
+ /*
+ * If we originally specified a src NID, then we
+ * must attempt to reuse it in the resend as well.
+ */
+ if (msg->msg_src_nid_param != LNET_NID_ANY)
+ src_nid = msg->msg_src_nid_param;
+ lnet_peer_ni_decref_locked(lpni);
+
+ lnet_net_unlock(cpt);
+ CDEBUG(D_NET, "resending %s->%s: %s recovery %d try# %d\n",
+ libcfs_nid2str(src_nid),
+ libcfs_id2str(msg->msg_target),
+ lnet_msgtyp2str(msg->msg_type),
+ msg->msg_recovery,
+ msg->msg_retry_count);
+ rc = lnet_send(src_nid, msg, LNET_NID_ANY);
+ if (rc) {
+ CERROR("Error sending %s to %s: %d\n",
+ lnet_msgtyp2str(msg->msg_type),
+ libcfs_id2str(msg->msg_target), rc);
+ msg->msg_no_resend = true;
+ lnet_finalize(msg, rc);
+ }
+ lnet_net_lock(cpt);
+ if (!rc)
+ the_lnet.ln_counters[cpt]->lct_health.lch_resend_count++;
+ }
+ }
+}
+
+/*
+ * Resend the pending messages of every cpt, taking each cpt's net
+ * lock around its queue drain.
+ */
+static void
+lnet_resend_pending_msgs(void)
+{
+ int i;
+
+ cfs_cpt_for_each(i, lnet_cpt_table()) {
+ lnet_net_lock(i);
+ lnet_resend_pending_msgs_locked(the_lnet.ln_mt_resendqs[i], i);
+ lnet_net_unlock(i);
+ }
+}
+
+/*
+ * Unlink the NI's recovery ping MD when recovery is pending, or
+ * unconditionally when @force.
+ *
+ * called with cpt and ni_lock held; both locks are dropped across
+ * LNetMDUnlink() and re-acquired before returning, so callers must
+ * not rely on state cached across this call.
+ */
+static void
+lnet_unlink_ni_recovery_mdh_locked(struct lnet_ni *ni, int cpt, bool force)
+{
+ struct lnet_handle_md recovery_mdh;
+
+ LNetInvalidateMDHandle(&recovery_mdh);
+
+ if (ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING ||
+ force) {
+ recovery_mdh = ni->ni_ping_mdh;
+ LNetInvalidateMDHandle(&ni->ni_ping_mdh);
+ }
+ lnet_ni_unlock(ni);
+ lnet_net_unlock(cpt);
+ if (!LNetMDHandleIsInvalid(recovery_mdh))
+ LNetMDUnlink(recovery_mdh);
+ lnet_net_lock(cpt);
+ lnet_ni_lock(ni);
+}
+
+/*
+ * Attempt to recover local NIs queued on ln_mt_localNIRecovq by
+ * sending each one a ping (lnet_send_ping()). Run from the monitor
+ * thread. NIs that are no longer active, or whose health is back at
+ * LNET_MAX_HEALTH_VALUE, are dropped from the queue; everything else
+ * is requeued for the next pass. (Presumably the ping completion
+ * handler is what restores ni_healthv -- that code is not here.)
+ */
+static void
+lnet_recover_local_nis(void)
+{
+ struct lnet_mt_event_info *ev_info;
+ struct list_head processed_list;
+ struct list_head local_queue;
+ struct lnet_handle_md mdh;
+ struct lnet_ni *tmp;
+ struct lnet_ni *ni;
+ lnet_nid_t nid;
+ int healthv;
+ int rc;
+
+ INIT_LIST_HEAD(&local_queue);
+ INIT_LIST_HEAD(&processed_list);
+
+ /*
+ * splice the recovery queue on a local queue. We will iterate
+ * through the local queue and update it as needed. Once we're
+ * done with the traversal, we'll splice the local queue back on
+ * the head of the ln_mt_localNIRecovq. Any newly added local NIs
+ * will be traversed in the next iteration.
+ */
+ lnet_net_lock(0);
+ list_splice_init(&the_lnet.ln_mt_localNIRecovq,
+ &local_queue);
+ lnet_net_unlock(0);
+
+ list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
+ /*
+ * if an NI is being deleted or it is now healthy, there
+ * is no need to keep it around in the recovery queue.
+ * The monitor thread is the only thread responsible for
+ * removing the NI from the recovery queue.
+ * Multiple threads can be adding NIs to the recovery
+ * queue.
+ */
+ healthv = atomic_read(&ni->ni_healthv);
+
+ lnet_net_lock(0);
+ lnet_ni_lock(ni);
+ if (ni->ni_state != LNET_NI_STATE_ACTIVE ||
+ healthv == LNET_MAX_HEALTH_VALUE) {
+ list_del_init(&ni->ni_recovery);
+ lnet_unlink_ni_recovery_mdh_locked(ni, 0, false);
+ lnet_ni_unlock(ni);
+ /* drop the ref the recovery queue held on this NI */
+ lnet_ni_decref_locked(ni, 0);
+ lnet_net_unlock(0);
+ continue;
+ }
+
+ /*
+ * if the local NI failed recovery we must unlink the md.
+ * But we want to keep the local_ni on the recovery queue
+ * so we can continue the attempts to recover it.
+ */
+ if (ni->ni_recovery_state & LNET_NI_RECOVERY_FAILED) {
+ lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
+ ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
+ }
+
+ lnet_ni_unlock(ni);
+ lnet_net_unlock(0);
+
+
+ CDEBUG(D_NET, "attempting to recover local ni: %s\n",
+ libcfs_nid2str(ni->ni_nid));
+
+ lnet_ni_lock(ni);
+ /* RECOVERY_PENDING set means a ping is already in flight */
+ if (!(ni->ni_recovery_state & LNET_NI_RECOVERY_PENDING)) {
+ ni->ni_recovery_state |= LNET_NI_RECOVERY_PENDING;
+ lnet_ni_unlock(ni);
+
+ LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
+ if (!ev_info) {
+ CERROR("out of memory. Can't recover %s\n",
+ libcfs_nid2str(ni->ni_nid));
+ lnet_ni_lock(ni);
+ ni->ni_recovery_state &=
+ ~LNET_NI_RECOVERY_PENDING;
+ lnet_ni_unlock(ni);
+ continue;
+ }
+
+ mdh = ni->ni_ping_mdh;
+ /*
+ * Invalidate the ni mdh in case it's deleted.
+ * We'll unlink the mdh in this case below.
+ */
+ LNetInvalidateMDHandle(&ni->ni_ping_mdh);
+ nid = ni->ni_nid;
+
+ /*
+ * remove the NI from the local queue and drop the
+ * reference count to it while we're recovering
+ * it. The reason for that, is that the NI could
+ * be deleted, and the way the code is structured
+ * is if we don't drop the NI, then the deletion
+ * code will enter a loop waiting for the
+ * reference count to be removed while holding the
+ * ln_mutex_lock(). When we look up the peer to
+ * send to in lnet_select_pathway() we will try to
+ * lock the ln_mutex_lock() as well, leading to
+ * a deadlock. By dropping the refcount and
+ * removing it from the list, we allow for the NI
+ * to be removed, then we use the cached NID to
+ * look it up again. If it's gone, then we just
+ * continue examining the rest of the queue.
+ */
+ lnet_net_lock(0);
+ list_del_init(&ni->ni_recovery);
+ lnet_ni_decref_locked(ni, 0);
+ lnet_net_unlock(0);
+
+ ev_info->mt_type = MT_TYPE_LOCAL_NI;
+ ev_info->mt_nid = nid;
+ rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
+ ev_info, the_lnet.ln_mt_eqh, true);
+ /* lookup the nid again */
+ lnet_net_lock(0);
+ /*
+ * reassigning the loop cursor here is safe: the _safe
+ * iterator already cached the next entry in tmp
+ */
+ ni = lnet_nid2ni_locked(nid, 0);
+ if (!ni) {
+ /*
+ * the NI has been deleted when we dropped
+ * the ref count
+ */
+ lnet_net_unlock(0);
+ LNetMDUnlink(mdh);
+ continue;
+ }
+ /*
+ * Same note as in lnet_recover_peer_nis(). When
+ * we're sending the ping, the NI is free to be
+ * deleted or manipulated. By this point it
+ * could've been added back on the recovery queue,
+ * and a refcount taken on it.
+ * So we can't just add it blindly again or we'll
+ * corrupt the queue. We must check under lock if
+ * it's not on any list and if not then add it
+ * to the processed list, which will eventually be
+ * spliced back on to the recovery queue.
+ */
+ ni->ni_ping_mdh = mdh;
+ if (list_empty(&ni->ni_recovery)) {
+ list_add_tail(&ni->ni_recovery, &processed_list);
+ lnet_ni_addref_locked(ni, 0);
+ }
+ lnet_net_unlock(0);
+
+ lnet_ni_lock(ni);
+ /* send failed: clear PENDING so a later pass can retry */
+ if (rc)
+ ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
+ }
+ lnet_ni_unlock(ni);
+ }
+
+ /*
+ * put back the remaining NIs on the ln_mt_localNIRecovq to be
+ * reexamined in the next iteration.
+ */
+ list_splice_init(&processed_list, &local_queue);
+ lnet_net_lock(0);
+ list_splice(&local_queue, &the_lnet.ln_mt_localNIRecovq);
+ lnet_net_unlock(0);
+}
+
+/*
+ * Allocate a per-CPT array of list heads and initialize each one
+ * empty. Returns NULL (after logging) on allocation failure; the
+ * caller owns the array and frees it with cfs_percpt_free().
+ */
+static struct list_head **
+lnet_create_array_of_queues(void)
+{
+ struct list_head **queues;
+ struct list_head *head;
+ int cpt;
+
+ queues = cfs_percpt_alloc(lnet_cpt_table(), sizeof(struct list_head));
+ if (!queues) {
+ CERROR("Failed to allocate queues\n");
+ return NULL;
+ }
+
+ /* every CPT starts with an empty queue */
+ cfs_percpt_for_each(head, cpt, queues)
+ INIT_LIST_HEAD(head);
+
+ return queues;
+}
+
+/*
+ * Create the per-CPT resend queues and publish them in
+ * the_lnet.ln_mt_resendqs under the exclusive net lock.
+ * Returns 0 on success or -ENOMEM.
+ */
+static int
+lnet_resendqs_create(void)
+{
+ struct list_head **queues;
+
+ queues = lnet_create_array_of_queues();
+ if (!queues)
+ return -ENOMEM;
+
+ /* publish atomically with respect to all net-lock holders */
+ lnet_net_lock(LNET_LOCK_EX);
+ the_lnet.ln_mt_resendqs = queues;
+ lnet_net_unlock(LNET_LOCK_EX);
+
+ return 0;
+}
+
+/*
+ * Drain the local NI recovery queue on shutdown: force-unlink any
+ * in-flight recovery ping MD and drop the reference each queued NI
+ * holds. This is only called when the monitor thread has stopped.
+ */
+static void
+lnet_clean_local_ni_recoveryq(void)
+{
+ struct lnet_ni *ni;
+
+ lnet_net_lock(0);
+ while ((ni = list_first_entry_or_null(&the_lnet.ln_mt_localNIRecovq,
+ struct lnet_ni,
+ ni_recovery)) != NULL) {
+ list_del_init(&ni->ni_recovery);
+ /* force == true: unlink the ping MD unconditionally */
+ lnet_ni_lock(ni);
+ lnet_unlink_ni_recovery_mdh_locked(ni, 0, true);
+ lnet_ni_unlock(ni);
+ /* drop the ref taken when the NI was queued for recovery */
+ lnet_ni_decref_locked(ni, 0);
+ }
+ lnet_net_unlock(0);
+}
+
+/*
+ * Unlink the recovery ping MD attached to @lpni, if any.
+ *
+ * Caller holds lnet_net_lock(@cpt) and lpni->lpni_lock; both are
+ * dropped across the LNetMDUnlink() call and reacquired before
+ * returning (presumably because the unlink may fire MD event
+ * callbacks -- TODO confirm). The handle is only grabbed when a
+ * recovery ping is in flight (RECOVERY_PENDING) unless @force is set.
+ */
+static void
+lnet_unlink_lpni_recovery_mdh_locked(struct lnet_peer_ni *lpni, int cpt,
+ bool force)
+{
+ struct lnet_handle_md recovery_mdh;
+
+ /* start invalid so we only unlink when armed below */
+ LNetInvalidateMDHandle(&recovery_mdh);
+
+ if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING || force) {
+ recovery_mdh = lpni->lpni_recovery_ping_mdh;
+ LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
+ }
+ spin_unlock(&lpni->lpni_lock);
+ lnet_net_unlock(cpt);
+ if (!LNetMDHandleIsInvalid(recovery_mdh))
+ LNetMDUnlink(recovery_mdh);
+ lnet_net_lock(cpt);
+ spin_lock(&lpni->lpni_lock);
+}
+
+/*
+ * Drain the peer NI recovery queue on shutdown: force-unlink any
+ * in-flight recovery ping MD and drop the ref each queued lpni holds.
+ *
+ * NOTE(review): lnet_unlink_lpni_recovery_mdh_locked() drops and
+ * reacquires lnet_net_lock(LNET_LOCK_EX) in the middle of this
+ * list_for_each_entry_safe() walk; that relies on nothing else
+ * touching ln_mt_peerNIRecovq once the monitor thread has stopped --
+ * confirm that is guaranteed on this path.
+ */
+static void
+lnet_clean_peer_ni_recoveryq(void)
+{
+ struct lnet_peer_ni *lpni, *tmp;
+
+ lnet_net_lock(LNET_LOCK_EX);
+
+ list_for_each_entry_safe(lpni, tmp, &the_lnet.ln_mt_peerNIRecovq,
+ lpni_recovery) {
+ list_del_init(&lpni->lpni_recovery);
+ spin_lock(&lpni->lpni_lock);
+ lnet_unlink_lpni_recovery_mdh_locked(lpni, LNET_LOCK_EX, true);
+ spin_unlock(&lpni->lpni_lock);
+ lnet_peer_ni_decref_locked(lpni);
+ }
+
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
+/*
+ * Shut down the resend machinery: for each CPT, detach the pending
+ * resend list under that CPT's net lock, finalize every message with
+ * -ESHUTDOWN outside the lock, then free the per-CPT array.
+ */
+static void
+lnet_clean_resendqs(void)
+{
+ struct list_head zombies;
+ struct lnet_msg *msg;
+ int cpt;
+
+ INIT_LIST_HEAD(&zombies);
+
+ cfs_cpt_for_each(cpt, lnet_cpt_table()) {
+ lnet_net_lock(cpt);
+ list_splice_init(the_lnet.ln_mt_resendqs[cpt], &zombies);
+ lnet_net_unlock(cpt);
+
+ while (!list_empty(&zombies)) {
+ msg = list_first_entry(&zombies, struct lnet_msg,
+ msg_list);
+ list_del_init(&msg->msg_list);
+ /* make sure finalizing cannot requeue the message */
+ msg->msg_no_resend = true;
+ lnet_finalize(msg, -ESHUTDOWN);
+ }
+ }
+
+ cfs_percpt_free(the_lnet.ln_mt_resendqs);
+}
+
+/*
+ * Attempt to recover peer NIs queued on ln_mt_peerNIRecovq by sending
+ * each one a ping (lnet_send_ping()). Run from the monitor thread.
+ * Peer NIs that are being deleted, or whose health is back at
+ * LNET_MAX_HEALTH_VALUE, are dropped from the queue; the rest are
+ * requeued for the next pass.
+ */
+static void
+lnet_recover_peer_nis(void)
+{
+ struct lnet_mt_event_info *ev_info;
+ struct list_head processed_list;
+ struct list_head local_queue;
+ struct lnet_handle_md mdh;
+ struct lnet_peer_ni *lpni;
+ struct lnet_peer_ni *tmp;
+ lnet_nid_t nid;
+ int healthv;
+ int rc;
+
+ INIT_LIST_HEAD(&local_queue);
+ INIT_LIST_HEAD(&processed_list);
+
+ /*
+ * Always use cpt 0 for locking across all interactions with
+ * ln_mt_peerNIRecovq
+ */
+ lnet_net_lock(0);
+ list_splice_init(&the_lnet.ln_mt_peerNIRecovq,
+ &local_queue);
+ lnet_net_unlock(0);
+
+ list_for_each_entry_safe(lpni, tmp, &local_queue,
+ lpni_recovery) {
+ /*
+ * The same protection strategy is used here as is in the
+ * local recovery case.
+ */
+ lnet_net_lock(0);
+ healthv = atomic_read(&lpni->lpni_healthv);
+ spin_lock(&lpni->lpni_lock);
+ if (lpni->lpni_state & LNET_PEER_NI_DELETING ||
+ healthv == LNET_MAX_HEALTH_VALUE) {
+ list_del_init(&lpni->lpni_recovery);
+ lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, false);
+ spin_unlock(&lpni->lpni_lock);
+ /* drop the ref the recovery queue held */
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(0);
+ continue;
+ }
+
+ /*
+ * If the peer NI has failed recovery we must unlink the
+ * md. But we want to keep the peer ni on the recovery
+ * queue so we can try to continue recovering it
+ */
+ if (lpni->lpni_state & LNET_PEER_NI_RECOVERY_FAILED) {
+ lnet_unlink_lpni_recovery_mdh_locked(lpni, 0, true);
+ lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_FAILED;
+ }
+
+ spin_unlock(&lpni->lpni_lock);
+ lnet_net_unlock(0);
+
+ /*
+ * NOTE: we're racing with peer deletion from user space.
+ * It's possible that a peer is deleted after we check its
+ * state. In this case the recovery can create a new peer
+ */
+ spin_lock(&lpni->lpni_lock);
+ /* RECOVERY_PENDING set means a ping is already in flight */
+ if (!(lpni->lpni_state & LNET_PEER_NI_RECOVERY_PENDING) &&
+ !(lpni->lpni_state & LNET_PEER_NI_DELETING)) {
+ lpni->lpni_state |= LNET_PEER_NI_RECOVERY_PENDING;
+ spin_unlock(&lpni->lpni_lock);
+
+ LIBCFS_ALLOC(ev_info, sizeof(*ev_info));
+ if (!ev_info) {
+ CERROR("out of memory. Can't recover %s\n",
+ libcfs_nid2str(lpni->lpni_nid));
+ spin_lock(&lpni->lpni_lock);
+ lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
+ spin_unlock(&lpni->lpni_lock);
+ continue;
+ }
+
+ /* look at the comments in lnet_recover_local_nis() */
+ mdh = lpni->lpni_recovery_ping_mdh;
+ LNetInvalidateMDHandle(&lpni->lpni_recovery_ping_mdh);
+ nid = lpni->lpni_nid;
+ lnet_net_lock(0);
+ list_del_init(&lpni->lpni_recovery);
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(0);
+
+ ev_info->mt_type = MT_TYPE_PEER_NI;
+ ev_info->mt_nid = nid;
+ rc = lnet_send_ping(nid, &mdh, LNET_INTERFACES_MIN,
+ ev_info, the_lnet.ln_mt_eqh, true);
+ lnet_net_lock(0);
+ /*
+ * lnet_find_peer_ni_locked() grabs a refcount for
+ * us. No need to take it explicitly.
+ * Reassigning the loop cursor is safe: the _safe
+ * iterator already cached the next entry in tmp.
+ */
+ lpni = lnet_find_peer_ni_locked(nid);
+ if (!lpni) {
+ lnet_net_unlock(0);
+ LNetMDUnlink(mdh);
+ continue;
+ }
+
+ lpni->lpni_recovery_ping_mdh = mdh;
+ /*
+ * While we're unlocked the lpni could've been
+ * readded on the recovery queue. In this case we
+ * don't need to add it to the local queue, since
+ * it's already on there and the thread that added
+ * it would've incremented the refcount on the
+ * peer, which means we need to decref the refcount
+ * that was implicitly grabbed by find_peer_ni_locked.
+ * Otherwise, if the lpni is still not on
+ * the recovery queue, then we'll add it to the
+ * processed list.
+ */
+ if (list_empty(&lpni->lpni_recovery))
+ list_add_tail(&lpni->lpni_recovery, &processed_list);
+ else
+ lnet_peer_ni_decref_locked(lpni);
+ lnet_net_unlock(0);
+
+ spin_lock(&lpni->lpni_lock);
+ /* send failed: clear PENDING so a later pass can retry */
+ if (rc)
+ lpni->lpni_state &= ~LNET_PEER_NI_RECOVERY_PENDING;
+ }
+ spin_unlock(&lpni->lpni_lock);
+ }
+
+ /* requeue everything still unhealthy for the next pass */
+ list_splice_init(&processed_list, &local_queue);
+ lnet_net_lock(0);
+ list_splice(&local_queue, &the_lnet.ln_mt_peerNIRecovq);
+ lnet_net_unlock(0);
+}
+
+static int
+lnet_monitor_thread(void *arg)
+{
+ time64_t recovery_timeout = 0;
+ time64_t rsp_timeout = 0;
+ int interval;
+ time64_t now;
+
+ /*
+ * The monitor thread takes care of the following:
+ * 1. Checks the aliveness of routers
+ * 2. Checks if there are messages on the resend queue to resend
+ * them.
+ * 3. Check if there are any NIs on the local recovery queue and
+ * pings them
+ * 4. Checks if there are any NIs on the remote recovery queue
+ * and pings them.
+ */
+ cfs_block_allsigs();
+
+ while (the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING) {
+ now = ktime_get_real_seconds();
+
+ if (lnet_router_checker_active())
+ lnet_check_routers();
+
+ lnet_resend_pending_msgs();
+
+ if (now >= rsp_timeout) {
+ lnet_finalize_expired_responses(false);
+ rsp_timeout = now + (lnet_transaction_timeout / 2);
+ }
+
+ if (now >= recovery_timeout) {
+ lnet_recover_local_nis();
+ lnet_recover_peer_nis();
+ recovery_timeout = now + lnet_recovery_interval;
+ }