return rc;
}
-/*
- * This function can be called from two paths:
- * 1. when sending a message
- * 2. when decommiting a message (lnet_msg_decommit_tx())
- * In both these cases the peer_ni should have it's reference count
- * acquired by the caller and therefore it is safe to drop the spin
- * lock before calling lnd_query()
- */
-static void
-lnet_ni_query_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp)
-{
- time64_t last_alive = 0;
- int cpt = lnet_cpt_of_nid_locked(lp->lpni_nid, ni);
-
- LASSERT(lnet_peer_aliveness_enabled(lp));
- LASSERT(ni->ni_net->net_lnd->lnd_query != NULL);
-
- lnet_net_unlock(cpt);
- (ni->ni_net->net_lnd->lnd_query)(ni, lp->lpni_nid, &last_alive);
- lnet_net_lock(cpt);
-
- lp->lpni_last_query = ktime_get_seconds();
-
- if (last_alive != 0) /* NI has updated timestamp */
- lp->lpni_last_alive = last_alive;
-}
-
-/* NB: always called with lnet_net_lock held */
-static inline int
-lnet_peer_is_alive(struct lnet_peer_ni *lp, time64_t now)
-{
- int alive;
- time64_t deadline;
-
- LASSERT (lnet_peer_aliveness_enabled(lp));
-
- /*
- * Trust lnet_notify() if it has more recent aliveness news, but
- * ignore the initial assumed death (see lnet_peers_start_down()).
- */
- spin_lock(&lp->lpni_lock);
- if (!lp->lpni_alive && lp->lpni_alive_count > 0 &&
- lp->lpni_timestamp >= lp->lpni_last_alive) {
- spin_unlock(&lp->lpni_lock);
- return 0;
- }
-
- deadline = lp->lpni_last_alive +
- lp->lpni_net->net_tunables.lct_peer_timeout;
- alive = deadline > now;
-
- /*
- * Update obsolete lp_alive except for routers assumed to be dead
- * initially, because router checker would update aliveness in this
- * case, and moreover lpni_last_alive at peer creation is assumed.
- */
- if (alive && !lp->lpni_alive &&
- !(lnet_isrouter(lp) && lp->lpni_alive_count == 0)) {
- spin_unlock(&lp->lpni_lock);
- lnet_notify_locked(lp, 0, 1, lp->lpni_last_alive);
- } else {
- spin_unlock(&lp->lpni_lock);
- }
-
- return alive;
-}
-
-
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
static int
-lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lp,
+lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
struct lnet_msg *msg)
{
- time64_t now = ktime_get_seconds();
-
- if (!lnet_peer_aliveness_enabled(lp))
+ if (!lnet_peer_aliveness_enabled(lpni))
return -ENODEV;
- if (lnet_peer_is_alive(lp, now))
- return 1;
-
/*
* If we're resending a message, let's attempt to send it even if
* the peer is down to fulfill our resend quota on the message
if (msg->msg_retry_count > 0)
return 1;
- /*
- * Peer appears dead, but we should avoid frequent NI queries (at
- * most once per lnet_queryinterval seconds).
- */
- if (lp->lpni_last_query != 0) {
- static const int lnet_queryinterval = 1;
- time64_t next_query;
-
- next_query = lp->lpni_last_query + lnet_queryinterval;
-
- if (now < next_query) {
- if (lp->lpni_alive)
- CWARN("Unexpected aliveness of peer %s: "
- "%lld < %lld (%d/%d)\n",
- libcfs_nid2str(lp->lpni_nid),
- now, next_query,
- lnet_queryinterval,
- lp->lpni_net->net_tunables.lct_peer_timeout);
- return 0;
- }
- }
-
- /* query NI for latest aliveness news */
- lnet_ni_query_locked(ni, lp);
+ /* try to send recovery messages regardless */
+ if (msg->msg_recovery)
+ return 1;
- if (lnet_peer_is_alive(lp, now))
+ /* always send any responses */
+ if (msg->msg_type == LNET_MSG_ACK ||
+ msg->msg_type == LNET_MSG_REPLY)
return 1;
- lnet_notify_locked(lp, 0, 0, lp->lpni_last_alive);
- return 0;
+ return lnet_is_peer_ni_alive(lpni);
}
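
/*
 * Illustrative, self-contained sketch (not part of this patch): a
 * userspace model of the decision order the rewritten
 * lnet_peer_alive_locked() applies before a send. Retried messages,
 * recovery messages, and ACK/REPLY responses bypass the health check;
 * everything else falls through to the cached peer-NI health state,
 * modelled here by a plain boolean standing in for
 * lnet_is_peer_ni_alive(). All model_* names are hypothetical.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

enum model_msg_type { MODEL_MSG_PUT, MODEL_MSG_ACK, MODEL_MSG_REPLY };

struct model_msg {
	enum model_msg_type type;
	int retry_count;
	bool recovery;
};

struct model_peer_ni {
	bool aliveness_enabled;	/* stands in for lnet_peer_aliveness_enabled() */
	bool healthy;		/* stands in for lnet_is_peer_ni_alive() */
};

/* returns 1 when the send may proceed, 0 when dead, -ENODEV when not tracked */
static int model_peer_alive(const struct model_peer_ni *lpni,
			    const struct model_msg *msg)
{
	if (!lpni->aliveness_enabled)
		return -ENODEV;
	if (msg->retry_count > 0)	/* honour the resend quota */
		return 1;
	if (msg->recovery)		/* recovery messages always go out */
		return 1;
	if (msg->type == MODEL_MSG_ACK || msg->type == MODEL_MSG_REPLY)
		return 1;		/* always answer the remote */
	return lpni->healthy ? 1 : 0;
}

int main(void)
{
	struct model_peer_ni dead = { .aliveness_enabled = true, .healthy = false };
	struct model_msg put = { .type = MODEL_MSG_PUT };
	struct model_msg ack = { .type = MODEL_MSG_ACK };

	printf("PUT to unhealthy peer: %d\n", model_peer_alive(&dead, &put)); /* 0 */
	printf("ACK to unhealthy peer: %d\n", model_peer_alive(&dead, &ack)); /* 1 */
	return 0;
}
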
/**
LASSERT(!do_send || msg->msg_tx_delayed);
LASSERT(!msg->msg_receiving);
LASSERT(msg->msg_tx_committed);
+ /* can't get here if we're sending to the loopback interface */
+ LASSERT(lp->lpni_nid != the_lnet.ln_loni->ni_nid);
/* NB 'lp' is always the next hop */
if ((msg->msg_target.pid & LNET_PID_USERFLAG) == 0 &&
CNETERR("Dropping message for %s: peer not alive\n",
libcfs_id2str(msg->msg_target));
- msg->msg_health_status = LNET_MSG_STATUS_LOCAL_DROPPED;
+ msg->msg_health_status = LNET_MSG_STATUS_REMOTE_DROPPED;
if (do_send)
lnet_finalize(msg, -EHOSTUNREACH);
libcfs_id2str(msg->msg_target));
if (do_send) {
msg->msg_no_resend = true;
+ CDEBUG(D_NET, "msg %p to %s canceled and will not be resent\n",
+ msg, libcfs_id2str(msg->msg_target));
lnet_finalize(msg, -ECANCELED);
}
* sets do_recv FALSE and I don't do the unlock/send/lock bit.
* I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
* received or OK to receive */
- struct lnet_peer_ni *lp = msg->msg_rxpeer;
+ struct lnet_peer_ni *lpni = msg->msg_rxpeer;
+ struct lnet_peer *lp;
struct lnet_rtrbufpool *rbp;
struct lnet_rtrbuf *rb;
- LASSERT (msg->msg_iov == NULL);
- LASSERT (msg->msg_kiov == NULL);
- LASSERT (msg->msg_niov == 0);
- LASSERT (msg->msg_routing);
- LASSERT (msg->msg_receiving);
- LASSERT (!msg->msg_sending);
+ LASSERT(msg->msg_iov == NULL);
+ LASSERT(msg->msg_kiov == NULL);
+ LASSERT(msg->msg_niov == 0);
+ LASSERT(msg->msg_routing);
+ LASSERT(msg->msg_receiving);
+ LASSERT(!msg->msg_sending);
+ LASSERT(lpni->lpni_peer_net);
+ LASSERT(lpni->lpni_peer_net->lpn_peer);
+
+ lp = lpni->lpni_peer_net->lpn_peer;
/* non-lnet_parse callers only receive delayed messages */
LASSERT(!do_recv || msg->msg_rx_delayed);
if (!msg->msg_peerrtrcredit) {
- spin_lock(&lp->lpni_lock);
- LASSERT((lp->lpni_rtrcredits < 0) ==
- !list_empty(&lp->lpni_rtrq));
+ /* lpni_lock protects the credit manipulation */
+ spin_lock(&lpni->lpni_lock);
+ /* lp_lock protects the lp_rtrq */
+ spin_lock(&lp->lp_lock);
msg->msg_peerrtrcredit = 1;
- lp->lpni_rtrcredits--;
- if (lp->lpni_rtrcredits < lp->lpni_minrtrcredits)
- lp->lpni_minrtrcredits = lp->lpni_rtrcredits;
+ lpni->lpni_rtrcredits--;
+ if (lpni->lpni_rtrcredits < lpni->lpni_minrtrcredits)
+ lpni->lpni_minrtrcredits = lpni->lpni_rtrcredits;
- if (lp->lpni_rtrcredits < 0) {
+ if (lpni->lpni_rtrcredits < 0) {
/* must have checked eager_recv before here */
LASSERT(msg->msg_rx_ready_delay);
msg->msg_rx_delayed = 1;
- list_add_tail(&msg->msg_list, &lp->lpni_rtrq);
- spin_unlock(&lp->lpni_lock);
+ list_add_tail(&msg->msg_list, &lp->lp_rtrq);
+ spin_unlock(&lp->lp_lock);
+ spin_unlock(&lpni->lpni_lock);
return LNET_CREDIT_WAIT;
}
- spin_unlock(&lp->lpni_lock);
+ spin_unlock(&lp->lp_lock);
+ spin_unlock(&lpni->lpni_lock);
}
rbp = lnet_msg2bufpool(msg);
0, 0, 0, msg->msg_hdr.payload_length);
list_del_init(&msg->msg_list);
msg->msg_no_resend = true;
+ msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
lnet_finalize(msg, -ECANCELED);
}
void
lnet_return_rx_credits_locked(struct lnet_msg *msg)
{
- struct lnet_peer_ni *rxpeer = msg->msg_rxpeer;
+ struct lnet_peer_ni *rxpeerni = msg->msg_rxpeer;
+ struct lnet_peer *lp;
struct lnet_ni *rxni = msg->msg_rxni;
struct lnet_msg *msg2;
routing_off:
if (msg->msg_peerrtrcredit) {
+ LASSERT(rxpeerni);
+ LASSERT(rxpeerni->lpni_peer_net);
+ LASSERT(rxpeerni->lpni_peer_net->lpn_peer);
+
+ lp = rxpeerni->lpni_peer_net->lpn_peer;
+
/* give back peer router credits */
msg->msg_peerrtrcredit = 0;
- spin_lock(&rxpeer->lpni_lock);
- LASSERT((rxpeer->lpni_rtrcredits < 0) ==
- !list_empty(&rxpeer->lpni_rtrq));
+ spin_lock(&rxpeerni->lpni_lock);
+ spin_lock(&lp->lp_lock);
- rxpeer->lpni_rtrcredits++;
+ rxpeerni->lpni_rtrcredits++;
/* drop all messages which are queued to be routed on that
* peer. */
if (!the_lnet.ln_routing) {
struct list_head drop;
INIT_LIST_HEAD(&drop);
- list_splice_init(&rxpeer->lpni_rtrq, &drop);
- spin_unlock(&rxpeer->lpni_lock);
+ list_splice_init(&lp->lp_rtrq, &drop);
+ spin_unlock(&lp->lp_lock);
+ spin_unlock(&rxpeerni->lpni_lock);
lnet_drop_routed_msgs_locked(&drop, msg->msg_rx_cpt);
- } else if (rxpeer->lpni_rtrcredits <= 0) {
- msg2 = list_entry(rxpeer->lpni_rtrq.next,
+ } else if (!list_empty(&lp->lp_rtrq)) {
+ int msg2_cpt;
+
+ msg2 = list_entry(lp->lp_rtrq.next,
struct lnet_msg, msg_list);
list_del(&msg2->msg_list);
- spin_unlock(&rxpeer->lpni_lock);
+ msg2_cpt = msg2->msg_rx_cpt;
+ spin_unlock(&lp->lp_lock);
+ spin_unlock(&rxpeerni->lpni_lock);
+ /*
+ * messages on the lp_rtrq can be from any NID in
+ * the peer, which means they might have different
+ * cpts. We need to make sure we lock the right
+ * one.
+ */
+ if (msg2_cpt != msg->msg_rx_cpt) {
+ lnet_net_unlock(msg->msg_rx_cpt);
+ lnet_net_lock(msg2_cpt);
+ }
(void) lnet_post_routed_recv_locked(msg2, 1);
+ if (msg2_cpt != msg->msg_rx_cpt) {
+ lnet_net_unlock(msg2_cpt);
+ lnet_net_lock(msg->msg_rx_cpt);
+ }
} else {
- spin_unlock(&rxpeer->lpni_lock);
+ spin_unlock(&lp->lp_lock);
+ spin_unlock(&rxpeerni->lpni_lock);
}
}
if (rxni != NULL) {
msg->msg_rxni = NULL;
lnet_ni_decref_locked(rxni, msg->msg_rx_cpt);
}
- if (rxpeer != NULL) {
+ if (rxpeerni != NULL) {
msg->msg_rxpeer = NULL;
- lnet_peer_ni_decref_locked(rxpeer);
+ lnet_peer_ni_decref_locked(rxpeerni);
}
}
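
/*
 * Illustrative, self-contained sketch (not part of this patch): the
 * lp_rtrq is now kept on the peer rather than the peer-NI, so a queued
 * routed message may belong to a different CPT than the one this thread
 * currently holds. This models the lock hand-off used above when
 * reposting such a message: drop the current CPT's net lock, take the
 * queued message's CPT lock around the repost, then restore the
 * original lock. The model_net_lock()/model_net_unlock() stubs stand in
 * for lnet_net_lock()/lnet_net_unlock() and are hypothetical.
 */
#include <stdio.h>

static void model_net_lock(int cpt)   { printf("lock   cpt %d\n", cpt); }
static void model_net_unlock(int cpt) { printf("unlock cpt %d\n", cpt); }

static void model_repost(int held_cpt, int msg2_cpt)
{
	/* only switch locks when the queued message maps to another CPT */
	if (msg2_cpt != held_cpt) {
		model_net_unlock(held_cpt);
		model_net_lock(msg2_cpt);
	}

	printf("repost message under cpt %d\n", msg2_cpt);

	if (msg2_cpt != held_cpt) {
		model_net_unlock(msg2_cpt);
		model_net_lock(held_cpt);
	}
}

int main(void)
{
	model_repost(0, 0);	/* same CPT: no lock juggling */
	model_repost(0, 2);	/* different CPT: hand off and restore */
	return 0;
}
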
+#if 0
static int
lnet_compare_peers(struct lnet_peer_ni *p1, struct lnet_peer_ni *p2)
{
return 0;
}
+#endif
static int
lnet_compare_routes(struct lnet_route *r1, struct lnet_route *r2)
{
+ /* TODO re-implement gateway comparison
struct lnet_peer_ni *p1 = r1->lr_gateway;
struct lnet_peer_ni *p2 = r2->lr_gateway;
+ */
int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;
- int rc;
+ /*int rc;*/
if (r1->lr_priority < r2->lr_priority)
return 1;
if (r1_hops > r2_hops)
return -1;
+ /*
rc = lnet_compare_peers(p1, p2);
if (rc)
return rc;
+ */
if (r1->lr_seq - r2->lr_seq <= 0)
return 1;
return -1;
}
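
/*
 * Illustrative, self-contained sketch (not part of this patch): the
 * route ordering that survives this change in lnet_compare_routes().
 * Priority is compared first (lower value preferred), then hop count
 * (LNET_UNDEFINED_HOPS treated as one hop), and finally the lr_seq
 * counter, which gives round-robin behaviour because the sequence of
 * the route actually chosen is bumped past the last route's (see the
 * lr_seq update added in lnet_handle_find_routed_path()). All model_*
 * names are hypothetical.
 */
#include <stdio.h>

#define MODEL_UNDEFINED_HOPS (-1)

struct model_route {
	int priority;		/* lr_priority: lower is preferred */
	int hops;		/* lr_hops, may be MODEL_UNDEFINED_HOPS */
	unsigned int seq;	/* lr_seq: round-robin counter */
};

/* > 0 if r1 is the better route, < 0 if r2 is */
static int model_compare_routes(const struct model_route *r1,
				const struct model_route *r2)
{
	int h1 = r1->hops == MODEL_UNDEFINED_HOPS ? 1 : r1->hops;
	int h2 = r2->hops == MODEL_UNDEFINED_HOPS ? 1 : r2->hops;

	if (r1->priority != r2->priority)
		return r1->priority < r2->priority ? 1 : -1;
	if (h1 != h2)
		return h1 < h2 ? 1 : -1;
	/* equal priority and hops: round-robin on the sequence number */
	return (int)(r1->seq - r2->seq) <= 0 ? 1 : -1;
}

int main(void)
{
	struct model_route a = { .priority = 0, .hops = MODEL_UNDEFINED_HOPS, .seq = 5 };
	struct model_route b = { .priority = 0, .hops = 1, .seq = 3 };

	/* same priority, same effective hop count, b has the older sequence */
	printf("compare(a, b) = %d\n", model_compare_routes(&a, &b)); /* -1: b wins */
	return 0;
}
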
-static struct lnet_peer_ni *
+/* TODO: lnet_find_route_locked() needs to be reimplemented */
+static struct lnet_route *
lnet_find_route_locked(struct lnet_net *net, __u32 remote_net,
- lnet_nid_t rtr_nid)
+ lnet_nid_t rtr_nid, struct lnet_route **prev_route)
{
- struct lnet_remotenet *rnet;
- struct lnet_route *route;
- struct lnet_route *best_route;
- struct lnet_route *last_route;
- struct lnet_peer_ni *lpni_best;
- struct lnet_peer_ni *lp;
- int rc;
+ struct lnet_remotenet *rnet;
+ struct lnet_route *route;
+ struct lnet_route *best_route;
+ struct lnet_route *last_route;
+ struct lnet_peer *lp_best;
+ struct lnet_peer *lp;
+ int rc;
/* If @rtr_nid is not LNET_NID_ANY, return the gateway with
* rtr_nid nid, otherwise find the best gateway I can use */
if (rnet == NULL)
return NULL;
- lpni_best = NULL;
+ lp_best = NULL;
best_route = last_route = NULL;
list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
lp = route->lr_gateway;
if (!lnet_is_route_alive(route))
continue;
- if (net != NULL && lp->lpni_net != net)
- continue;
-
- if (lp->lpni_nid == rtr_nid) /* it's pre-determined router */
- return lp;
-
- if (lpni_best == NULL) {
+ if (lp_best == NULL) {
best_route = last_route = route;
- lpni_best = lp;
+ lp_best = lp;
continue;
}
continue;
best_route = route;
- lpni_best = lp;
+ lp_best = lp;
}
- /* set sequence number on the best router to the latest sequence + 1
- * so we can round-robin all routers, it's race and inaccurate but
- * harmless and functional */
- if (best_route != NULL)
- best_route->lr_seq = last_route->lr_seq + 1;
- return lpni_best;
+ *prev_route = last_route;
+
+ return best_route;
}
static struct lnet_ni *
#define SRC_ANY_ROUTER_NMR_DST (SRC_ANY | REMOTE_DST | NMR_DST)
static int
+lnet_handle_lo_send(struct lnet_send_data *sd)
+{
+ struct lnet_msg *msg = sd->sd_msg;
+ int cpt = sd->sd_cpt;
+
+ /* No send credit hassles with LOLND */
+ lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
+ msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
+ if (!msg->msg_routing)
+ msg->msg_hdr.src_nid =
+ cpu_to_le64(the_lnet.ln_loni->ni_nid);
+ msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
+ lnet_msg_commit(msg, cpt);
+ msg->msg_txni = the_lnet.ln_loni;
+
+ return LNET_CREDIT_OK;
+}
+
+static int
lnet_handle_send(struct lnet_send_data *sd)
{
struct lnet_ni *best_ni = sd->sd_best_ni;
sd->sd_best_ni->ni_net->net_id);
}
- if (sd->sd_best_lpni)
+ if (sd->sd_best_lpni &&
+ sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid)
+ return lnet_handle_lo_send(sd);
+ else if (sd->sd_best_lpni)
return lnet_handle_send(sd);
CERROR("can't send to %s. no NI on %s\n",
}
static int
+lnet_initiate_peer_discovery(struct lnet_peer_ni *lpni,
+ struct lnet_msg *msg, lnet_nid_t rtr_nid,
+ int cpt)
+{
+ struct lnet_peer *peer;
+ lnet_nid_t primary_nid;
+ int rc;
+
+ lnet_peer_ni_addref_locked(lpni);
+
+ rc = lnet_discover_peer_locked(lpni, cpt, false);
+ if (rc) {
+ lnet_peer_ni_decref_locked(lpni);
+ return rc;
+ }
+ /* The peer may have changed. */
+ peer = lpni->lpni_peer_net->lpn_peer;
+ /* queue message and return */
+ msg->msg_rtr_nid_param = rtr_nid;
+ msg->msg_sending = 0;
+ msg->msg_txpeer = NULL;
+ spin_lock(&peer->lp_lock);
+ list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
+ spin_unlock(&peer->lp_lock);
+ lnet_peer_ni_decref_locked(lpni);
+ primary_nid = peer->lp_primary_nid;
+
+ CDEBUG(D_NET, "msg %p delayed. %s pending discovery\n",
+ msg, libcfs_nid2str(primary_nid));
+
+ return LNET_DC_WAIT;
+}
+
+static int
lnet_handle_find_routed_path(struct lnet_send_data *sd,
lnet_nid_t dst_nid,
struct lnet_peer_ni **gw_lpni,
struct lnet_peer **gw_peer)
{
- struct lnet_peer_ni *gw;
+ struct lnet_peer *gw;
+ struct lnet_route *best_route;
+ struct lnet_route *last_route;
+ struct lnet_peer_ni *lpni = NULL;
lnet_nid_t src_nid = sd->sd_src_nid;
- gw = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid),
- sd->sd_rtr_nid);
- if (!gw) {
+ best_route = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid),
+ sd->sd_rtr_nid, &last_route);
+ if (!best_route) {
CERROR("no route to %s from %s\n",
libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
return -EHOSTUNREACH;
}
- /* get the peer of the gw_ni */
- LASSERT(gw->lpni_peer_net);
- LASSERT(gw->lpni_peer_net->lpn_peer);
+ gw = best_route->lr_gateway;
+ *gw_peer = gw;
- *gw_peer = gw->lpni_peer_net->lpn_peer;
+ /*
+ * Discover this gateway if it hasn't already been discovered.
+ * This means we might delay the message until discovery has
+ * completed
+ */
+#if 0
+ /* TODO: discovery is disabled for now */
+ if (lnet_msg_discovery(sd->sd_msg) &&
+ !lnet_peer_is_uptodate(*gw_peer)) {
+ sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
+ return lnet_initiate_peer_discovery(gw, sd->sd_msg,
+ sd->sd_rtr_nid, sd->sd_cpt);
+ }
+#endif
if (!sd->sd_best_ni)
- sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, *gw_peer,
- gw->lpni_peer_net,
+ sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
+ lnet_peer_get_net_locked(gw,
+ best_route->lr_lnet),
sd->sd_md_cpt,
true);
if (!sd->sd_best_ni) {
CERROR("Internal Error. Expected local ni on %s "
"but non found :%s\n",
- libcfs_net2str(gw->lpni_peer_net->lpn_net_id),
+ libcfs_net2str(best_route->lr_lnet),
libcfs_nid2str(sd->sd_src_nid));
return -EFAULT;
}
/*
* if gw is MR let's find its best peer_ni
*/
- if (lnet_peer_is_multi_rail(*gw_peer)) {
- gw = lnet_find_best_lpni_on_net(sd, *gw_peer,
- sd->sd_best_ni->ni_net->net_id);
+ if (lnet_peer_is_multi_rail(gw)) {
+ lpni = lnet_find_best_lpni_on_net(sd, gw,
+ sd->sd_best_ni->ni_net->net_id);
/*
* We've already verified that the gw has an NI on that
* desired net, but we're not finding it. Something is
* wrong.
*/
- if (!gw) {
+ if (!lpni) {
+ CERROR("Internal Error. Route expected to %s from %s\n",
+ libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EFAULT;
+ }
+ } else {
+ struct lnet_peer_net *lpn;
+ lpn = lnet_peer_get_net_locked(gw, best_route->lr_lnet);
+ if (!lpn) {
+ CERROR("Internal Error. Route expected to %s from %s\n",
+ libcfs_nid2str(dst_nid),
+ libcfs_nid2str(src_nid));
+ return -EFAULT;
+ }
+ lpni = list_entry(lpn->lpn_peer_nis.next, struct lnet_peer_ni,
+ lpni_peer_nis);
+ if (!lpni) {
CERROR("Internal Error. Route expected to %s from %s\n",
libcfs_nid2str(dst_nid),
libcfs_nid2str(src_nid));
}
}
- *gw_lpni = gw;
+ *gw_lpni = lpni;
+
+ /*
+ * increment the route sequence number since now we're sure we're
+ * going to use it
+ */
+ LASSERT(best_route && last_route);
+ best_route->lr_seq = last_route->lr_seq + 1;
return 0;
}
rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
&gw_peer);
- if (rc < 0)
+ if (rc)
return rc;
if (sd->sd_send_case & NMR_DST)
* try and see if we can reach it over another routed
* network
*/
- if (sd->sd_best_lpni) {
+ if (sd->sd_best_lpni &&
+ sd->sd_best_lpni->lpni_nid == the_lnet.ln_loni->ni_nid) {
+ /*
+ * in case we initially started with a routed
+ * destination, let's reset to local
+ */
+ sd->sd_send_case &= ~REMOTE_DST;
+ sd->sd_send_case |= LOCAL_DST;
+ return lnet_handle_lo_send(sd);
+ } else if (sd->sd_best_lpni) {
/*
* in case we initially started with a routed
* destination, let's reset to local
"No route available\n",
libcfs_nid2str(sd->sd_dst_nid));
return -EHOSTUNREACH;
+ } else if (rc > 0) {
+ return rc;
}
sd->sd_best_lpni = gw;
*/
rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
&gw_peer);
- if (rc < 0)
+ if (rc)
return rc;
sd->sd_send_case &= ~LOCAL_DST;
*/
rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
&gw_peer);
- if (rc < 0)
+ if (rc)
return rc;
/*
* is no need to go through any selection. We can just shortcut
* the entire process and send over lolnd
*/
+ send_data.sd_msg = msg;
+ send_data.sd_cpt = cpt;
if (LNET_NETTYP(LNET_NIDNET(dst_nid)) == LOLND) {
- /* No send credit hassles with LOLND */
- lnet_ni_addref_locked(the_lnet.ln_loni, cpt);
- msg->msg_hdr.dest_nid = cpu_to_le64(the_lnet.ln_loni->ni_nid);
- if (!msg->msg_routing)
- msg->msg_hdr.src_nid =
- cpu_to_le64(the_lnet.ln_loni->ni_nid);
- msg->msg_target.nid = the_lnet.ln_loni->ni_nid;
- lnet_msg_commit(msg, cpt);
- msg->msg_txni = the_lnet.ln_loni;
+ rc = lnet_handle_lo_send(&send_data);
lnet_net_unlock(cpt);
-
- return LNET_CREDIT_OK;
+ return rc;
}
/*
*/
peer = lpni->lpni_peer_net->lpn_peer;
if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) {
- lnet_nid_t primary_nid;
- rc = lnet_discover_peer_locked(lpni, cpt, false);
- if (rc) {
- lnet_peer_ni_decref_locked(lpni);
- lnet_net_unlock(cpt);
- return rc;
- }
- /* The peer may have changed. */
- peer = lpni->lpni_peer_net->lpn_peer;
- /* queue message and return */
- msg->msg_rtr_nid_param = rtr_nid;
- msg->msg_sending = 0;
- list_add_tail(&msg->msg_list, &peer->lp_dc_pendq);
+ rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
lnet_peer_ni_decref_locked(lpni);
- primary_nid = peer->lp_primary_nid;
lnet_net_unlock(cpt);
-
- CDEBUG(D_NET, "%s pending discovery\n",
- libcfs_nid2str(primary_nid));
-
- return LNET_DC_WAIT;
+ return rc;
}
lnet_peer_ni_decref_locked(lpni);
send_case |= SND_RESP;
/* assign parameters to the send_data */
- send_data.sd_msg = msg;
send_data.sd_rtr_nid = rtr_nid;
send_data.sd_src_nid = src_nid;
send_data.sd_dst_nid = dst_nid;
send_data.sd_final_dst_lpni = lpni;
send_data.sd_peer = peer;
send_data.sd_md_cpt = md_cpt;
- send_data.sd_cpt = cpt;
send_data.sd_send_case = send_case;
rc = lnet_handle_send_case_locked(&send_data);
+ /*
+ * Update the local cpt since send_data.sd_cpt might've been
+ * updated as a result of calling lnet_handle_send_case_locked().
+ */
+ cpt = send_data.sd_cpt;
+
if (rc == REPEAT_SEND)
goto again;
- lnet_net_unlock(send_data.sd_cpt);
+ lnet_net_unlock(cpt);
return rc;
}
cfs_time_seconds(interval));
}
- /* clean up the router checker */
- lnet_prune_rc_data(1);
-
/* Shutting down */
+ lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
+ lnet_net_unlock(LNET_LOCK_EX);
/* signal that the monitor thread is exiting */
up(&the_lnet.ln_mt_signal);
}
}
-static void
+void
lnet_mt_event_handler(struct lnet_event *event)
{
struct lnet_mt_event_info *ev_info = event->md.user_ptr;
if (rc)
goto clean_queues;
- rc = LNetEQAlloc(0, lnet_mt_event_handler, &the_lnet.ln_mt_eqh);
- if (rc != 0) {
- CERROR("Can't allocate monitor thread EQ: %d\n", rc);
- goto clean_queues;
- }
-
- /* Pre monitor thread start processing */
- rc = lnet_router_pre_mt_start();
- if (rc)
- goto free_mem;
-
sema_init(&the_lnet.ln_mt_signal, 0);
+ lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_mt_state = LNET_MT_STATE_RUNNING;
+ lnet_net_unlock(LNET_LOCK_EX);
task = kthread_run(lnet_monitor_thread, NULL, "monitor_thread");
if (IS_ERR(task)) {
rc = PTR_ERR(task);
return 0;
clean_thread:
+ lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
+ lnet_net_unlock(LNET_LOCK_EX);
/* block until event callback signals exit */
down(&the_lnet.ln_mt_signal);
/* clean up */
- lnet_router_cleanup();
-free_mem:
+ lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_mt_state = LNET_MT_STATE_SHUTDOWN;
+ lnet_net_unlock(LNET_LOCK_EX);
lnet_rsp_tracker_clean();
lnet_clean_local_ni_recoveryq();
lnet_clean_peer_ni_recoveryq();
lnet_clean_resendqs();
- LNetEQFree(the_lnet.ln_mt_eqh);
LNetInvalidateEQHandle(&the_lnet.ln_mt_eqh);
return rc;
clean_queues:
void lnet_monitor_thr_stop(void)
{
- int rc;
-
if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
return;
LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_RUNNING);
+ lnet_net_lock(LNET_LOCK_EX);
the_lnet.ln_mt_state = LNET_MT_STATE_STOPPING;
+ lnet_net_unlock(LNET_LOCK_EX);
/* tell the monitor thread that we're shutting down */
wake_up(&the_lnet.ln_mt_waitq);
LASSERT(the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN);
/* perform cleanup tasks */
- lnet_router_cleanup();
lnet_rsp_tracker_clean();
lnet_clean_local_ni_recoveryq();
lnet_clean_peer_ni_recoveryq();
lnet_clean_resendqs();
- rc = LNetEQFree(the_lnet.ln_mt_eqh);
- LASSERT(rc == 0);
+
return;
}
rnet = lnet_find_rnet_locked(LNET_NIDNET(src_nid));
if (rnet) {
- struct lnet_peer_ni *gw = NULL;
+ struct lnet_peer *gw = NULL;
+ struct lnet_peer_ni *lpni = NULL;
struct lnet_route *route;
list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
found = false;
gw = route->lr_gateway;
- if (gw->lpni_net != net)
+ if (route->lr_lnet != net->net_id)
continue;
- if (gw->lpni_nid == from_nid) {
- found = true;
- break;
+ /*
+ * if the nid is one of the gateway's NIDs
+ * then this is a valid gateway
+ */
+ while ((lpni = lnet_get_next_peer_ni_locked(gw,
+ NULL, lpni)) != NULL) {
+ if (lpni->lpni_nid == from_nid) {
+ found = true;
+ break;
+ }
}
}
}
/* Multi-Rail: Primary NID of source. */
msg->msg_initiator = lnet_peer_primary_nid_locked(src_nid);
- if (lnet_isrouter(msg->msg_rxpeer)) {
- lnet_peer_set_alive(msg->msg_rxpeer);
- if (avoid_asym_router_failure &&
- LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
- /* received a remote message from router, update
- * remote NI status on this router.
- * NB: multi-hop routed message will be ignored.
- */
- lnet_router_ni_update_locked(msg->msg_rxpeer,
- LNET_NIDNET(src_nid));
- }
- }
+ /*
+ * Mark the status of this lpni as UP since we received a message
+ * from it. The ping response reports back the ns_status, which is
+ * marked up or down on the remote, and we cache it here.
+ */
+ msg->msg_rxpeer->lpni_ns_status = LNET_NI_STATUS_UP;
lnet_msg_commit(msg, cpt);
LASSERT(shortest != NULL);
hops = shortest_hops;
if (srcnidp != NULL) {
- ni = lnet_get_next_ni_locked(
- shortest->lr_gateway->lpni_net,
- NULL);
+ struct lnet_net *net;
+ net = lnet_get_net_locked(shortest->lr_lnet);
+ LASSERT(net);
+ ni = lnet_get_next_ni_locked(net, NULL);
*srcnidp = ni->ni_nid;
}
if (orderp != NULL)