#include <linux/nsproxy.h>
#include <net/net_namespace.h>
+extern unsigned int lnet_current_net_count;
+
static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");
return rc;
}
+/* Report whether @lpni's aliveness deadline has expired as of @now.
+ *
+ * A peer NI is presumed alive until lct_peer_timeout seconds have
+ * elapsed since lpni_last_alive, so the deadline is the last-alive
+ * timestamp plus the net's configured peer timeout.
+ *
+ * Return: true when the deadline has passed (peer may be dead),
+ *         false while still within the timeout window.
+ */
+static bool
+lnet_is_peer_deadline_passed(struct lnet_peer_ni *lpni, time64_t now)
+{
+	return now >= lpni->lpni_last_alive +
+		      lpni->lpni_net->net_tunables.lct_peer_timeout;
+}
+
/* NB: returns 1 when alive, 0 when dead, negative when error;
* may drop the lnet_net_lock */
static int
lnet_peer_alive_locked(struct lnet_ni *ni, struct lnet_peer_ni *lpni,
struct lnet_msg *msg)
{
+ time64_t now = ktime_get_seconds();
+
if (!lnet_peer_aliveness_enabled(lpni))
return -ENODEV;
msg->msg_type == LNET_MSG_REPLY)
return 1;
+ if (!lnet_is_peer_deadline_passed(lpni, now))
+ return true;
+
return lnet_is_peer_ni_alive(lpni);
}
return 0;
}
+ if (!lnet_msg_discovery(msg) || lnet_peer_is_uptodate(peer)) {
+ lnet_peer_ni_decref_locked(lpni);
+ return 0;
+ }
+
rc = lnet_discover_peer_locked(lpni, cpt, false);
if (rc) {
lnet_peer_ni_decref_locked(lpni);
struct lnet_peer_ni **gw_lpni,
struct lnet_peer **gw_peer)
{
+ int rc;
struct lnet_peer *gw;
+ struct lnet_peer *lp;
+ struct lnet_peer_net *lpn;
+ struct lnet_peer_net *best_lpn = NULL;
+ struct lnet_remotenet *rnet;
struct lnet_route *best_route;
struct lnet_route *last_route;
struct lnet_peer_ni *lpni = NULL;
+ struct lnet_peer_ni *gwni = NULL;
lnet_nid_t src_nid = sd->sd_src_nid;
- best_route = lnet_find_route_locked(NULL, LNET_NIDNET(dst_nid),
+ /* we've already looked up the initial lpni using dst_nid */
+ lpni = sd->sd_best_lpni;
+ /* the peer tree must be in existence */
+ LASSERT(lpni && lpni->lpni_peer_net && lpni->lpni_peer_net->lpn_peer);
+ lp = lpni->lpni_peer_net->lpn_peer;
+
+ list_for_each_entry(lpn, &lp->lp_peer_nets, lpn_peer_nets) {
+ /* is this remote network reachable? */
+ rnet = lnet_find_rnet_locked(lpn->lpn_net_id);
+ if (!rnet)
+ continue;
+
+ if (!best_lpn)
+ best_lpn = lpn;
+
+ if (best_lpn->lpn_seq <= lpn->lpn_seq)
+ continue;
+
+ best_lpn = lpn;
+ }
+
+ if (!best_lpn) {
+ CERROR("peer %s has no available nets \n",
+ libcfs_nid2str(sd->sd_dst_nid));
+ return -EHOSTUNREACH;
+ }
+
+ sd->sd_best_lpni = lnet_find_best_lpni_on_net(sd, lp, best_lpn->lpn_net_id);
+ if (!sd->sd_best_lpni) {
+ CERROR("peer %s down\n", libcfs_nid2str(sd->sd_dst_nid));
+ return -EHOSTUNREACH;
+ }
+
+ best_route = lnet_find_route_locked(NULL, best_lpn->lpn_net_id,
sd->sd_rtr_nid, &last_route,
- &lpni);
+ &gwni);
if (!best_route) {
CERROR("no route to %s from %s\n",
libcfs_nid2str(dst_nid), libcfs_nid2str(src_nid));
return -EHOSTUNREACH;
}
- if (!lpni) {
+ if (!gwni) {
CERROR("Internal Error. Route expected to %s from %s\n",
libcfs_nid2str(dst_nid),
libcfs_nid2str(src_nid));
}
gw = best_route->lr_gateway;
- LASSERT(gw == lpni->lpni_peer_net->lpn_peer);
+ LASSERT(gw == gwni->lpni_peer_net->lpn_peer);
/*
* Discover this gateway if it hasn't already been discovered.
* This means we might delay the message until discovery has
* completed
*/
- if (lnet_msg_discovery(sd->sd_msg) &&
- !lnet_peer_is_uptodate(gw)) {
- sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
- return lnet_initiate_peer_discovery(lpni, sd->sd_msg,
- sd->sd_rtr_nid, sd->sd_cpt);
- }
+ sd->sd_msg->msg_src_nid_param = sd->sd_src_nid;
+ rc = lnet_initiate_peer_discovery(gwni, sd->sd_msg, sd->sd_rtr_nid,
+ sd->sd_cpt);
+ if (rc)
+ return rc;
if (!sd->sd_best_ni)
sd->sd_best_ni = lnet_find_best_ni_on_spec_net(NULL, gw,
return -EFAULT;
}
- *gw_lpni = lpni;
+ *gw_lpni = gwni;
*gw_peer = gw;
/*
- * increment the route sequence number since now we're sure we're
- * going to use it
+ * increment the sequence numbers since now we're sure we're
+ * going to use this path
*/
LASSERT(best_route && last_route);
best_route->lr_seq = last_route->lr_seq + 1;
+ best_lpn->lpn_seq++;
return 0;
}
}
struct lnet_ni *
-lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt)
+lnet_find_best_ni_on_local_net(struct lnet_peer *peer, int md_cpt,
+ bool discovery)
{
struct lnet_peer_net *peer_net = NULL;
struct lnet_ni *best_ni = NULL;
continue;
best_ni = lnet_find_best_ni_on_spec_net(best_ni, peer,
peer_net, md_cpt, false);
+
+ /*
+ * if this is a discovery message and lp_disc_net_id is
+ * specified then use that net to send the discovery on.
+ */
+ if (peer->lp_disc_net_id == peer_net->lpn_net_id &&
+ discovery)
+ break;
}
if (best_ni)
* networks.
*/
sd->sd_best_ni = lnet_find_best_ni_on_local_net(sd->sd_peer,
- sd->sd_md_cpt);
+ sd->sd_md_cpt,
+ lnet_msg_discovery(sd->sd_msg));
if (sd->sd_best_ni) {
sd->sd_best_lpni =
lnet_find_best_lpni_on_net(sd, sd->sd_peer,
return rc;
/*
- * TODO; One possible enhancement is to run the selection
- * algorithm on the peer. However for remote peers the credits are
- * not decremented, so we'll be basically going over the peer NIs
- * in round robin. An MR router will run the selection algorithm
- * on the next-hop interfaces.
+ * Now that we must route to the destination, we must consider the
+ * MR case, where the destination has multiple interfaces, some of
+ * which we can route to and others we do not. For this reason we
+ * need to select the destination which we can route to and if
+ * there are multiple, we need to round robin.
*/
rc = lnet_handle_find_routed_path(sd, sd->sd_dst_nid, &gw_lpni,
&gw_peer);
* trigger discovery.
*/
peer = lpni->lpni_peer_net->lpn_peer;
- if (lnet_msg_discovery(msg) && !lnet_peer_is_uptodate(peer)) {
- rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+ rc = lnet_initiate_peer_discovery(lpni, msg, rtr_nid, cpt);
+ if (rc) {
lnet_peer_ni_decref_locked(lpni);
lnet_net_unlock(cpt);
return rc;
LASSERT(!msg->msg_tx_committed);
rc = lnet_select_pathway(src_nid, dst_nid, msg, rtr_nid);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -EHOSTUNREACH)
+ msg->msg_health_status = LNET_MSG_STATUS_REMOTE_ERROR;
+ else
+ msg->msg_health_status = LNET_MSG_STATUS_LOCAL_ERROR;
return rc;
+ }
if (rc == LNET_CREDIT_OK)
lnet_ni_send(msg->msg_txni, msg);
return;
rspt = md->md_rspt_ptr;
- md->md_rspt_ptr = NULL;
/* debug code */
LASSERT(rspt->rspt_cpt == cpt);
- /*
- * invalidate the handle to indicate that a response has been
- * received, which will then lead the monitor thread to clean up
- * the rspt block.
- */
- LNetInvalidateMDHandle(&rspt->rspt_mdh);
+ md->md_rspt_ptr = NULL;
+
+ if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
+ /*
+ * The monitor thread has invalidated this handle because the
+ * response timed out, but it failed to lookup the MD. That
+ * means this response tracker is on the zombie list. We can
+ * safely remove it under the resource lock (held by caller) and
+ * free the response tracker block.
+ */
+ list_del(&rspt->rspt_on_list);
+ lnet_rspt_free(rspt, cpt);
+ } else {
+ /*
+ * invalidate the handle to indicate that a response has been
+ * received, which will then lead the monitor thread to clean up
+ * the rspt block.
+ */
+ LNetInvalidateMDHandle(&rspt->rspt_mdh);
+ }
+}
+
+/* Free all response trackers remaining on the per-CPT zombie lists,
+ * then release the per-CPT list array itself.
+ *
+ * Trackers land on ln_mt_zombie_rstqs when a response times out but
+ * the MD cannot be looked up; they are normally reclaimed via
+ * lnet_detach_rsp_tracker().  This function handles whatever is left
+ * at LNet shutdown — presumably called only after the LND nets are
+ * down, so no concurrent list access remains (NOTE(review): confirm
+ * no locking is needed at this point).
+ */
+void
+lnet_clean_zombie_rstqs(void)
+{
+ struct lnet_rsp_tracker *rspt, *tmp;
+ int i;
+
+ /* _safe iteration: each entry is unlinked and freed as we go */
+ cfs_cpt_for_each(i, lnet_cpt_table()) {
+ list_for_each_entry_safe(rspt, tmp,
+ the_lnet.ln_mt_zombie_rstqs[i],
+ rspt_on_list) {
+ list_del(&rspt->rspt_on_list);
+ lnet_rspt_free(rspt, i);
+ }
+ }
+
+ /* every queue is now empty; free the per-CPT array last */
+ cfs_percpt_free(the_lnet.ln_mt_zombie_rstqs);
+}
static void
-lnet_finalize_expired_responses(bool force)
+lnet_finalize_expired_responses(void)
{
struct lnet_libmd *md;
struct list_head local_queue;
struct lnet_rsp_tracker *rspt, *tmp;
+ ktime_t now;
int i;
if (the_lnet.ln_mt_rstq == NULL)
list_splice_init(the_lnet.ln_mt_rstq[i], &local_queue);
lnet_net_unlock(i);
+ now = ktime_get();
+
list_for_each_entry_safe(rspt, tmp, &local_queue, rspt_on_list) {
/*
* The rspt mdh will be invalidated when a response
lnet_res_lock(i);
if (LNetMDHandleIsInvalid(rspt->rspt_mdh)) {
lnet_res_unlock(i);
- list_del_init(&rspt->rspt_on_list);
+ list_del(&rspt->rspt_on_list);
lnet_rspt_free(rspt, i);
continue;
}
- if (ktime_compare(ktime_get(), rspt->rspt_deadline) >= 0 ||
- force) {
+ if (ktime_compare(now, rspt->rspt_deadline) >= 0 ||
+ the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN) {
struct lnet_peer_ni *lpni;
lnet_nid_t nid;
md = lnet_handle2md(&rspt->rspt_mdh);
if (!md) {
+ /* MD has been queued for unlink, but
+ * rspt hasn't been detached (Note we've
+ * checked above that the rspt_mdh is
+ * valid). Since we cannot lookup the MD
+ * we're unable to detach the rspt
+ * ourselves. Thus, move the rspt to the
+ * zombie list where we'll wait for
+ * either:
+ * 1. The remaining operations on the
+ * MD to complete. In this case the
+ * final operation will result in
+ * lnet_msg_detach_md()->
+ * lnet_detach_rsp_tracker() where
+ * we will clean up this response
+ * tracker.
+ * 2. LNet to shutdown. In this case
+ * we'll wait until after all LND Nets
+ * have shutdown and then we can
+ * safely free any remaining response
+ * tracker blocks on the zombie list.
+ * Note: We need to hold the resource
+ * lock when adding to the zombie list
+ * because we may have concurrent access
+ * with lnet_detach_rsp_tracker().
+ */
LNetInvalidateMDHandle(&rspt->rspt_mdh);
+ list_move(&rspt->rspt_on_list,
+ the_lnet.ln_mt_zombie_rstqs[i]);
lnet_res_unlock(i);
- list_del_init(&rspt->rspt_on_list);
- lnet_rspt_free(rspt, i);
continue;
}
LASSERT(md->md_rspt_ptr == rspt);
md->md_rspt_ptr = NULL;
lnet_res_unlock(i);
- lnet_net_lock(i);
- the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
- lnet_net_unlock(i);
-
- list_del_init(&rspt->rspt_on_list);
+ LNetMDUnlink(rspt->rspt_mdh);
nid = rspt->rspt_next_hop_nid;
- CNETERR("Response timed out: md = %p: nid = %s\n",
- md, libcfs_nid2str(nid));
- LNetMDUnlink(rspt->rspt_mdh);
+ list_del(&rspt->rspt_on_list);
lnet_rspt_free(rspt, i);
+ /* If we're shutting down we just want to clean
+ * up the rspt blocks
+ */
+ if (the_lnet.ln_mt_state == LNET_MT_STATE_SHUTDOWN)
+ continue;
+
+ lnet_net_lock(i);
+ the_lnet.ln_counters[i]->lct_health.lch_response_timeout_count++;
+ lnet_net_unlock(i);
+
+ CDEBUG(D_NET,
+ "Response timeout: md = %p: nid = %s\n",
+ md, libcfs_nid2str(nid));
+
/*
* If there is a timeout on the response
* from the next hop decrement its health
}
}
- lnet_net_lock(i);
- if (!list_empty(&local_queue))
+ if (!list_empty(&local_queue)) {
+ lnet_net_lock(i);
list_splice(&local_queue, the_lnet.ln_mt_rstq[i]);
- lnet_net_unlock(i);
+ lnet_net_unlock(i);
+ }
}
}
lnet_net_unlock(0);
}
-static struct list_head **
-lnet_create_array_of_queues(void)
-{
- struct list_head **qs;
- struct list_head *q;
- int i;
-
- qs = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct list_head));
- if (!qs) {
- CERROR("Failed to allocate queues\n");
- return NULL;
- }
-
- cfs_percpt_for_each(q, i, qs)
- INIT_LIST_HEAD(q);
-
- return qs;
-}
-
static int
lnet_resendqs_create(void)
{
int interval;
time64_t now;
+ wait_for_completion(&the_lnet.ln_started);
/*
* The monitor thread takes care of the following:
* 1. Checks the aliveness of routers
lnet_resend_pending_msgs();
if (now >= rsp_timeout) {
- lnet_finalize_expired_responses(false);
+ lnet_finalize_expired_responses();
rsp_timeout = now + (lnet_transaction_timeout / 2);
}
* if we wake up every 1 second? Although, we've seen
* cases where we get a complaint that an idle thread
* is waking up unnecessarily.
+ *
+ * Take into account the current net_count when you wake
+ * up for alive router checking, since we need to check
+ * possibly as many networks as we have configured.
*/
interval = min(lnet_recovery_interval,
- min((unsigned int) alive_router_check_interval,
+ min((unsigned int) alive_router_check_interval /
+ lnet_current_net_count,
lnet_transaction_timeout / 2));
- wait_event_interruptible_timeout(the_lnet.ln_mt_waitq,
- false,
- cfs_time_seconds(interval));
+ wait_for_completion_interruptible_timeout(
+ &the_lnet.ln_mt_wait_complete,
+ cfs_time_seconds(interval));
+ /* Must re-init the completion before testing anything,
+ * including ln_mt_state.
+ */
+ reinit_completion(&the_lnet.ln_mt_wait_complete);
}
/* Shutting down */
case LNET_EVENT_UNLINK:
CDEBUG(D_NET, "%s recovery ping unlinked\n",
libcfs_nid2str(ev_info->mt_nid));
+ /* fallthrough */
case LNET_EVENT_REPLY:
lnet_handle_recovery_reply(ev_info, event->status,
event->type == LNET_EVENT_UNLINK);
static void
lnet_rsp_tracker_clean(void)
{
- lnet_finalize_expired_responses(true);
+ lnet_finalize_expired_responses();
cfs_percpt_free(the_lnet.ln_mt_rstq);
the_lnet.ln_mt_rstq = NULL;
lnet_net_unlock(LNET_LOCK_EX);
/* tell the monitor thread that we're shutting down */
- wake_up(&the_lnet.ln_mt_waitq);
+ complete(&the_lnet.ln_mt_wait_complete);
/* block until monitor thread signals that it's done */
down(&the_lnet.ln_mt_signal);
lnet_parse(struct lnet_ni *ni, struct lnet_hdr *hdr, lnet_nid_t from_nid,
void *private, int rdma_req)
{
- int rc = 0;
- int cpt;
- int for_me;
- struct lnet_msg *msg;
- lnet_pid_t dest_pid;
- lnet_nid_t dest_nid;
- lnet_nid_t src_nid;
struct lnet_peer_ni *lpni;
- __u32 payload_length;
- __u32 type;
+ struct lnet_msg *msg;
+ __u32 payload_length;
+ lnet_pid_t dest_pid;
+ lnet_nid_t dest_nid;
+ lnet_nid_t src_nid;
+ bool push = false;
+ int for_me;
+ __u32 type;
+ int rc = 0;
+ int cpt;
LASSERT (!in_interrupt ());
}
if (the_lnet.ln_routing &&
- ni->ni_last_alive != ktime_get_real_seconds()) {
- /* NB: so far here is the only place to set NI status to "up */
+ ni->ni_net->net_last_alive != ktime_get_real_seconds()) {
lnet_ni_lock(ni);
- ni->ni_last_alive = ktime_get_real_seconds();
+ spin_lock(&ni->ni_net->net_lock);
+ ni->ni_net->net_last_alive = ktime_get_real_seconds();
+ spin_unlock(&ni->ni_net->net_lock);
if (ni->ni_status != NULL &&
- ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
+ ni->ni_status->ns_status == LNET_NI_STATUS_DOWN) {
ni->ni_status->ns_status = LNET_NI_STATUS_UP;
+ push = true;
+ }
lnet_ni_unlock(ni);
}
+ if (push)
+ lnet_push_update_to_peers(1);
+
/* Regard a bad destination NID as a protocol error. Senders should
* know what they're doing; if they don't they're misconfigured, buggy
* or malicious so we chop them off at the knees :) */
return 0;
goto drop;
}
+
+ if (the_lnet.ln_routing)
+ lpni->lpni_last_alive = ktime_get_seconds();
+
msg->msg_rxpeer = lpni;
msg->msg_rxni = ni;
lnet_ni_addref_locked(ni, cpt);