lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
- struct lnet_counters *counters = the_lnet.ln_counters[cpt];
+ struct lnet_counters_common *common;
+ s64 timeout_ns;
+
+ /* set the message deadline */
+ timeout_ns = lnet_transaction_timeout * NSEC_PER_SEC;
+ msg->msg_deadline = ktime_add_ns(ktime_get(), timeout_ns);
/* routed message can be committed for both receiving and sending */
LASSERT(!msg->msg_tx_committed);
if (msg->msg_sending) {
LASSERT(!msg->msg_receiving);
-
msg->msg_tx_cpt = cpt;
msg->msg_tx_committed = 1;
if (msg->msg_rx_committed) { /* routed message REPLY */
}
LASSERT(!msg->msg_onactivelist);
+
msg->msg_onactivelist = 1;
- list_add(&msg->msg_activelist, &container->msc_active);
+ list_add_tail(&msg->msg_activelist, &container->msc_active);
- counters->msgs_alloc++;
- if (counters->msgs_alloc > counters->msgs_max)
- counters->msgs_max = counters->msgs_alloc;
+ common = &the_lnet.ln_counters[cpt]->lct_common;
+ common->lcc_msgs_alloc++;
+ if (common->lcc_msgs_alloc > common->lcc_msgs_max)
+ common->lcc_msgs_max = common->lcc_msgs_alloc;
}
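/*
 * A worked example of the deadline arithmetic above, assuming a
 * lnet_transaction_timeout of 50 seconds (the value and the helper
 * below are illustrative only, not part of this patch):
 */
static inline ktime_t
lnet_msg_deadline_sketch(void)
{
/* 50 s * 1,000,000,000 ns/s = 50,000,000,000 ns */
s64 timeout_ns = 50 * NSEC_PER_SEC;

/* a message committed now must complete before this instant,
 * or the health check will refuse to resend it */
return ktime_add_ns(ktime_get(), timeout_ns);
}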
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
- struct lnet_counters *counters;
+ struct lnet_counters_common *common;
struct lnet_event *ev = &msg->msg_ev;
LASSERT(msg->msg_tx_committed);
if (status != 0)
goto out;
- counters = the_lnet.ln_counters[msg->msg_tx_cpt];
+ common = &(the_lnet.ln_counters[msg->msg_tx_cpt]->lct_common);
switch (ev->type) {
default: /* routed message */
LASSERT(msg->msg_routing);
LASSERT(msg->msg_rx_committed);
LASSERT(ev->type == 0);
- counters->route_length += msg->msg_len;
- counters->route_count++;
+ common->lcc_route_length += msg->msg_len;
+ common->lcc_route_count++;
goto incr_stats;
case LNET_EVENT_PUT:
case LNET_EVENT_SEND:
LASSERT(!msg->msg_rx_committed);
if (msg->msg_type == LNET_MSG_PUT)
- counters->send_length += msg->msg_len;
+ common->lcc_send_length += msg->msg_len;
break;
case LNET_EVENT_GET:
break;
}
- counters->send_count++;
+ common->lcc_send_count++;
incr_stats:
if (msg->msg_txpeer)
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
- struct lnet_counters *counters;
+ struct lnet_counters_common *common;
struct lnet_event *ev = &msg->msg_ev;
LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
if (status != 0)
goto out;
- counters = the_lnet.ln_counters[msg->msg_rx_cpt];
+ common = &(the_lnet.ln_counters[msg->msg_rx_cpt]->lct_common);
switch (ev->type) {
default:
LASSERT(ev->type == 0);
* lnet_msg_decommit_tx(), see details in lnet_parse_get() */
LASSERT(msg->msg_type == LNET_MSG_REPLY ||
msg->msg_type == LNET_MSG_GET);
- counters->send_length += msg->msg_wanted;
+ common->lcc_send_length += msg->msg_wanted;
break;
case LNET_EVENT_PUT:
break;
}
- counters->recv_count++;
+ common->lcc_recv_count++;
incr_stats:
if (msg->msg_rxpeer)
msg->msg_type,
LNET_STATS_TYPE_RECV);
if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
- counters->recv_length += msg->msg_wanted;
+ common->lcc_recv_length += msg->msg_wanted;
out:
lnet_return_rx_credits_locked(msg);
list_del(&msg->msg_activelist);
msg->msg_onactivelist = 0;
- the_lnet.ln_counters[cpt2]->msgs_alloc--;
+ the_lnet.ln_counters[cpt2]->lct_common.lcc_msgs_alloc--;
if (cpt2 != cpt) {
lnet_net_unlock(cpt2);
lnet_md_deconstruct(md, &msg->msg_ev.md);
}
-void
-lnet_msg_detach_md(struct lnet_msg *msg, int status)
-{
- struct lnet_libmd *md = msg->msg_md;
- int unlink;
-
- /* Now it's safe to drop my caller's ref */
- md->md_refcount--;
- LASSERT(md->md_refcount >= 0);
-
- unlink = lnet_md_unlinkable(md);
- if (md->md_eq != NULL) {
- msg->msg_ev.status = status;
- msg->msg_ev.unlinked = unlink;
- lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
- }
-
- if (unlink)
- lnet_md_unlink(md);
-
- msg->msg_md = NULL;
-}
-
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);
- /* NB: we probably want to use NID of msg::msg_from as 3rd
- * parameter (router NID) if it's routed message */
- rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);
+ rc = lnet_send(msg->msg_ev.target.nid, msg, msg->msg_from);
lnet_net_lock(cpt);
/*
return 0;
}
+static void
+lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
+{
+ int h = atomic_read(healthv);
+
+ if (h < sensitivity) {
+ atomic_set(healthv, 0);
+ } else {
+ h -= sensitivity;
+ atomic_set(healthv, h);
+ }
+}
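+
+ /*
+ * A minimal usage sketch of the clamped decrement above (the helper
+ * and its values are illustrative only):
+ */
+ static inline void __maybe_unused
+ lnet_dec_healthv_sketch(void)
+ {
+ atomic_t healthv = ATOMIC_INIT(LNET_MAX_HEALTH_VALUE); /* 1000 */
+
+ lnet_dec_healthv_locked(&healthv, 100); /* 1000 -> 900 */
+ lnet_dec_healthv_locked(&healthv, 850); /* 900 -> 50 */
+ lnet_dec_healthv_locked(&healthv, 100); /* 50 < 100: clamped to 0 */
+ }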
+
+static void
+lnet_handle_local_failure(struct lnet_ni *local_ni)
+{
+ /*
+ * the lnet_net_lock(0) is used to protect the addref on the ni
+ * and the recovery queue.
+ */
+ lnet_net_lock(0);
+ /* the monitor thread could've shut down and cleaned up the queues */
+ if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
+ lnet_net_unlock(0);
+ return;
+ }
+
+ lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
+ /*
+ * add the NI to the recovery queue if it's not already there
+ * and its health value is actually below the maximum. It's
+ * possible that the sensitivity is set to 0, in which case the
+ * health value will not be reduced and there is no reason to
+ * invoke recovery
+ */
+ if (list_empty(&local_ni->ni_recovery) &&
+ atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
+ CERROR("ni %s added to recovery queue. Health = %d\n",
+ libcfs_nid2str(local_ni->ni_nid),
+ atomic_read(&local_ni->ni_healthv));
+ list_add_tail(&local_ni->ni_recovery,
+ &the_lnet.ln_mt_localNIRecovq);
+ lnet_ni_addref_locked(local_ni, 0);
+ }
+ lnet_net_unlock(0);
+}
+
+void
+lnet_handle_remote_failure_locked(struct lnet_peer_ni *lpni)
+{
+ __u32 sensitivity = lnet_health_sensitivity;
+ __u32 lp_sensitivity;
+
+ /* lpni could be NULL if we're in the LOLND case */
+ if (!lpni)
+ return;
+
+ /*
+ * If there is a health sensitivity in the peer then use that
+ * instead of the globally set one.
+ */
+ lp_sensitivity = lpni->lpni_peer_net->lpn_peer->lp_health_sensitivity;
+ if (lp_sensitivity)
+ sensitivity = lp_sensitivity;
+
+ lnet_dec_healthv_locked(&lpni->lpni_healthv, sensitivity);
+ /*
+ * add the peer NI to the recovery queue if it's not already there
+ * and its health value is actually below the maximum. It's
+ * possible that the sensitivity is set to 0, in which case the
+ * health value will not be reduced and there is no reason to
+ * invoke recovery
+ */
+ lnet_peer_ni_add_to_recoveryq_locked(lpni);
+}
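+
+ /*
+ * A worked example of the sensitivity selection above (values are
+ * illustrative only): with the global lnet_health_sensitivity at 100
+ * and a peer configured with lp_health_sensitivity = 25, the peer
+ * value wins, so each failure costs the peer NI 25 health points and
+ * it takes four failures to drop from LNET_MAX_HEALTH_VALUE (1000)
+ * to 900.
+ */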
+
+static void
+lnet_handle_remote_failure(struct lnet_peer_ni *lpni)
+{
+ /* lpni could be NULL if we're in the LOLND case */
+ if (!lpni)
+ return;
+
+ lnet_net_lock(0);
+ /* the monitor thread could've shut down and cleaned up the queues */
+ if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
+ lnet_net_unlock(0);
+ return;
+ }
+ lnet_handle_remote_failure_locked(lpni);
+ lnet_net_unlock(0);
+}
+
+static void
+lnet_incr_hstats(struct lnet_msg *msg, enum lnet_msg_hstatus hstatus)
+{
+ struct lnet_ni *ni = msg->msg_txni;
+ struct lnet_peer_ni *lpni = msg->msg_txpeer;
+ struct lnet_counters_health *health;
+
+ health = &the_lnet.ln_counters[0]->lct_health;
+
+ switch (hstatus) {
+ case LNET_MSG_STATUS_LOCAL_INTERRUPT:
+ atomic_inc(&ni->ni_hstats.hlt_local_interrupt);
+ health->lch_local_interrupt_count++;
+ break;
+ case LNET_MSG_STATUS_LOCAL_DROPPED:
+ atomic_inc(&ni->ni_hstats.hlt_local_dropped);
+ health->lch_local_dropped_count++;
+ break;
+ case LNET_MSG_STATUS_LOCAL_ABORTED:
+ atomic_inc(&ni->ni_hstats.hlt_local_aborted);
+ health->lch_local_aborted_count++;
+ break;
+ case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
+ atomic_inc(&ni->ni_hstats.hlt_local_no_route);
+ health->lch_local_no_route_count++;
+ break;
+ case LNET_MSG_STATUS_LOCAL_TIMEOUT:
+ atomic_inc(&ni->ni_hstats.hlt_local_timeout);
+ health->lch_local_timeout_count++;
+ break;
+ case LNET_MSG_STATUS_LOCAL_ERROR:
+ atomic_inc(&ni->ni_hstats.hlt_local_error);
+ health->lch_local_error_count++;
+ break;
+ case LNET_MSG_STATUS_REMOTE_DROPPED:
+ if (lpni)
+ atomic_inc(&lpni->lpni_hstats.hlt_remote_dropped);
+ health->lch_remote_dropped_count++;
+ break;
+ case LNET_MSG_STATUS_REMOTE_ERROR:
+ if (lpni)
+ atomic_inc(&lpni->lpni_hstats.hlt_remote_error);
+ health->lch_remote_error_count++;
+ break;
+ case LNET_MSG_STATUS_REMOTE_TIMEOUT:
+ if (lpni)
+ atomic_inc(&lpni->lpni_hstats.hlt_remote_timeout);
+ health->lch_remote_timeout_count++;
+ break;
+ case LNET_MSG_STATUS_NETWORK_TIMEOUT:
+ if (lpni)
+ atomic_inc(&lpni->lpni_hstats.hlt_network_timeout);
+ health->lch_network_timeout_count++;
+ break;
+ case LNET_MSG_STATUS_OK:
+ break;
+ default:
+ LBUG();
+ }
+}
+
+static void
+lnet_resend_msg_locked(struct lnet_msg *msg)
+{
+ msg->msg_retry_count++;
+
+ /*
+ * remove message from the active list and reset it to prepare
+ * for a resend. Two exceptions to this:
+ *
+ * 1. the router case. When a message is being routed it is
+ * committed for rx when received and committed for tx when
+ * forwarded. We don't want to remove it from the active list, since
+ * code which handles receiving expects it to remain on the active
+ * list.
+ *
+ * 2. the REPLY case. Reply messages use the same message
+ * structure for the GET that was received.
+ */
+ if (!msg->msg_routing && msg->msg_type != LNET_MSG_REPLY) {
+ list_del_init(&msg->msg_activelist);
+ msg->msg_onactivelist = 0;
+ }
+ /*
+ * The msg_target.nid which was originally set
+ * when calling LNetGet() or LNetPut() might've
+ * been overwritten if we're routing this message.
+ * Call lnet_msg_decommit_tx() to return the credit
+ * this message consumed. The message will
+ * consume another credit when it gets resent.
+ */
+ msg->msg_target.nid = msg->msg_hdr.dest_nid;
+ lnet_msg_decommit_tx(msg, -EAGAIN);
+ msg->msg_sending = 0;
+ msg->msg_receiving = 0;
+ msg->msg_target_is_router = 0;
+
+ CDEBUG(D_NET, "%s->%s:%s:%s - queuing msg (%p) for resend\n",
+ libcfs_nid2str(msg->msg_hdr.src_nid),
+ libcfs_nid2str(msg->msg_hdr.dest_nid),
+ lnet_msgtyp2str(msg->msg_type),
+ lnet_health_error2str(msg->msg_health_status), msg);
+
+ list_add_tail(&msg->msg_list, the_lnet.ln_mt_resendqs[msg->msg_tx_cpt]);
+
+ complete(&the_lnet.ln_mt_wait_complete);
+}
+
+int
+lnet_check_finalize_recursion_locked(struct lnet_msg *msg,
+ struct list_head *containerq,
+ int nworkers, void **workers)
+{
+ int my_slot = -1;
+ int i;
+
+ list_add_tail(&msg->msg_list, containerq);
+
+ for (i = 0; i < nworkers; i++) {
+ if (workers[i] == current)
+ break;
+
+ if (my_slot < 0 && workers[i] == NULL)
+ my_slot = i;
+ }
+
+ if (i < nworkers || my_slot < 0)
+ return -1;
+
+ workers[my_slot] = current;
+
+ return my_slot;
+}
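+
+ /*
+ * A sketch of the calling pattern this helper expects, mirroring
+ * lnet_attempt_msg_resend() and lnet_finalize() below (the function
+ * name is illustrative only):
+ */
+ static void __maybe_unused
+ lnet_drain_queue_sketch(struct lnet_msg *msg, struct list_head *queue,
+ int nworkers, void **workers)
+ {
+ int my_slot;
+
+ /* msg is always queued; a slot is claimed only if this thread is
+ * not already draining and a free slot exists */
+ my_slot = lnet_check_finalize_recursion_locked(msg, queue,
+ nworkers, workers);
+ if (my_slot < 0)
+ return;
+
+ while (!list_empty(queue)) {
+ msg = list_entry(queue->next, struct lnet_msg, msg_list);
+ list_del_init(&msg->msg_list);
+ /* process msg; a recursive failure only requeues it */
+ }
+
+ /* release the slot so future callers can drain */
+ workers[my_slot] = NULL;
+ }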
+
+int
+lnet_attempt_msg_resend(struct lnet_msg *msg)
+{
+ struct lnet_msg_container *container;
+ int my_slot;
+ int cpt;
+
+ /* we can only resend tx_committed messages */
+ LASSERT(msg->msg_tx_committed);
+
+ /* don't resend recovery messages */
+ if (msg->msg_recovery) {
+ CDEBUG(D_NET, "msg %s->%s is a recovery ping. retry# %d\n",
+ libcfs_nid2str(msg->msg_from),
+ libcfs_nid2str(msg->msg_target.nid),
+ msg->msg_retry_count);
+ return -ENOTRECOVERABLE;
+ }
+
+ /*
+ * if we explicitly indicated we don't want to resend then just
+ * return
+ */
+ if (msg->msg_no_resend) {
+ CDEBUG(D_NET, "msg %s->%s requested no resend. retry# %d\n",
+ libcfs_nid2str(msg->msg_from),
+ libcfs_nid2str(msg->msg_target.nid),
+ msg->msg_retry_count);
+ return -ENOTRECOVERABLE;
+ }
+
+ /* check if the message has exceeded the number of retries */
+ if (msg->msg_retry_count >= lnet_retry_count) {
+ CNETERR("msg %s->%s exceeded retry count %d\n",
+ libcfs_nid2str(msg->msg_from),
+ libcfs_nid2str(msg->msg_target.nid),
+ msg->msg_retry_count);
+ return -ENOTRECOVERABLE;
+ }
+
+ cpt = msg->msg_tx_cpt;
+ lnet_net_lock(cpt);
+
+ /* check again under lock */
+ if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING) {
+ lnet_net_unlock(cpt);
+ return -ESHUTDOWN;
+ }
+
+ container = the_lnet.ln_msg_containers[cpt];
+ my_slot =
+ lnet_check_finalize_recursion_locked(msg,
+ &container->msc_resending,
+ container->msc_nfinalizers,
+ container->msc_resenders);
+
+ /* enough threads are resending */
+ if (my_slot == -1) {
+ lnet_net_unlock(cpt);
+ return 0;
+ }
+
+ while (!list_empty(&container->msc_resending)) {
+ msg = list_entry(container->msc_resending.next,
+ struct lnet_msg, msg_list);
+ list_del(&msg->msg_list);
+
+ /*
+ * resending the message will require us to call
+ * lnet_msg_decommit_tx() which will return the credit
+ * which this message holds. This could trigger another
+ * queued message to be sent. If that message fails and
+ * requires a resend we will recurse.
+ * But since at this point the slot is taken, the message
+ * will be queued in the container and dealt with
+ * later. This breaks the recursion.
+ */
+ lnet_resend_msg_locked(msg);
+ }
+
+ /*
+ * msc_resenders is an array of process pointers. Each entry holds
+ * a pointer to the current process operating on the message. An
+ * array entry exists per CPU in the CPT. If the slot is already
+ * set, then a thread on this CPT is currently resending messages.
+ * Once the thread finishes, clear the slot so that another thread
+ * can take on more resend work.
+ */
+ container->msc_resenders[my_slot] = NULL;
+ lnet_net_unlock(cpt);
+
+ return 0;
+}
+
+/*
+ * Do a health check on the message:
+ * return -1 if we're not going to handle the error or
+ * if we've reached the maximum number of retries.
+ * The success case also returns -1, since no resend is needed.
+ * return 0 if the message is requeued for resend
+ */
+static int
+lnet_health_check(struct lnet_msg *msg)
+{
+ enum lnet_msg_hstatus hstatus = msg->msg_health_status;
+ struct lnet_peer_ni *lpni;
+ struct lnet_ni *ni;
+ bool lo = false;
+
+ /* if we're shutting down there's no point in handling health */
+ if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
+ return -1;
+
+ LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
+
+ /*
+ * if we're sending to the LOLND then the msg_txpeer will not be
+ * set. So no need to sanity check it.
+ */
+ if (msg->msg_tx_committed &&
+ LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) != LOLND)
+ LASSERT(msg->msg_txpeer);
+ else if (msg->msg_tx_committed &&
+ LNET_NETTYP(LNET_NIDNET(msg->msg_txni->ni_nid)) == LOLND)
+ lo = true;
+
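+ /* a message past its deadline is not worth resending; return -1
+ * so it gets finalized */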
+ if (hstatus != LNET_MSG_STATUS_OK &&
+ ktime_compare(ktime_get(), msg->msg_deadline) >= 0)
+ return -1;
+
+ /*
+ * stats are only incremented for errors so avoid wasting time
+ * incrementing statistics if there is no error.
+ */
+ if (hstatus != LNET_MSG_STATUS_OK) {
+ lnet_net_lock(0);
+ lnet_incr_hstats(msg, hstatus);
+ lnet_net_unlock(0);
+ }
+
+ /*
+ * always prefer txni/txpeer if the message is committed for both
+ * directions.
+ */
+ if (msg->msg_tx_committed) {
+ ni = msg->msg_txni;
+ lpni = msg->msg_txpeer;
+ } else {
+ ni = msg->msg_rxni;
+ lpni = msg->msg_rxpeer;
+ }
+
+ if (!lo)
+ LASSERT(ni && lpni);
+ else
+ LASSERT(ni);
+
+ CDEBUG(D_NET, "health check: %s->%s: %s: %s\n",
+ libcfs_nid2str(ni->ni_nid),
+ (lo) ? "self" : libcfs_nid2str(lpni->lpni_nid),
+ lnet_msgtyp2str(msg->msg_type),
+ lnet_health_error2str(hstatus));
+
+ switch (hstatus) {
+ case LNET_MSG_STATUS_OK:
+ /*
+ * increment the local ni health whether we successfully
+ * received or sent a message on it.
+ */
+ lnet_inc_healthv(&ni->ni_healthv);
+ /*
+ * It's possible msg_txpeer is NULL in the LOLND
+ * case. Only increment the peer's health if we're
+ * receiving a message from it. It's the only sure way to
+ * know that a remote interface is up.
+ * If this interface is part of a router, then take that
+ * as indication that the router is fully healthy.
+ */
+ if (lpni && msg->msg_rx_committed) {
+ /*
+ * If we're receiving a message from the router or
+ * I'm a router, then set that lpni's health to
+ * maximum so we can commence communication
+ */
+ if (lnet_isrouter(lpni) || the_lnet.ln_routing)
+ lnet_set_healthv(&lpni->lpni_healthv,
+ LNET_MAX_HEALTH_VALUE);
+ else
+ lnet_inc_healthv(&lpni->lpni_healthv);
+ }
+
+ /* we can finalize this message */
+ return -1;
+ case LNET_MSG_STATUS_LOCAL_INTERRUPT:
+ case LNET_MSG_STATUS_LOCAL_DROPPED:
+ case LNET_MSG_STATUS_LOCAL_ABORTED:
+ case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
+ case LNET_MSG_STATUS_LOCAL_TIMEOUT:
+ lnet_handle_local_failure(ni);
+ if (msg->msg_tx_committed)
+ /* add to the re-send queue */
+ return lnet_attempt_msg_resend(msg);
+ break;
+
+ /*
+ * These errors will not trigger a resend so simply
+ * finalize the message
+ */
+ case LNET_MSG_STATUS_LOCAL_ERROR:
+ lnet_handle_local_failure(ni);
+ return -1;
+
+ /*
+ * TODO: since the remote dropped the message we can
+ * attempt a resend safely.
+ */
+ case LNET_MSG_STATUS_REMOTE_DROPPED:
+ lnet_handle_remote_failure(lpni);
+ if (msg->msg_tx_committed)
+ return lnet_attempt_msg_resend(msg);
+ break;
+
+ case LNET_MSG_STATUS_REMOTE_ERROR:
+ case LNET_MSG_STATUS_REMOTE_TIMEOUT:
+ case LNET_MSG_STATUS_NETWORK_TIMEOUT:
+ lnet_handle_remote_failure(lpni);
+ return -1;
+ default:
+ LBUG();
+ }
+
+ /* no resend is needed */
+ return -1;
+}
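+
+ /*
+ * Summary of the dispositions implemented in lnet_health_check()
+ * above:
+ *
+ * OK -> bump health values, finalize (-1)
+ * LOCAL_INTERRUPT / LOCAL_DROPPED / LOCAL_ABORTED /
+ * LOCAL_NO_ROUTE / LOCAL_TIMEOUT -> resend if tx-committed, else finalize
+ * LOCAL_ERROR -> finalize (-1)
+ * REMOTE_DROPPED -> resend if tx-committed, else finalize
+ * REMOTE_ERROR / REMOTE_TIMEOUT /
+ * NETWORK_TIMEOUT -> finalize (-1)
+ */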
+
+static void
+lnet_msg_detach_md(struct lnet_msg *msg, int cpt, int status)
+{
+ struct lnet_libmd *md = msg->msg_md;
+ int unlink;
+
+ /* Now it's safe to drop my caller's ref */
+ md->md_refcount--;
+ LASSERT(md->md_refcount >= 0);
+
+ unlink = lnet_md_unlinkable(md);
+ if (md->md_eq != NULL) {
+ if ((md->md_flags & LNET_MD_FLAG_ABORTED) && !status) {
+ msg->msg_ev.status = -ETIMEDOUT;
+ CDEBUG(D_NET, "md 0x%p already unlinked\n", md);
+ } else {
+ msg->msg_ev.status = status;
+ }
+ msg->msg_ev.unlinked = unlink;
+ lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
+ }
+
+ if (unlink || (md->md_refcount == 0 &&
+ md->md_threshold == LNET_MD_THRESH_INF))
+ lnet_detach_rsp_tracker(md, cpt);
+
+ if (unlink)
+ lnet_md_unlink(md);
+
+ msg->msg_md = NULL;
+}
+
+static bool
+lnet_is_health_check(struct lnet_msg *msg)
+{
+ bool hc = true;
+ int status = msg->msg_ev.status;
+
+ if ((!msg->msg_tx_committed && !msg->msg_rx_committed) ||
+ !msg->msg_onactivelist) {
+ CDEBUG(D_NET, "msg %p not committed for send or receive\n",
+ msg);
+ return false;
+ }
+
+ if ((msg->msg_tx_committed && !msg->msg_txpeer) ||
+ (msg->msg_rx_committed && !msg->msg_rxpeer)) {
+ /* The optimized GET case does not set msg_rxpeer, but status
+ * could be zero. Only print the error message if we have a
+ * non-zero status.
+ */
+ if (status)
+ CDEBUG(D_NET, "msg %p status %d cannot retry\n", msg,
+ status);
+ return false;
+ }
+
+ /* Check for status inconsistencies */
+ if ((!status && msg->msg_health_status != LNET_MSG_STATUS_OK) ||
+ (status && msg->msg_health_status == LNET_MSG_STATUS_OK)) {
+ CDEBUG(D_NET, "Msg %p is in inconsistent state, don't perform health "
+ "checking (%d, %d)\n", msg, status,
+ msg->msg_health_status);
+ hc = false;
+ }
+
+ CDEBUG(D_NET, "health check = %d, status = %d, hstatus = %d\n",
+ hc, status, msg->msg_health_status);
+
+ return hc;
+}
+
+char *
+lnet_health_error2str(enum lnet_msg_hstatus hstatus)
+{
+ switch (hstatus) {
+ case LNET_MSG_STATUS_LOCAL_INTERRUPT:
+ return "LOCAL_INTERRUPT";
+ case LNET_MSG_STATUS_LOCAL_DROPPED:
+ return "LOCAL_DROPPED";
+ case LNET_MSG_STATUS_LOCAL_ABORTED:
+ return "LOCAL_ABORTED";
+ case LNET_MSG_STATUS_LOCAL_NO_ROUTE:
+ return "LOCAL_NO_ROUTE";
+ case LNET_MSG_STATUS_LOCAL_TIMEOUT:
+ return "LOCAL_TIMEOUT";
+ case LNET_MSG_STATUS_LOCAL_ERROR:
+ return "LOCAL_ERROR";
+ case LNET_MSG_STATUS_REMOTE_DROPPED:
+ return "REMOTE_DROPPED";
+ case LNET_MSG_STATUS_REMOTE_ERROR:
+ return "REMOTE_ERROR";
+ case LNET_MSG_STATUS_REMOTE_TIMEOUT:
+ return "REMOTE_TIMEOUT";
+ case LNET_MSG_STATUS_NETWORK_TIMEOUT:
+ return "NETWORK_TIMEOUT";
+ case LNET_MSG_STATUS_OK:
+ return "OK";
+ default:
+ return "<UNKNOWN>";
+ }
+}
+
+bool
+lnet_send_error_simulation(struct lnet_msg *msg,
+ enum lnet_msg_hstatus *hstatus)
+{
+ if (!msg)
+ return false;
+
+ if (list_empty(&the_lnet.ln_drop_rules))
+ return false;
+
+ /* match only health rules */
+ if (!lnet_drop_rule_match(&msg->msg_hdr, LNET_NID_ANY,
+ hstatus))
+ return false;
+
+ CDEBUG(D_NET, "src %s(%s)->dst %s: %s simulate health error: %s\n",
+ libcfs_nid2str(msg->msg_hdr.src_nid),
+ libcfs_nid2str(msg->msg_txni->ni_nid),
+ libcfs_nid2str(msg->msg_hdr.dest_nid),
+ lnet_msgtyp2str(msg->msg_type),
+ lnet_health_error2str(*hstatus));
+
+ return true;
+}
+EXPORT_SYMBOL(lnet_send_error_simulation);
+
void
lnet_finalize(struct lnet_msg *msg, int status)
{
- struct lnet_msg_container *container;
- int my_slot;
- int cpt;
- int rc;
- int i;
+ struct lnet_msg_container *container;
+ int my_slot;
+ int cpt;
+ int rc;
LASSERT(!in_interrupt());
msg->msg_ev.status = status;
+ if (lnet_is_health_check(msg)) {
+ /*
+ * Check the health status of the message. If it has one
+ * of the errors that we're supposed to handle, and it has
+ * not timed out, then
+ * 1. Decrement the appropriate health value
+ * 2. queue the message on the resend queue
+ *
+ * If the message send succeeded, timed out, or failed the
+ * health check for any reason, then we'll just finalize
+ * the message. Otherwise just return, since the message has
+ * been put on the resend queue.
+ */
+ if (!lnet_health_check(msg))
+ return;
+ }
+
+ /*
+ * We're not going to resend this message so detach its MD and invoke
+ * the appropriate callbacks
+ */
if (msg->msg_md != NULL) {
cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);
-
lnet_res_lock(cpt);
- lnet_msg_detach_md(msg, status);
+ lnet_msg_detach_md(msg, cpt, status);
lnet_res_unlock(cpt);
}
- again:
- rc = 0;
+again:
if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
/* not committed to network yet */
LASSERT(!msg->msg_onactivelist);
lnet_net_lock(cpt);
container = the_lnet.ln_msg_containers[cpt];
- list_add_tail(&msg->msg_list, &container->msc_finalizing);
/* Recursion breaker. Don't complete the message here if I am (or
* enough other threads are) already completing messages */
+ my_slot = lnet_check_finalize_recursion_locked(msg,
+ &container->msc_finalizing,
+ container->msc_nfinalizers,
+ container->msc_finalizers);
- my_slot = -1;
- for (i = 0; i < container->msc_nfinalizers; i++) {
- if (container->msc_finalizers[i] == current)
- break;
-
- if (my_slot < 0 && container->msc_finalizers[i] == NULL)
- my_slot = i;
- }
-
- if (i < container->msc_nfinalizers || my_slot < 0) {
+ /* enough threads are finalizing */
+ if (my_slot == -1) {
lnet_net_unlock(cpt);
return;
}
- container->msc_finalizers[my_slot] = current;
-
+ rc = 0;
while (!list_empty(&container->msc_finalizing)) {
msg = list_entry(container->msc_finalizing.next,
struct lnet_msg, msg_list);
- list_del(&msg->msg_list);
+ list_del_init(&msg->msg_list);
/* NB drops and regains the lnet lock if it actually does
* anything, so my finalizing friends can chomp along too */
struct lnet_msg, msg_activelist);
LASSERT(msg->msg_onactivelist);
msg->msg_onactivelist = 0;
- list_del(&msg->msg_activelist);
+ list_del_init(&msg->msg_activelist);
lnet_msg_free(msg);
count++;
}
sizeof(*container->msc_finalizers));
container->msc_finalizers = NULL;
}
+
+ if (container->msc_resenders != NULL) {
+ LIBCFS_FREE(container->msc_resenders,
+ container->msc_nfinalizers *
+ sizeof(*container->msc_resenders));
+ container->msc_resenders = NULL;
+ }
container->msc_init = 0;
}
INIT_LIST_HEAD(&container->msc_active);
INIT_LIST_HEAD(&container->msc_finalizing);
+ INIT_LIST_HEAD(&container->msc_resending);
/* number of CPUs in this CPT */
container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);
return -ENOMEM;
}
+ LIBCFS_CPT_ALLOC(container->msc_resenders, lnet_cpt_table(), cpt,
+ container->msc_nfinalizers *
+ sizeof(*container->msc_resenders));
+
+ if (container->msc_resenders == NULL) {
+ CERROR("Failed to allocate message resenders\n");
+ lnet_msg_container_cleanup(container);
+ return -ENOMEM;
+ }
+
return rc;
}