rc = lnet_discover_peer_locked(lpni, cpt, true);
if (rc)
goto out_decref;
+ /* The lpni (or lp) for this NID may have changed and our ref is
+ * the only thing keeping the old one around. Release the ref
+ * and look up the lpni again.
+ */
+ lnet_peer_ni_decref_locked(lpni);
+ lpni = lnet_find_peer_ni_locked(nid);
+ if (!lpni) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
lp = lpni->lpni_peer_net->lpn_peer;
/* Only try once if discovery is disabled */
return rc;
}
+/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
+void
+lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
+{
+ /* The discovery thread holds net_lock/EX and lp_lock when it splices
+ * the lp_dc_pendq onto a local list for resending. Thus, we do the same
+ * when adding to the list and queuing the peer to ensure that we do not
+ * strand any messages on the lp_dc_pendq. This scheme ensures the
+ * message will be resent even if the peer is already being discovered.
+ * Therefore we needn't check the return value of
+ * lnet_peer_queue_for_discovery(lp).
+ */
+ lnet_net_lock(LNET_LOCK_EX);
+ spin_lock(&lp->lp_lock);
+ list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
+ spin_unlock(&lp->lp_lock);
+ lnet_peer_queue_for_discovery(lp);
+ lnet_net_unlock(LNET_LOCK_EX);
+}
+
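For illustration only (not part of the patch, and the helper name is hypothetical): the consumer side described in the comment above is assumed to splice lp_dc_pendq onto a local list under the same net_lock/EX + lp_lock pair before resending, roughly like this:

/* Hypothetical example -- not part of the patch. */
static void
example_resend_pending(struct lnet_peer *lp)
{
	struct lnet_msg *msg;
	struct lnet_msg *tmp;
	LIST_HEAD(pending);

	/* Same lock pair as lnet_peer_queue_message(), so a concurrent
	 * enqueue can never strand a message on lp_dc_pendq.
	 */
	lnet_net_lock(LNET_LOCK_EX);
	spin_lock(&lp->lp_lock);
	list_splice_init(&lp->lp_dc_pendq, &pending);
	spin_unlock(&lp->lp_lock);
	lnet_net_unlock(LNET_LOCK_EX);

	list_for_each_entry_safe(msg, tmp, &pending, msg_list) {
		list_del_init(&msg->msg_list);
		/* hand msg back to the send path here */
	}
}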
/*
* Queue a peer for the attention of the discovery thread. Call with
* lnet_net_lock/EX held. Returns 0 if the peer was queued, and
return rc;
}
+/* must hold net_lock/0 */
void
-lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
+lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
+ struct list_head *recovery_queue,
+ time64_t now)
{
/* the monitor thread could've shut down and cleaned up the queues */
if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
return;
- if (list_empty(&lpni->lpni_recovery) &&
- atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
- CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
- libcfs_nid2str(lpni->lpni_nid),
- atomic_read(&lpni->lpni_healthv));
- list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
- lnet_peer_ni_addref_locked(lpni);
+ if (!list_empty(&lpni->lpni_recovery))
+ return;
+
+ if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
+ return;
+
+ if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
+ CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
+ libcfs_nid2str(lpni->lpni_nid),
+ lpni->lpni_last_alive);
+ return;
}
+
+ /* This peer NI is going on the recovery queue, so take a ref on it */
+ lnet_peer_ni_addref_locked(lpni);
+
+ CDEBUG(D_NET, "%s added to recovery queue. last alive: %lld health: %d\n",
+ libcfs_nid2str(lpni->lpni_nid),
+ lpni->lpni_last_alive,
+ atomic_read(&lpni->lpni_healthv));
+
+ list_add_tail(&lpni->lpni_recovery, recovery_queue);
}
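For illustration only (not part of the patch; the function name is hypothetical): a caller that degrades a peer NI's health and then tries to queue it for recovery might look like the sketch below. The add is a no-op if the NI is already queued, is at full health, or has aged out per lnet_recovery_limit.

/* Hypothetical example -- not part of the patch. */
static void
example_handle_peer_ni_failure(struct lnet_peer_ni *lpni)
{
	time64_t now = ktime_get_seconds();

	lnet_net_lock(0);	/* the function requires net_lock/0 */
	/* simplistic health decrement, for illustration only */
	if (atomic_read(&lpni->lpni_healthv) > 0)
		atomic_dec(&lpni->lpni_healthv);
	lnet_peer_ni_add_to_recoveryq_locked(lpni,
					     &the_lnet.ln_mt_peerNIRecovq, now);
	lnet_net_unlock(0);
}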
/* Call with the ln_api_mutex held */
struct lnet_peer_ni *lpni;
int lncpt;
int cpt;
+ time64_t now;
if (the_lnet.ln_state != LNET_STATE_RUNNING)
return;
+ now = ktime_get_seconds();
+
if (!all) {
lnet_net_lock(LNET_LOCK_EX);
lpni = lnet_find_peer_ni_locked(nid);
return;
}
atomic_set(&lpni->lpni_healthv, value);
- lnet_peer_ni_add_to_recoveryq_locked(lpni);
+ lnet_peer_ni_add_to_recoveryq_locked(lpni,
+ &the_lnet.ln_mt_peerNIRecovq, now);
lnet_peer_ni_decref_locked(lpni);
lnet_net_unlock(LNET_LOCK_EX);
return;
lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
/*
- * Walk all the peers and reset the healhv for each one to the
- * maximum value.
+ * Walk all the peers and reset the health value for each one to the
+ * specified value.
*/
lnet_net_lock(LNET_LOCK_EX);
for (cpt = 0; cpt < lncpt; cpt++) {
list_for_each_entry(lpni, &lpn->lpn_peer_nis,
lpni_peer_nis) {
atomic_set(&lpni->lpni_healthv, value);
- lnet_peer_ni_add_to_recoveryq_locked(lpni);
+ lnet_peer_ni_add_to_recoveryq_locked(lpni,
+ &the_lnet.ln_mt_peerNIRecovq, now);
}
}
}
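For illustration only (not part of the patch): the hunk above shows only the innermost loop, so lpn appears without its enclosing iterations. The full walk presumably nests as sketched below; the structure and field names outside the hunk are assumptions based on the LNet peer data structures, and the function name is hypothetical.

/* Hypothetical sketch -- not part of the patch; outer loop fields assumed. */
static void
example_reset_all_peer_ni_healthv(int value, time64_t now)
{
	struct lnet_peer_table *ptable;
	struct lnet_peer *lp;
	struct lnet_peer_net *lpn;
	struct lnet_peer_ni *lpni;
	int lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
	int cpt;

	for (cpt = 0; cpt < lncpt; cpt++) {
		ptable = the_lnet.ln_peer_tables[cpt];
		list_for_each_entry(lp, &ptable->pt_peer_list, lp_peer_list) {
			list_for_each_entry(lpn, &lp->lp_peer_nets,
					    lpn_peer_nets) {
				list_for_each_entry(lpni, &lpn->lpn_peer_nis,
						    lpni_peer_nis) {
					atomic_set(&lpni->lpni_healthv, value);
					lnet_peer_ni_add_to_recoveryq_locked(lpni,
						&the_lnet.ln_mt_peerNIRecovq,
						now);
				}
			}
		}
	}
}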