LU-13569 lnet: Age peer NI out of recovery
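In brief: LNetPrimaryNID() now drops its peer NI reference after discovery and looks the lpni up again, since discovery may have replaced the peer object; a new helper, lnet_peer_queue_message(), adds a message to lp_dc_pendq and queues the peer for discovery under net_lock/EX plus lp_lock, matching the locking the discovery thread uses when it splices that queue; and lnet_peer_ni_add_to_recoveryq_locked() gains a target queue and a timestamp so that peer NIs whose lpni_last_alive is more than lnet_recovery_limit seconds old are aged out of recovery instead of being re-queued.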
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 9a6780c..a1d2552 100644
@@ -1357,6 +1357,16 @@ LNetPrimaryNID(lnet_nid_t nid)
                rc = lnet_discover_peer_locked(lpni, cpt, true);
                if (rc)
                        goto out_decref;
+               /* The lpni (or lp) for this NID may have changed and our ref is
+                * the only thing keeping the old one around. Release the ref
+                * and look up the lpni again.
+                */
+               lnet_peer_ni_decref_locked(lpni);
+               lpni = lnet_find_peer_ni_locked(nid);
+               if (!lpni) {
+                       rc = -ENOENT;
+                       goto out_unlock;
+               }
                lp = lpni->lpni_peer_net->lpn_peer;
 
                /* Only try once if discovery is disabled */
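The added re-lookup follows the usual "drop the possibly-stale reference, then re-resolve under the lock" idiom. A minimal sketch of that idiom, using only functions that appear in this file (the helper name is hypothetical):

/* Sketch: after an operation that can replace the object behind a
 * cached pointer (here, peer discovery), drop the old reference and
 * look the object up again by NID before dereferencing it.
 */
static int
refresh_lpni_locked(lnet_nid_t nid, struct lnet_peer_ni **lpnip)
{
	lnet_peer_ni_decref_locked(*lpnip);	/* may free the old lpni */
	*lpnip = lnet_find_peer_ni_locked(nid);	/* re-resolve by NID */
	return *lpnip ? 0 : -ENOENT;
}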
@@ -2063,6 +2073,26 @@ __must_hold(&lp->lp_lock)
        return rc;
 }
 
+/* Add the message to the peer's lp_dc_pendq and queue the peer for discovery */
+void
+lnet_peer_queue_message(struct lnet_peer *lp, struct lnet_msg *msg)
+{
+       /* The discovery thread holds net_lock/EX and lp_lock when it splices
+        * the lp_dc_pendq onto a local list for resending. Thus, we do the same
+        * when adding to the list and queuing the peer to ensure that we do not
+        * strand any messages on the lp_dc_pendq. This scheme ensures the
+        * message will be resent even if the peer is already being discovered.
+        * Therefore we needn't check the return value of
+        * lnet_peer_queue_for_discovery(lp).
+        */
+       lnet_net_lock(LNET_LOCK_EX);
+       spin_lock(&lp->lp_lock);
+       list_add_tail(&msg->msg_list, &lp->lp_dc_pendq);
+       spin_unlock(&lp->lp_lock);
+       lnet_peer_queue_for_discovery(lp);
+       lnet_net_unlock(LNET_LOCK_EX);
+}
+
 /*
  * Queue a peer for the attention of the discovery thread.  Call with
  * lnet_net_lock/EX held. Returns 0 if the peer was queued, and
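A send-path caller might use the new helper along these lines (a hypothetical sketch, not code from this patch; the gating check is an assumption):

/* Sketch: if the peer still needs discovery, park the message on
 * lp_dc_pendq rather than sending. The discovery thread splices the
 * queue and resends once discovery completes, so the caller can treat
 * a queued message as handed off.
 */
static void
send_or_defer(struct lnet_peer *lp, struct lnet_msg *msg)
{
	if (!lnet_peer_is_uptodate(lp)) {	/* assumed gating check */
		lnet_peer_queue_message(lp, msg);
		return;
	}
	/* ... proceed with the normal send path ... */
}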
@@ -3971,21 +4001,38 @@ out:
        return rc;
 }
 
+/* must hold net_lock/0 */
 void
-lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni)
+lnet_peer_ni_add_to_recoveryq_locked(struct lnet_peer_ni *lpni,
+                                    struct list_head *recovery_queue,
+                                    time64_t now)
 {
        /* the mt could've shut down and cleaned up the queues */
        if (the_lnet.ln_mt_state != LNET_MT_STATE_RUNNING)
                return;
 
-       if (list_empty(&lpni->lpni_recovery) &&
-           atomic_read(&lpni->lpni_healthv) < LNET_MAX_HEALTH_VALUE) {
-               CDEBUG(D_NET, "lpni %s added to recovery queue. Health = %d\n",
-                       libcfs_nid2str(lpni->lpni_nid),
-                       atomic_read(&lpni->lpni_healthv));
-               list_add_tail(&lpni->lpni_recovery, &the_lnet.ln_mt_peerNIRecovq);
-               lnet_peer_ni_addref_locked(lpni);
+       if (!list_empty(&lpni->lpni_recovery))
+               return;
+
+       if (atomic_read(&lpni->lpni_healthv) == LNET_MAX_HEALTH_VALUE)
+               return;
+
+       if (now > lpni->lpni_last_alive + lnet_recovery_limit) {
+               CDEBUG(D_NET, "lpni %s aged out last alive %lld\n",
+                      libcfs_nid2str(lpni->lpni_nid),
+                      lpni->lpni_last_alive);
+               return;
        }
+
+       /* This peer NI is going on the recovery queue, so take a ref on it */
+       lnet_peer_ni_addref_locked(lpni);
+
+       CDEBUG(D_NET, "%s added to recovery queue. last alive: %lld health: %d\n",
+              libcfs_nid2str(lpni->lpni_nid),
+              lpni->lpni_last_alive,
+              atomic_read(&lpni->lpni_healthv));
+
+       list_add_tail(&lpni->lpni_recovery, recovery_queue);
 }
 
 /* Call with the ln_api_mutex held */
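The aging check above can be read in isolation as follows (a sketch; lnet_recovery_limit is assumed to be the recovery window in seconds set via the module parameter):

/* Sketch of the aging predicate: a peer NI is not (re-)queued for
 * recovery once its last_alive timestamp is more than
 * lnet_recovery_limit seconds in the past.
 *
 * Example: with lnet_recovery_limit = 900, lpni_last_alive = 1000 and
 * now = 2000, 2000 > 1000 + 900 holds, so the NI has aged out.
 */
static bool
lpni_aged_out(struct lnet_peer_ni *lpni, time64_t now)
{
	return now > lpni->lpni_last_alive + lnet_recovery_limit;
}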
@@ -3998,10 +4045,13 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
        struct lnet_peer_ni *lpni;
        int lncpt;
        int cpt;
+       time64_t now;
 
        if (the_lnet.ln_state != LNET_STATE_RUNNING)
                return;
 
+       now = ktime_get_seconds();
+
        if (!all) {
                lnet_net_lock(LNET_LOCK_EX);
                lpni = lnet_find_peer_ni_locked(nid);
@@ -4010,7 +4060,8 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                        return;
                }
                atomic_set(&lpni->lpni_healthv, value);
-               lnet_peer_ni_add_to_recoveryq_locked(lpni);
+               lnet_peer_ni_add_to_recoveryq_locked(lpni,
+                                            &the_lnet.ln_mt_peerNIRecovq, now);
                lnet_peer_ni_decref_locked(lpni);
                lnet_net_unlock(LNET_LOCK_EX);
                return;
@@ -4019,8 +4070,8 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
        lncpt = cfs_percpt_number(the_lnet.ln_peer_tables);
 
        /*
-        * Walk all the peers and reset the healhv for each one to the
-        * maximum value.
+        * Walk all the peers and reset the health value for each one to the
+        * specified value.
         */
        lnet_net_lock(LNET_LOCK_EX);
        for (cpt = 0; cpt < lncpt; cpt++) {
@@ -4030,7 +4081,8 @@ lnet_peer_ni_set_healthv(lnet_nid_t nid, int value, bool all)
                                list_for_each_entry(lpni, &lpn->lpn_peer_nis,
                                                    lpni_peer_nis) {
                                        atomic_set(&lpni->lpni_healthv, value);
-                                       lnet_peer_ni_add_to_recoveryq_locked(lpni);
+                                       lnet_peer_ni_add_to_recoveryq_locked(lpni,
+                                            &the_lnet.ln_mt_peerNIRecovq, now);
                                }
                        }
                }
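For illustration, hypothetical call sites (LNET_NID_ANY and LNET_MAX_HEALTH_VALUE are existing LNet constants; these exact calls are not part of this patch):

/* Reset a single peer NI's health to the maximum. */
lnet_peer_ni_set_healthv(nid, LNET_MAX_HEALTH_VALUE, false);

/* Reset every peer NI; the nid argument is ignored when all is true. */
lnet_peer_ni_set_healthv(LNET_NID_ANY, LNET_MAX_HEALTH_VALUE, true);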