Whamcloud - gitweb
LU-13569 lnet: Recover local NI w/exponential backoff interval 21/39721/15
authorChris Horn <chris.horn@hpe.com>
Fri, 21 Aug 2020 20:16:43 +0000 (15:16 -0500)
committerOleg Drokin <green@whamcloud.com>
Wed, 28 Apr 2021 02:10:34 +0000 (02:10 +0000)
Use an exponential backoff algorithm to determine the interval at
which unhealthy local NIs are ping'd

Introduce lnet_ni_add_to_recoveryq_locked() which handles checking
pre-conditions for whether the NI should be added to the recovery
queue, and takes a ref on the NI as appropriate.

Test-Parameters: trivial
HPE-bug-id: LUS-9109
Signed-off-by: Chris Horn <chris.horn@hpe.com>
Change-Id: Idb3789366b2e450837989f9a12eb2d598f80081c
Reviewed-on: https://review.whamcloud.com/39721
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Alexander Boyko <alexander.boyko@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lnet/include/lnet/lib-lnet.h
lnet/include/lnet/lib-types.h
lnet/lnet/lib-move.c
lnet/lnet/lib-msg.c

index d53f39f..a620aaa 100644 (file)
@@ -560,6 +560,9 @@ extern void lnet_peer_clr_pref_nids(struct lnet_peer_ni *lpni);
 extern int lnet_peer_del_pref_nid(struct lnet_peer_ni *lpni, lnet_nid_t nid);
 void lnet_peer_ni_set_selection_priority(struct lnet_peer_ni *lpni,
                                         __u32 priority);
+extern void lnet_ni_add_to_recoveryq_locked(struct lnet_ni *ni,
+                                           struct list_head *queue,
+                                           time64_t now);
 
 void lnet_router_debugfs_init(void);
 void lnet_router_debugfs_fini(void);
@@ -1003,6 +1006,12 @@ lnet_peer_ni_set_next_ping(struct lnet_peer_ni *lpni, time64_t now)
                lnet_get_next_recovery_ping(lpni->lpni_ping_count, now);
 }
 
+static inline void
+lnet_ni_set_next_ping(struct lnet_ni *ni, time64_t now)
+{
+       ni->ni_next_ping = lnet_get_next_recovery_ping(ni->ni_ping_count, now);
+}
+
 /*
  * A peer NI is alive if it satisfies the following two conditions:
  *  1. peer NI health >= LNET_MAX_HEALTH_VALUE * router_sensitivity_percentage
index 3b1e46f..51afa78 100644 (file)
@@ -473,6 +473,13 @@ struct lnet_ni {
        /* Recovery state. Protected by lnet_ni_lock() */
        __u32                   ni_recovery_state;
 
+       /* When to send the next recovery ping */
+       time64_t                ni_next_ping;
+       /* How many pings sent during current recovery period did not receive
+        * a reply. NB: reset whenever _any_ message arrives on this NI
+        */
+       unsigned int            ni_ping_count;
+
        /* per NI LND tunables */
        struct lnet_lnd_tunables ni_lnd_tunables;
 
index 2a856c8..c9b14f5 100644 (file)
@@ -3348,6 +3348,7 @@ lnet_recover_local_nis(void)
        lnet_nid_t nid;
        int healthv;
        int rc;
+       time64_t now;
 
        /*
         * splice the recovery queue on a local queue. We will iterate
@@ -3361,6 +3362,8 @@ lnet_recover_local_nis(void)
                         &local_queue);
        lnet_net_unlock(0);
 
+       now = ktime_get_seconds();
+
        list_for_each_entry_safe(ni, tmp, &local_queue, ni_recovery) {
                /*
                 * if an NI is being deleted or it is now healthy, there
@@ -3394,9 +3397,15 @@ lnet_recover_local_nis(void)
                        ni->ni_recovery_state &= ~LNET_NI_RECOVERY_FAILED;
                }
 
+
                lnet_ni_unlock(ni);
-               lnet_net_unlock(0);
 
+               if (now < ni->ni_next_ping) {
+                       lnet_net_unlock(0);
+                       continue;
+               }
+
+               lnet_net_unlock(0);
 
                CDEBUG(D_NET, "attempting to recover local ni: %s\n",
                       libcfs_nid2str(ni->ni_nid));
@@ -3464,30 +3473,20 @@ lnet_recover_local_nis(void)
                                LNetMDUnlink(mdh);
                                continue;
                        }
-                       /*
-                        * Same note as in lnet_recover_peer_nis(). When
-                        * we're sending the ping, the NI is free to be
-                        * deleted or manipulated. By this point it
-                        * could've been added back on the recovery queue,
-                        * and a refcount taken on it.
-                        * So we can't just add it blindly again or we'll
-                        * corrupt the queue. We must check under lock if
-                        * it's not on any list and if not then add it
-                        * to the processed list, which will eventually be
-                        * spliced back on to the recovery queue.
-                        */
+                       ni->ni_ping_count++;
+
                        ni->ni_ping_mdh = mdh;
-                       if (list_empty(&ni->ni_recovery)) {
-                               list_add_tail(&ni->ni_recovery, &processed_list);
-                               lnet_ni_addref_locked(ni, 0);
-                       }
-                       lnet_net_unlock(0);
+                       lnet_ni_add_to_recoveryq_locked(ni, &processed_list,
+                                                       now);
 
-                       lnet_ni_lock(ni);
-                       if (rc)
+                       if (rc) {
+                               lnet_ni_lock(ni);
                                ni->ni_recovery_state &= ~LNET_NI_RECOVERY_PENDING;
-               }
-               lnet_ni_unlock(ni);
+                               lnet_ni_unlock(ni);
+                       }
+                       lnet_net_unlock(0);
+               } else
+                       lnet_ni_unlock(ni);
        }
 
        /*
index a5e0204..7dab8d5 100644 (file)
@@ -449,6 +449,31 @@ lnet_dec_healthv_locked(atomic_t *healthv, int sensitivity)
        }
 }
 
+/* must hold net_lock/0 */
+void
+lnet_ni_add_to_recoveryq_locked(struct lnet_ni *ni,
+                               struct list_head *recovery_queue, time64_t now)
+{
+       if (!list_empty(&ni->ni_recovery))
+               return;
+
+       if (atomic_read(&ni->ni_healthv) == LNET_MAX_HEALTH_VALUE)
+               return;
+
+       /* This NI is going on the recovery queue, so take a ref on it */
+       lnet_ni_addref_locked(ni, 0);
+
+       lnet_ni_set_next_ping(ni, now);
+
+       CDEBUG(D_NET, "%s added to recovery queue. ping count: %u next ping: %lld health :%d\n",
+              libcfs_nid2str(ni->ni_nid),
+              ni->ni_ping_count,
+              ni->ni_next_ping,
+              atomic_read(&ni->ni_healthv));
+
+       list_add_tail(&ni->ni_recovery, recovery_queue);
+}
+
 static void
 lnet_handle_local_failure(struct lnet_ni *local_ni)
 {
@@ -464,22 +489,8 @@ lnet_handle_local_failure(struct lnet_ni *local_ni)
        }
 
        lnet_dec_healthv_locked(&local_ni->ni_healthv, lnet_health_sensitivity);
-       /*
-        * add the NI to the recovery queue if it's not already there
-        * and it's health value is actually below the maximum. It's
-        * possible that the sensitivity might be set to 0, and the health
-        * value will not be reduced. In this case, there is no reason to
-        * invoke recovery
-        */
-       if (list_empty(&local_ni->ni_recovery) &&
-           atomic_read(&local_ni->ni_healthv) < LNET_MAX_HEALTH_VALUE) {
-               CDEBUG(D_NET, "ni %s added to recovery queue. Health = %d\n",
-                       libcfs_nid2str(local_ni->ni_nid),
-                       atomic_read(&local_ni->ni_healthv));
-               list_add_tail(&local_ni->ni_recovery,
-                             &the_lnet.ln_mt_localNIRecovq);
-               lnet_ni_addref_locked(local_ni, 0);
-       }
+       lnet_ni_add_to_recoveryq_locked(local_ni, &the_lnet.ln_mt_localNIRecovq,
+                                       ktime_get_seconds());
        lnet_net_unlock(0);
 }
 
@@ -873,6 +884,8 @@ lnet_health_check(struct lnet_msg *msg)
                 * faster recovery.
                 */
                lnet_inc_healthv(&ni->ni_healthv, lnet_health_sensitivity);
+               lnet_net_lock(0);
+               ni->ni_ping_count = 0;
                /*
                 * It's possible msg_txpeer is NULL in the LOLND
                 * case. Only increment the peer's health if we're
@@ -888,7 +901,6 @@ lnet_health_check(struct lnet_msg *msg)
                         * I'm a router, then set that lpni's health to
                         * maximum so we can commence communication
                         */
-                       lnet_net_lock(0);
                        if (lnet_isrouter(lpni) || the_lnet.ln_routing) {
                                lnet_set_lpni_healthv_locked(lpni,
                                        LNET_MAX_HEALTH_VALUE);
@@ -909,8 +921,8 @@ lnet_health_check(struct lnet_msg *msg)
                                                &the_lnet.ln_mt_peerNIRecovq,
                                                ktime_get_seconds());
                        }
-                       lnet_net_unlock(0);
                }
+               lnet_net_unlock(0);
 
                /* we can finalize this message */
                return -1;