From c0dece77445d19b9d02f5856fb5e86cce527f88a Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Sun, 1 Jan 2017 11:41:33 -0500
Subject: [PATCH] LU-4423 ldlm: use 64-bit time for pl_recalc

The ldlm pool calculates elapsed time by comparing the previous and
current get_seconds() values, which is unsafe on 32-bit machines
after 2038.

This changes the code to use time64_t and ktime_get_real_seconds(),
keeping the 'real' instead of 'monotonic' time because of the
debug prints.

Linux-commit: 8f83409cf2382c968f96877368cd5b542b92af1d
Linux-commit: b8cb86fd95bb461c3496e1f4b4083b198c963a9c

Signed-off-by: Arnd Bergmann
Change-Id: I81cca5b529dbf5615cf46461ad1c9179fdee7835
Signed-off-by: Oleg Drokin
Signed-off-by: Greg Kroah-Hartman
Signed-off-by: James Simmons
Reviewed-on: https://review.whamcloud.com/23350
Tested-by: Jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Dmitry Eremin
---
 lustre/include/lustre_dlm.h |  4 ++--
 lustre/ldlm/ldlm_pool.c     | 32 +++++++++++++++-----------------
 2 files changed, 17 insertions(+), 19 deletions(-)

diff --git a/lustre/include/lustre_dlm.h b/lustre/include/lustre_dlm.h
index 9d6058a..e5ae724 100644
--- a/lustre/include/lustre_dlm.h
+++ b/lustre/include/lustre_dlm.h
@@ -251,9 +251,9 @@ struct ldlm_pool {
 	 * server_slv * lock_volume_factor. */
 	atomic_t		pl_lock_volume_factor;
 	/** Time when last SLV from server was obtained. */
-	time_t			pl_recalc_time;
+	time64_t		pl_recalc_time;
 	/** Recalculation period for pool. */
-	time_t			pl_recalc_period;
+	time64_t		pl_recalc_period;
 	/** Recalculation and shrink operations. */
 	struct ldlm_pool_ops	*pl_ops;
 	/** Number of planned locks for next period. */
diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c
index 5beb503..be1f784 100644
--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -335,15 +335,15 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
  */
 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	ENTRY;
 
-	recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		RETURN(0);
 
 	spin_lock(&pl->pl_lock);
-	recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		RETURN(0);
@@ -364,7 +364,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 	 */
 	ldlm_pool_recalc_grant_plan(pl);
 
-	pl->pl_recalc_time = cfs_time_current_sec();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -473,11 +473,11 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	int ret;
 	ENTRY;
 
-	recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period)
 		RETURN(0);
 
@@ -485,7 +485,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 	/*
 	 * Check if we need to recalc lists now.
 	 */
-	recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec < pl->pl_recalc_period) {
 		spin_unlock(&pl->pl_lock);
 		RETURN(0);
@@ -518,7 +518,7 @@ out:
 	 * Time of LRU resizing might be longer than period,
 	 * so update after LRU resizing rather than before it.
 	 */
-	pl->pl_recalc_time = cfs_time_current_sec();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
 			    recalc_interval_sec);
 	spin_unlock(&pl->pl_lock);
@@ -576,14 +576,13 @@ static struct ldlm_pool_ops ldlm_cli_pool_ops = {
  */
 int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
-	time_t recalc_interval_sec;
+	time64_t recalc_interval_sec;
 	int count;
 
-	recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+	recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 	if (recalc_interval_sec > 0) {
 		spin_lock(&pl->pl_lock);
-		recalc_interval_sec = cfs_time_current_sec() -
-			pl->pl_recalc_time;
+		recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
 
 		if (recalc_interval_sec > 0) {
 			/*
@@ -606,14 +605,13 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
 				    count);
 	}
 
-	recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
+	recalc_interval_sec = pl->pl_recalc_time - ktime_get_real_seconds() +
 			      pl->pl_recalc_period;
 	if (recalc_interval_sec <= 0) {
 		/* DEBUG: should be re-removed after LU-4536 is fixed */
-		CDEBUG(D_DLMTRACE, "%s: Negative interval(%ld), "
-		       "too short period(%ld)\n",
+		CDEBUG(D_DLMTRACE, "%s: Negative interval(%lld), too short period(%lld)\n",
 		       pl->pl_name, recalc_interval_sec,
-		       pl->pl_recalc_period);
+		       (s64)pl->pl_recalc_period);
 
 		/* Prevent too frequent recalculation. */
 		recalc_interval_sec = 1;
@@ -852,7 +850,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
 	spin_lock_init(&pl->pl_lock);
 	atomic_set(&pl->pl_granted, 0);
-	pl->pl_recalc_time = cfs_time_current_sec();
+	pl->pl_recalc_time = ktime_get_real_seconds();
 	atomic_set(&pl->pl_lock_volume_factor, 1);
 
 	atomic_set(&pl->pl_grant_rate, 0);
-- 
1.8.3.1
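
Aside (editor's note, not part of the patch): a minimal userspace sketch of
the failure mode the commit message describes. It uses int32_t and int64_t as
stand-ins for a 32-bit time_t and for time64_t (the type returned by
ktime_get_real_seconds() in the kernel); the timestamps below are illustrative
only, not taken from Lustre.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 2038-01-19 03:14:07 UTC is the last second a signed 32-bit
	 * time_t can represent (INT32_MAX seconds since the epoch). */
	int64_t last_recalc = (int64_t)INT32_MAX - 5;	/* stamp taken just before the wrap */
	int64_t now = (int64_t)INT32_MAX + 5;		/* ten seconds later */

	/* With a 32-bit time_t, 'now' has already wrapped to a negative
	 * value on common two's-complement systems, so the computed
	 * interval is hugely negative instead of 10. */
	int32_t now32 = (int32_t)now;
	int64_t bad_interval = (int64_t)now32 - last_recalc;

	/* With 64-bit seconds, as after this patch, the interval is correct. */
	int64_t good_interval = now - last_recalc;

	printf("32-bit interval: %lld\n", (long long)bad_interval);
	printf("64-bit interval: %lld\n", (long long)good_interval);
	return 0;
}

A negative interval would make checks such as
"recalc_interval_sec < pl->pl_recalc_period" misbehave, which is why the patch
moves pl_recalc_time and pl_recalc_period to time64_t throughout.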