LU-8835 osc: handle 64 bit time properly in osc_cache_too_much 14/23814/5
author    James Simmons <uja.ornl@yahoo.com>
Tue, 10 Jan 2017 22:40:40 +0000 (17:40 -0500)
committer Oleg Drokin <oleg.drokin@intel.com>
Wed, 18 Jan 2017 18:59:31 +0000 (18:59 +0000)
Use 64 bit time for cl_lru_last_used and the function
osc_cache_too_much(). Using 64 bit time introduces an
expensive 64 bit division operation on 32 bit platforms.
Since the time lapse calculated in osc_cache_too_much()
will never be more than seventy years, we can cast it to
a long and perform a normal 32 bit division operation
instead.
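
For illustration, a minimal userspace sketch of the pattern the
patch adopts (pages_per_interval() and its arguments are hypothetical
names, not from the patch): compute the delta in 64 bits, then narrow
it to a long once it is known to fit, so the divide runs at native
word size instead of going through a 64 bit helper such as libgcc's
__divdi3() on 32 bit targets.

    #include <stdint.h>

    /* Hedged sketch mirroring the patched osc_cache_too_much() math. */
    static long pages_per_interval(long pages, int64_t now, int64_t last_used)
    {
            int64_t duration = now - last_used;     /* 64 bit delta */
            long timediff = (long)(duration >> 6);  /* ~1 minute units */

            /* guard the divide, as the patch does */
            return timediff > 0 ? pages / timediff : 0;
    }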

Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Change-Id: Ic38db2d22436fc2a8aa431562e4f5d22ec18d842
Reviewed-on: https://review.whamcloud.com/23814
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
lustre/include/obd.h
lustre/osc/osc_page.c

lustre/include/obd.h
index beec9ea..4094966 100644
@@ -271,7 +271,7 @@ struct client_obd {
         * allowed to have multiple threads shrinking LRU cache. */
        atomic_t                 cl_lru_shrinkers;
        /** The time when this LRU cache was last used. */
-       time_t                   cl_lru_last_used;
+       time64_t                 cl_lru_last_used;
        /** stats: how many reclaims have happened for this client_obd.
         * reclaim and shrink - shrink is async, voluntarily rebalancing;
         * reclaim is sync, initiated by IO thread when the LRU slots are
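
Why the type change matters: in the kernel, time_t is defined in
terms of long, so it is only 32 bits wide on 32 bit architectures
and overflows in January 2038; time64_t is 64 bits everywhere. A
hedged illustration using fixed-width userspace types rather than
the kernel's definitions:

    #include <stdint.h>

    int32_t t32 = INT32_MAX;               /* 2038-01-19 03:14:07 UTC */
    /* t32 + 1 wraps negative, corrupting any "now - last_used" delta */
    int64_t t64 = (int64_t)INT32_MAX + 1;  /* no wrap for billions of years */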
lustre/osc/osc_page.c
index eb7c6e7..c1650b8 100644
@@ -376,11 +376,18 @@ static int osc_cache_too_much(struct client_obd *cli)
                else if (pages >= budget / 2)
                        return lru_shrink_min(cli);
        } else {
-               int duration = cfs_time_current_sec() - cli->cl_lru_last_used;
+               time64_t duration = ktime_get_real_seconds();
+               long timediff;
 
                /* knock out pages by duration of no IO activity */
-               duration >>= 6; /* approximately 1 minute */
-               if (duration > 0 && pages >= budget / duration)
+               duration -= cli->cl_lru_last_used;
+               /*
+                * The difference shouldn't be more than 70 years
+                * so we can safely cast to a long. Round to
+                * approximately 1 minute.
+                */
+               timediff = (long)(duration >> 6);
+               if (timediff > 0 && pages >= budget / timediff)
                        return lru_shrink_min(cli);
        }
        return 0;
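
A note on the helper swap above: cfs_time_current_sec() was a thin
Lustre wrapper around the kernel's get_seconds(), which returns an
unsigned long and therefore wraps in 2038 on 32 bit systems, while
ktime_get_real_seconds() returns wall-clock seconds as a time64_t on
all architectures. The relevant signatures, for reference:

    unsigned long get_seconds(void);        /* word-sized; y2038-unsafe on 32 bit */
    time64_t ktime_get_real_seconds(void);  /* 64 bit everywhere */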
@@ -429,7 +436,7 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
                list_splice_tail(&lru, &cli->cl_lru_list);
                atomic_long_sub(npages, &cli->cl_lru_busy);
                atomic_long_add(npages, &cli->cl_lru_in_list);
-               cli->cl_lru_last_used = cfs_time_current_sec();
+               cli->cl_lru_last_used = ktime_get_real_seconds();
                spin_unlock(&cli->cl_lru_list_lock);
 
                if (waitqueue_active(&osc_lru_waitq))