Whamcloud - gitweb
LU-6142 obdclass: make ccc_users in cl_client_cache a refcount_t 81/48881/2
author: Mr. NeilBrown <neilb@suse.de>
Fri, 7 Oct 2022 13:53:38 +0000 (09:53 -0400)
committer: Oleg Drokin <green@whamcloud.com>
Wed, 2 Nov 2022 07:09:41 +0000 (07:09 +0000)
As this is used as a refcount, it should be declared
as one.

Change-Id: I5af513ccb2b706a398e647ce0427affa4516a9b5
Signed-off-by: Mr. NeilBrown <neilb@suse.de>
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/48881
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Arshad Hussain <arshad.hussain@aeoncomputing.com>
Reviewed-by: Emoly Liu <emoly@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/include/cl_object.h
lustre/llite/lproc_llite.c
lustre/obdclass/cl_page.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c

index 672004a..3094b61 100644 (file)
@@ -2276,7 +2276,7 @@ struct cl_client_cache {
         * # of client cache refcount
         * # of users (OSCs) + 2 (held by llite and lov)
         */
-       atomic_t                ccc_users;
+       refcount_t              ccc_users;
        /**
         * # of threads are doing shrinking
         */
index 0853eff..5391cc4 100644 (file)
@@ -474,7 +474,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
                      "reclaim_count: %u\n"
                      "max_read_ahead_mb: %lu\n"
                      "used_read_ahead_mb: %d\n",
-                  atomic_read(&cache->ccc_users),
+                  refcount_read(&cache->ccc_users),
                   max_cached_mb,
                   max_cached_mb - unused_mb,
                   unused_mb,
index 91a879f..d7e384d 100644 (file)
@@ -1240,7 +1240,7 @@ struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
                RETURN(NULL);
 
        /* Initialize cache data */
-       atomic_set(&cache->ccc_users, 1);
+       refcount_set(&cache->ccc_users, 1);
        cache->ccc_lru_max = lru_page_max;
        atomic_long_set(&cache->ccc_lru_left, lru_page_max);
        spin_lock_init(&cache->ccc_lru_lock);
@@ -1261,7 +1261,7 @@ EXPORT_SYMBOL(cl_cache_init);
  */
 void cl_cache_incref(struct cl_client_cache *cache)
 {
-       atomic_inc(&cache->ccc_users);
+       refcount_inc(&cache->ccc_users);
 }
 EXPORT_SYMBOL(cl_cache_incref);
 
@@ -1272,7 +1272,7 @@ EXPORT_SYMBOL(cl_cache_incref);
  */
 void cl_cache_decref(struct cl_client_cache *cache)
 {
-       if (atomic_dec_and_test(&cache->ccc_users))
+       if (refcount_dec_and_test(&cache->ccc_users))
                OBD_FREE(cache, sizeof(*cache));
 }
 EXPORT_SYMBOL(cl_cache_decref);
index 40fcf4d..857545d 100644 (file)
@@ -358,7 +358,7 @@ static int osc_cache_too_much(struct client_obd *cli)
        unsigned long budget;
 
        LASSERT(cache != NULL);
-       budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
+       budget = cache->ccc_lru_max / (refcount_read(&cache->ccc_users) - 2);
 
        /* if it's going to run out LRU slots, we should free some, but not
         * too much to maintain faireness among OSCs. */
@@ -727,7 +727,7 @@ static long osc_lru_reclaim(struct client_obd *cli, unsigned long npages)
        cache->ccc_lru_shrinkers++;
        list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
 
-       max_scans = atomic_read(&cache->ccc_users) - 2;
+       max_scans = refcount_read(&cache->ccc_users) - 2;
        while (--max_scans > 0 &&
               (scan = list_first_entry_or_null(&cache->ccc_lru,
                                                  struct client_obd,
index b9e7d5c..57bc02a 100644 (file)
@@ -3796,7 +3796,7 @@ int osc_cleanup_common(struct obd_device *obd)
 
        /* lru cleanup */
        if (cli->cl_cache != NULL) {
-               LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
+               LASSERT(refcount_read(&cli->cl_cache->ccc_users) > 0);
                spin_lock(&cli->cl_cache->ccc_lru_lock);
                list_del_init(&cli->cl_lru_osc);
                spin_unlock(&cli->cl_cache->ccc_lru_lock);