 * Reference count on this client cache:
* # of users (OSCs) + 2 (held by llite and lov)
*/
- atomic_t ccc_users;
+ refcount_t ccc_users;
/**
 * # of threads doing shrinking
*/
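
For context: refcount_t (from <linux/refcount.h>) is a wrapper around
atomic_t that saturates instead of wrapping on overflow and WARNs on
misuse such as incrementing a zero count. A simplified sketch of the
type and the operations this patch relies on (not the exact kernel
definitions):

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

void refcount_set(refcount_t *r, int n);         /* non-atomic init */
unsigned int refcount_read(const refcount_t *r); /* plain read */
void refcount_inc(refcount_t *r);                /* WARNs if count was 0 */
bool refcount_dec_and_test(refcount_t *r);       /* true on final put */
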
"reclaim_count: %u\n"
"max_read_ahead_mb: %lu\n"
"used_read_ahead_mb: %d\n",
- atomic_read(&cache->ccc_users),
+ refcount_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
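
refcount_read() is safe in this read-only stats path; it is roughly a
plain atomic read of the embedded counter, along these lines
(simplified from <linux/refcount.h>):

static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}

Note that it returns unsigned int where atomic_read() returned int,
which is harmless here since the count is always small and positive.
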
RETURN(NULL);
/* Initialize cache data */
- atomic_set(&cache->ccc_users, 1);
+ refcount_set(&cache->ccc_users, 1);
cache->ccc_lru_max = lru_page_max;
atomic_long_set(&cache->ccc_lru_left, lru_page_max);
spin_lock_init(&cache->ccc_lru_lock);
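
One behavioral difference worth noting: a refcount_t must be nonzero
before anyone may call refcount_inc() on it, so initializing to 1 here
(llite's reference) is what makes the later cl_cache_incref() calls
legal. A hypothetical lifetime sketch using the names from this patch:

	struct cl_client_cache *cache = cl_cache_init(lru_page_max);
	                          /* llite's ref,  count = 1 */
	cl_cache_incref(cache);   /* lov's ref,    count = 2 */
	cl_cache_incref(cache);   /* one OSC,      count = 3 */
	/* ... teardown in reverse ... */
	cl_cache_decref(cache);   /* OSC detaches, count = 2 */
	cl_cache_decref(cache);   /* lov drops,    count = 1 */
	cl_cache_decref(cache);   /* llite drops; count hits 0, freed */

which matches the "users (OSCs) + 2" comment on the field.
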
*/
void cl_cache_incref(struct cl_client_cache *cache)
{
- atomic_inc(&cache->ccc_users);
+ refcount_inc(&cache->ccc_users);
}
EXPORT_SYMBOL(cl_cache_incref);
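
Unlike atomic_inc(), refcount_inc() WARNs and saturates the counter if
it sees a zero count, turning a silent use-after-free "resurrection"
into a loud diagnostic. A hypothetical buggy sequence (not in this
patch) that the conversion now catches:

	cl_cache_decref(cache);   /* last put: count 1 -> 0, cache freed  */
	cl_cache_incref(cache);   /* use-after-free: refcount_inc() WARNs */
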
*/
void cl_cache_decref(struct cl_client_cache *cache)
{
- if (atomic_dec_and_test(&cache->ccc_users))
+ if (refcount_dec_and_test(&cache->ccc_users))
OBD_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(cl_cache_decref);
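
refcount_dec_and_test() also makes the memory-ordering contract of this
put path explicit: it provides release ordering on the decrement and
acquire ordering when it returns true, so all prior accesses to the
cache complete before OBD_FREE() runs. The old atomic_dec_and_test()
was fully ordered and thus also safe, but offered no underflow
checking; refcount_dec_and_test() WARNs if the counter would go below
zero.
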
unsigned long budget;
LASSERT(cache != NULL);
- budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
+ budget = cache->ccc_lru_max / (refcount_read(&cache->ccc_users) - 2);
	/* If LRU slots are about to run out, free some, but not too many,
	 * to maintain fairness among OSCs. */
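
The "- 2" subtracts the two long-lived references held by llite and
lov, leaving the number of attached OSCs as the divisor. For example,
with 4 OSCs the counter reads 6, so with ccc_lru_max = 1024 each OSC
gets a budget of 1024 / (6 - 2) = 256 LRU slots. The max_scans
computation below uses the same arithmetic to bound the shrink scan by
the number of OSCs.
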
cache->ccc_lru_shrinkers++;
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- max_scans = atomic_read(&cache->ccc_users) - 2;
+ max_scans = refcount_read(&cache->ccc_users) - 2;
while (--max_scans > 0 &&
(scan = list_first_entry_or_null(&cache->ccc_lru,
struct client_obd,
/* lru cleanup */
if (cli->cl_cache != NULL) {
- LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
+ LASSERT(refcount_read(&cli->cl_cache->ccc_users) > 0);
spin_lock(&cli->cl_cache->ccc_lru_lock);
list_del_init(&cli->cl_lru_osc);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
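
The LASSERT above still holds after the conversion: this client has not
dropped its own reference yet, so the count must be at least 1.
refcount_read() is fine for assertions and diagnostics even though the
value can change concurrently; nothing here depends on it staying
fixed.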