 	int unused_mb;

 	max_cached_mb = cache->ccc_lru_max >> shift;
-	unused_mb = cfs_atomic_read(&cache->ccc_lru_left) >> shift;
+	unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
 	return seq_printf(m,
 			"users: %d\n"
 			"max_cached_mb: %d\n"
 			"used_mb: %d\n"
 			"unused_mb: %d\n"
 			"reclaim_count: %u\n",
-			cfs_atomic_read(&cache->ccc_users),
+			atomic_read(&cache->ccc_users),
 			max_cached_mb,
 			max_cached_mb - unused_mb,
 			unused_mb,
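Note: shift is presumably 20 - PAGE_CACHE_SHIFT (its definition is outside this hunk), so the right shift converts a page count to megabytes. With 4 KiB pages, PAGE_CACHE_SHIFT is 12, shift is 8, and e.g. 25600 LRU pages >> 8 = 100 MB.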
@@ ... @@
 	/* easy - add more LRU slots. */
 	if (diff >= 0) {
-		cfs_atomic_add(diff, &cache->ccc_lru_left);
+		atomic_add(diff, &cache->ccc_lru_left);
 		GOTO(out, rc = 0);
 	}
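A diff of zero or more means the new limit is no smaller than the old one, so the surplus slots are handed straight back to ccc_lru_left and the write succeeds immediately; a negative diff (shrinking the cache) falls through to the compare-and-swap loop in the next hunk.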
@@ ... @@
 		do {
 			int ov, nv;

-			ov = cfs_atomic_read(&cache->ccc_lru_left);
+			ov = atomic_read(&cache->ccc_lru_left);
 			if (ov == 0)
 				break;

 			nv = ov > diff ? ov - diff : 0;
-			rc = cfs_atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+			rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
 			if (likely(ov == rc)) {
 				diff -= ov - nv;
 				nrpages += ov - nv;
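The do/while above is the standard lock-free "take at most N" retry loop: read the counter, compute a replacement clamped at zero, and claim it with cmpxchg, retrying if another thread raced in between (atomic_cmpxchg returns the value it found, so ov == rc means the swap took effect). Below is a minimal userspace sketch of the same pattern using C11 stdatomic rather than the kernel atomic_t API; the names budget and take_from_budget are illustrative, not from this patch.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int budget = 512;	/* stand-in for ccc_lru_left */

/* Claim up to 'want' units without letting the counter go negative;
 * returns how many units were actually claimed. */
static int take_from_budget(int want)
{
	int ov, nv;

	do {
		ov = atomic_load(&budget);
		if (ov == 0)
			return 0;	/* nothing left to claim */
		nv = ov > want ? ov - want : 0;
		/* A weak CAS may fail spuriously or because of a racing
		 * writer; either way the loop rereads and retries, just
		 * like the cmpxchg loop in the patch. */
	} while (!atomic_compare_exchange_weak(&budget, &ov, nv));

	return ov - nv;
}

int main(void)
{
	printf("claimed %d\n", take_from_budget(600));	/* claims 512 */
	printf("claimed %d\n", take_from_budget(10));	/* claims 0 */
	return 0;
}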
@@ ... @@
 		spin_unlock(&sbi->ll_lock);
 		rc = count;
 	} else {
-		cfs_atomic_add(nrpages, &cache->ccc_lru_left);
+		atomic_add(nrpages, &cache->ccc_lru_left);
 	}
 	return rc;
 }
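The else branch here is the failure path: any pages already claimed by the loop (accumulated in nrpages) are handed back to ccc_lru_left, undoing the partial shrink before the error is returned.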
@@ ... @@
 	struct cl_client_cache *cache = &sbi->ll_cache;
 	int pages, mb;

-	pages = cfs_atomic_read(&cache->ccc_unstable_nr);
+	pages = atomic_read(&cache->ccc_unstable_nr);
 	mb = (pages * PAGE_CACHE_SIZE) >> 20;

 	return seq_printf(m, "unstable_pages: %8d\n"