/*
 * Userspace stand-ins for the kernel atomic_long_t API.
 * NOTE(review): nothing here is actually atomic -- these read-modify-write
 * the counter non-atomically and assume single-threaded (or externally
 * locked) use, matching the existing atomic_cmpxchg stub above; confirm no
 * caller relies on real atomicity in the userspace build.
 *
 * All macro arguments are parenthesized in the expansions; arguments may
 * still be evaluated more than once, so do not pass expressions with side
 * effects.
 */
#define atomic_cmpxchg(v, ov, nv) \
	((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)

typedef struct { volatile long counter; } atomic_long_t;

#define ATOMIC_LONG_INIT(i) { (i) }

#define atomic_long_read(a) ((a)->counter)
#define atomic_long_set(a, b) do { (a)->counter = (b); } while (0)
#define atomic_long_dec_and_test(a) ((--((a)->counter)) == 0)
/* lock argument (b) is ignored in this single-threaded stub */
#define atomic_long_dec_and_lock(a, b) ((--((a)->counter)) == 0)
#define atomic_long_inc(a) (((a)->counter)++)
#define atomic_long_dec(a) do { (a)->counter--; } while (0)
#define atomic_long_add(b, a) do { (a)->counter += (b); } while (0)
#define atomic_long_add_return(n, a) ((a)->counter += (n))
#define atomic_long_inc_return(a) atomic_long_add_return(1, a)
#define atomic_long_sub(b, a) do { (a)->counter -= (b); } while (0)
#define atomic_long_sub_return(n, a) ((a)->counter -= (n))
#define atomic_long_dec_return(a) atomic_long_sub_return(1, a)
/*
 * Kernel semantics: add @a to @v unless @v == @u; return non-zero iff the
 * addition happened.  The previous stub returned the *new* counter value,
 * so a successful add landing exactly on 0 (e.g. the LRU-slot reservation
 * loop doing atomic_long_add_unless(cl_lru_left, -1, 0) taking 1 -> 0)
 * was misreported as failure.
 */
#define atomic_long_add_unless(v, a, u) \
	((v)->counter != (u) ? ((v)->counter += (a), 1) : 0)
#define atomic_long_inc_not_zero(v) atomic_long_add_unless((v), 1, 0)
#define atomic_long_cmpxchg(v, ov, nv) \
	((v)->counter == (ov) ? ((v)->counter = (nv), (ov)) : (v)->counter)
+
#ifdef HAVE_LIBPTHREAD
#include <pthread.h>
*/
atomic_t ccc_users;
/**
+ * # of threads are doing shrinking
+ */
+ unsigned int ccc_lru_shrinkers;
+ /**
* # of LRU entries available
*/
- atomic_t ccc_lru_left;
+ atomic_long_t ccc_lru_left;
/**
* List of entities(OSCs) for this LRU cache
*/
*/
spinlock_t ccc_lru_lock;
/**
- * # of threads are doing shrinking
- */
- unsigned int ccc_lru_shrinkers;
- /**
* Set if unstable check is enabled
*/
unsigned int ccc_unstable_check:1;
/**
+ * # of unstable pages for this mount point
+ */
+ atomic_long_t ccc_unstable_nr;
+ /**
* Waitq for awaiting unstable pages to reach zero.
* Used at umounting time and signaled on BRW commit
*/
wait_queue_head_t ccc_unstable_waitq;
- /**
- * # of unstable pages for this mount point
- */
- atomic_t ccc_unstable_nr;
};
enum {
/* lru for osc caching pages */
struct cl_client_cache *cl_cache;
struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */
- atomic_t *cl_lru_left;
- atomic_t cl_lru_busy;
- atomic_t cl_lru_shrinkers;
- atomic_t cl_lru_in_list;
+ atomic_long_t *cl_lru_left;
+ atomic_long_t cl_lru_busy;
+ atomic_long_t cl_lru_in_list;
+ atomic_long_t cl_unstable_count;
struct list_head cl_lru_list; /* lru page list */
client_obd_lock_t cl_lru_list_lock; /* page list protector */
- atomic_t cl_unstable_count;
+ atomic_t cl_lru_shrinkers;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
extern unsigned int at_history;
extern int at_early_margin;
extern int at_extra;
-extern unsigned int obd_max_dirty_pages;
-extern atomic_t obd_dirty_pages;
-extern atomic_t obd_dirty_transit_pages;
+extern unsigned long obd_max_dirty_pages;
+extern atomic_long_t obd_dirty_pages;
+extern atomic_long_t obd_dirty_transit_pages;
extern unsigned int obd_alloc_fail_rate;
extern char obd_jobid_var[];
/* lru for osc. */
INIT_LIST_HEAD(&cli->cl_lru_osc);
atomic_set(&cli->cl_lru_shrinkers, 0);
- atomic_set(&cli->cl_lru_busy, 0);
- atomic_set(&cli->cl_lru_in_list, 0);
+ atomic_long_set(&cli->cl_lru_busy, 0);
+ atomic_long_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
client_obd_list_lock_init(&cli->cl_lru_list_lock);
- atomic_set(&cli->cl_unstable_count, 0);
+ atomic_long_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
/* initialize ll_cache data */
atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
- atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
+ atomic_long_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
/* turn unstable check off by default as it impacts performance */
sbi->ll_cache.ccc_unstable_check = 0;
- atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+ atomic_long_set(&sbi->ll_cache.ccc_unstable_nr, 0);
init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int ccc_count, next, force = 1, rc = 0;
+ long ccc_count;
+ int next, force = 1, rc = 0;
ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
if (force == 0) {
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
- atomic_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
+ atomic_long_read(&sbi->ll_cache.ccc_unstable_nr) == 0,
&lwi);
}
- ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ ccc_count = atomic_long_read(&sbi->ll_cache.ccc_unstable_nr);
if (force == 0 && rc != -EINTR)
- LASSERTF(ccc_count == 0, "count: %i\n", ccc_count);
+ LASSERTF(ccc_count == 0, "count: %li\n", ccc_count);
/* We need to set force before the lov_disconnect in
{
struct seq_file *m = file->private_data;
struct ll_sb_info *sbi = ll_s2sbi((struct super_block *)m->private);
- int mult, rc, pages_number;
+ __u64 val;
+ long pages_number;
+ int mult;
+ int rc;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
if (rc)
return rc;
+ if (val > LONG_MAX)
+ return -ERANGE;
+ pages_number = (long)val;
+
if (pages_number < 0 || pages_number > totalram_pages / 2) {
/* 1/2 of RAM */
CERROR("can't set file readahead more than %lu MB\n",
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
int shift = 20 - PAGE_CACHE_SHIFT;
- int max_cached_mb;
- int unused_mb;
+ long max_cached_mb;
+ long unused_mb;
max_cached_mb = cache->ccc_lru_max >> shift;
- unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
+ unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
return seq_printf(m,
"users: %d\n"
- "max_cached_mb: %d\n"
- "used_mb: %d\n"
- "unused_mb: %d\n"
+ "max_cached_mb: %ld\n"
+ "used_mb: %ld\n"
+ "unused_mb: %ld\n"
"reclaim_count: %u\n",
atomic_read(&cache->ccc_users),
max_cached_mb,
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
struct lu_env *env;
+ __u64 val;
+ long diff = 0;
+ long nrpages = 0;
+ long pages_number;
int refcheck;
- int mult, rc, pages_number;
- int diff = 0;
- int nrpages = 0;
+ int mult;
+ long rc;
char kernbuf[128];
ENTRY;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
if (rc)
RETURN(rc);
+ if (val > LONG_MAX)
+ return -ERANGE;
+ pages_number = (long)val;
+
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
/* easy - add more LRU slots. */
if (diff >= 0) {
- atomic_add(diff, &cache->ccc_lru_left);
+ atomic_long_add(diff, &cache->ccc_lru_left);
GOTO(out, rc = 0);
}
diff = -diff;
while (diff > 0) {
- int tmp;
+ long tmp;
/* reduce LRU budget from free slots. */
do {
- int ov, nv;
+ long ov, nv;
- ov = atomic_read(&cache->ccc_lru_left);
+ ov = atomic_long_read(&cache->ccc_lru_left);
if (ov == 0)
break;
nv = ov > diff ? ov - diff : 0;
- rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+ rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
if (likely(ov == rc)) {
diff -= ov - nv;
nrpages += ov - nv;
spin_unlock(&sbi->ll_lock);
rc = count;
} else {
- atomic_add(nrpages, &cache->ccc_lru_left);
+ atomic_long_add(nrpages, &cache->ccc_lru_left);
}
return rc;
}
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
- int pages, mb;
+ long pages;
+ int mb;
- pages = atomic_read(&cache->ccc_unstable_nr);
+ pages = atomic_long_read(&cache->ccc_unstable_nr);
mb = (pages * PAGE_CACHE_SIZE) >> 20;
- return seq_printf(m, "unstable_check: %8d\n"
- "unstable_pages: %8d\n"
- "unstable_mb: %8d\n",
+ return seq_printf(m, "unstable_check: %8d\n"
+ "unstable_pages: %12ld\n"
+ "unstable_mb: %8d\n",
cache->ccc_unstable_check, pages, mb);
}
/* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPC(< 1M), which hurt server
* performance a lot. */
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
+ ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages),
+ pages);
if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages))
GOTO(out, ret = 0);
EXPORT_SYMBOL(obd_dump_on_timeout);
unsigned int obd_dump_on_eviction;
EXPORT_SYMBOL(obd_dump_on_eviction);
-unsigned int obd_max_dirty_pages = 256;
+unsigned long obd_max_dirty_pages;
EXPORT_SYMBOL(obd_max_dirty_pages);
-atomic_t obd_dirty_pages;
+atomic_long_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
EXPORT_SYMBOL(obd_timeout);
int at_extra = 30;
EXPORT_SYMBOL(at_extra);
-atomic_t obd_dirty_transit_pages;
+atomic_long_t obd_dirty_transit_pages;
EXPORT_SYMBOL(obd_dirty_transit_pages);
char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
proc_max_dirty_pages_in_mb(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
+ __u64 val;
int rc = 0;
if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) {
return 0;
}
if (write) {
- rc = lprocfs_write_frac_helper(buffer, *lenp,
- (unsigned int *)table->data,
+ rc = lprocfs_write_frac_u64_helper(buffer, *lenp, &val,
1 << (20 - PAGE_CACHE_SHIFT));
+ obd_max_dirty_pages = (unsigned long)val;
+
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
- CERROR("Refusing to set max dirty pages to %u, which "
+ CERROR("Refusing to set max dirty pages to %lu, which "
"is more than 90%% of available RAM; setting "
"to %lu\n", obd_max_dirty_pages,
((totalram_pages / 10) * 9));
int len;
len = lprocfs_read_frac_helper(buf, sizeof(buf),
- *(unsigned int *)table->data,
+ *(unsigned long *)table->data,
1 << (20 - PAGE_CACHE_SHIFT));
if (len > *lenp)
len = *lenp;
INIT_CTL_NAME
.procname = "max_dirty_mb",
.data = &obd_max_dirty_pages,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
.proc_handler = &proc_max_dirty_pages_in_mb
},
int rc;
rc = seq_printf(m,
- "used_mb: %d\n"
- "busy_cnt: %d\n",
- (atomic_read(&cli->cl_lru_in_list) +
- atomic_read(&cli->cl_lru_busy)) >> shift,
- atomic_read(&cli->cl_lru_busy));
+ "used_mb: %ld\n"
+ "busy_cnt: %ld\n",
+ (atomic_long_read(&cli->cl_lru_in_list) +
+ atomic_long_read(&cli->cl_lru_busy)) >> shift,
+ atomic_long_read(&cli->cl_lru_busy));
return rc;
}
{
struct obd_device *dev = ((struct seq_file *)file->private_data)->private;
struct client_obd *cli = &dev->u.cli;
- int pages_number, mult, rc;
+ __u64 val;
+ long pages_number;
+ long rc;
+ int mult;
char kernbuf[128];
if (count >= sizeof(kernbuf))
mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
kernbuf;
- rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
+ rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
+
if (rc)
return rc;
+ if (val > LONG_MAX)
+ return -ERANGE;
+ pages_number = (long)val;
+
if (pages_number < 0)
return -ERANGE;
- rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
+ rc = atomic_long_read(&cli->cl_lru_in_list) - pages_number;
if (rc > 0) {
struct lu_env *env;
int refcheck;
{
struct obd_device *dev = m->private;
struct client_obd *cli = &dev->u.cli;
- int pages, mb;
+ long pages;
+ int mb;
- pages = atomic_read(&cli->cl_unstable_count);
+ pages = atomic_long_read(&cli->cl_unstable_count);
mb = (pages * PAGE_CACHE_SIZE) >> 20;
- return seq_printf(m, "unstable_pages: %8d\n"
- "unstable_mb: %8d\n",
+ return seq_printf(m, "unstable_pages: %20ld\n"
+ "unstable_mb: %10d\n",
pages, mb);
}
LPROC_SEQ_FOPS_RO(osc_unstable_stats);
#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
"dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
- "lru {in list: %d, left: %d, waiters: %d }" fmt "\n", \
+ "lru {in list: %ld, left: %ld, waiters: %d }" fmt, \
__tmp->cl_import->imp_obd->obd_name, \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
- atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
__tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
- atomic_read(&__tmp->cl_lru_in_list), \
- atomic_read(&__tmp->cl_lru_busy), \
+ atomic_long_read(&__tmp->cl_lru_in_list), \
+ atomic_long_read(&__tmp->cl_lru_busy), \
atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
{
assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_inc(&obd_dirty_pages);
+ atomic_long_inc(&obd_dirty_pages);
cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
- atomic_dec(&obd_dirty_pages);
+ atomic_long_dec(&obd_dirty_pages);
cli->cl_dirty_pages--;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_dec(&obd_dirty_transit_pages);
+ atomic_long_dec(&obd_dirty_transit_pages);
cli->cl_dirty_transit--;
}
EXIT;
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
client_obd_list_lock(&cli->cl_loi_list_lock);
- atomic_sub(nr_pages, &obd_dirty_pages);
+ atomic_long_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
return 0;
if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
- 1 + atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
+ 1 + atomic_long_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit++;
- atomic_inc(&obd_dirty_transit_pages);
+ atomic_long_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
rc = 1;
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty_pages >= cli->cl_dirty_max_pages) ||
- (1 + atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
+ (1 + atomic_long_read(&obd_dirty_pages) >
+ obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
- "osc max %ld, sys max %d\n", cli->cl_dirty_pages,
- cli->cl_dirty_max_pages, obd_max_dirty_pages);
+ "osc max %ld, sys max %ld\n",
+ cli->cl_dirty_pages, cli->cl_dirty_max_pages,
+ obd_max_dirty_pages);
goto wakeup;
}
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd, pdl_policy_t p);
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force);
-int osc_lru_reclaim(struct client_obd *cli);
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force);
+long osc_lru_reclaim(struct client_obd *cli);
extern spinlock_t osc_ast_guard;
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
struct osc_object *osc = cl2osc(ios->cis_obj);
struct client_obd *cli = osc_cli(osc);
unsigned long c;
- unsigned int npages;
- unsigned int max_pages;
+ unsigned long npages;
+ unsigned long max_pages;
ENTRY;
if (cl_io_is_append(io))
if (npages > max_pages)
npages = max_pages;
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
if (c < npages && osc_lru_reclaim(cli) > 0)
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
while (c >= npages) {
- if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ if (c == atomic_long_cmpxchg(cli->cl_lru_left, c, c - npages)) {
oio->oi_lru_reserved = npages;
break;
}
- c = atomic_read(cli->cl_lru_left);
+ c = atomic_long_read(cli->cl_lru_left);
}
RETURN(0);
struct client_obd *cli = osc_cli(osc);
if (oio->oi_lru_reserved > 0) {
- atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ atomic_long_add(oio->oi_lru_reserved, cli->cl_lru_left);
oio->oi_lru_reserved = 0;
}
}
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list);
+ long pages = atomic_long_read(&cli->cl_lru_in_list);
unsigned long budget;
budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain faireness among OSCs. */
- if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
+ if (atomic_long_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
if (pages >= budget)
return lru_shrink_max;
else if (pages >= budget / 2)
{
struct list_head lru = LIST_HEAD_INIT(lru);
struct osc_async_page *oap;
- int npages = 0;
+ long npages = 0;
list_for_each_entry(oap, plist, oap_pending_item) {
struct osc_page *opg = oap2osc_page(oap);
if (npages > 0) {
client_obd_list_lock(&cli->cl_lru_list_lock);
list_splice_tail(&lru, &cli->cl_lru_list);
- atomic_sub(npages, &cli->cl_lru_busy);
- atomic_add(npages, &cli->cl_lru_in_list);
+ atomic_long_sub(npages, &cli->cl_lru_busy);
+ atomic_long_add(npages, &cli->cl_lru_in_list);
client_obd_list_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) > 0);
list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
+ atomic_long_dec(&cli->cl_lru_in_list);
}
/**
if (!list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
+ LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
+ atomic_long_dec(&cli->cl_lru_busy);
}
client_obd_list_unlock(&cli->cl_lru_list_lock);
- atomic_inc(cli->cl_lru_left);
+ atomic_long_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
* this osc occupies too many LRU pages and kernel is
* stealing one of them. */
client_obd_list_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
client_obd_list_unlock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
}
}
/**
* Drop @target of pages from LRU at most.
*/
-int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
- int target, bool force)
+long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ long target, bool force)
{
struct cl_io *io;
struct cl_object *clobj = NULL;
struct cl_page **pvec;
struct osc_page *opg;
+ long count = 0;
int maxscan = 0;
- int count = 0;
int index = 0;
int rc = 0;
ENTRY;
- LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
- if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
+ LASSERT(atomic_long_read(&cli->cl_lru_in_list) >= 0);
+ if (atomic_long_read(&cli->cl_lru_in_list) == 0 || target <= 0)
RETURN(0);
if (!force) {
io = &osc_env_info(env)->oti_io;
client_obd_list_lock(&cli->cl_lru_list_lock);
- maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
+ maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
bool will_free = false;
atomic_dec(&cli->cl_lru_shrinkers);
if (count > 0) {
- atomic_add(count, cli->cl_lru_left);
+ atomic_long_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
RETURN(count > 0 ? count : rc);
}
-static inline int max_to_shrink(struct client_obd *cli)
-{
- return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
-}
-
-int osc_lru_reclaim(struct client_obd *cli)
+long osc_lru_reclaim(struct client_obd *cli)
{
struct cl_env_nest nest;
struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
+ long rc = 0;
int max_scans;
- int rc = 0;
ENTRY;
LASSERT(cache != NULL);
if (rc == -EBUSY)
rc = 0;
- CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
+ CDEBUG(D_CACHE, "%s: Free %ld pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
GOTO(out, rc);
}
- CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
cli = list_entry(cache->ccc_lru.next, struct client_obd,
cl_lru_osc);
- CDEBUG(D_CACHE, "%s: cli %p LRU pages: %d, busy: %d.\n",
+ CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
cli->cl_import->imp_obd->obd_name, cli,
- atomic_read(&cli->cl_lru_in_list),
- atomic_read(&cli->cl_lru_busy));
+ atomic_long_read(&cli->cl_lru_in_list),
+ atomic_long_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
if (osc_cache_too_much(cli) > 0) {
out:
cl_env_nested_put(&nest, env);
- CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
+ CDEBUG(D_CACHE, "%s: cli %p freed %ld pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
}
goto out;
}
- LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ LASSERT(atomic_long_read(cli->cl_lru_left) >= 0);
+ while (!atomic_long_add_unless(cli->cl_lru_left, -1, 0)) {
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
cond_resched();
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0,
+ atomic_long_read(cli->cl_lru_left) > 0,
&lwi);
if (rc < 0)
break;
out:
if (rc >= 0) {
- atomic_inc(&cli->cl_lru_busy);
+ atomic_long_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
rc = 0;
}
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
int page_count = desc->bd_iov_count;
- int unstable_count;
+ long unstable_count;
LASSERT(page_count >= 0);
dec_unstable_page_accounting(desc);
- unstable_count = atomic_sub_return(page_count, &cli->cl_unstable_count);
+ unstable_count = atomic_long_sub_return(page_count,
+ &cli->cl_unstable_count);
LASSERT(unstable_count >= 0);
- unstable_count = atomic_sub_return(page_count,
+ unstable_count = atomic_long_sub_return(page_count,
&cli->cl_cache->ccc_unstable_nr);
LASSERT(unstable_count >= 0);
if (unstable_count == 0)
{
struct ptlrpc_bulk_desc *desc = req->rq_bulk;
struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
- obd_count page_count = desc->bd_iov_count;
+ long page_count = desc->bd_iov_count;
/* No unstable page tracking */
if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
return;
add_unstable_page_accounting(desc);
- atomic_add(page_count, &cli->cl_unstable_count);
- atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+ atomic_long_add(page_count, &cli->cl_unstable_count);
+ atomic_long_add(page_count, &cli->cl_cache->ccc_unstable_nr);
/* If the request has already been committed (i.e. brw_commit
* called via rq_commit_cb), we need to undo the unstable page
if (cli->cl_cache == NULL || !cli->cl_cache->ccc_unstable_check)
return false;
- osc_unstable_count = atomic_read(&cli->cl_unstable_count);
- unstable_nr = atomic_read(&cli->cl_cache->ccc_unstable_nr);
+ osc_unstable_count = atomic_long_read(&cli->cl_unstable_count);
+ unstable_nr = atomic_long_read(&cli->cl_cache->ccc_unstable_nr);
CDEBUG(D_CACHE,
"%s: cli: %p unstable pages: %lu, osc unstable pages: %lu\n",
cli->cl_dirty_pages, cli->cl_dirty_transit,
cli->cl_dirty_max_pages);
oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_dirty_pages) -
- atomic_read(&obd_dirty_transit_pages) >
- (long)(obd_max_dirty_pages + 1))) {
+ } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
+ atomic_long_read(&obd_dirty_transit_pages) >
+ (obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1). */
- CERROR("%s: dirty %d - %d > system dirty_max %d\n",
+ CERROR("%s: dirty %ld - %ld > system dirty_max %lu\n",
cli->cl_import->imp_obd->obd_name,
- atomic_read(&obd_dirty_pages),
- atomic_read(&obd_dirty_transit_pages),
+ atomic_long_read(&obd_dirty_pages),
+ atomic_long_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
oa->o_undirty = 0;
} else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
struct client_obd *cli = &obd->u.cli;
- int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
- int target = *(int *)val;
+ long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
+ long target = *(long *)val;
nr = osc_lru_shrink(env, cli, min(nr, target), true);
- *(int *)val -= nr;
+ *(long *)val -= nr;
RETURN(0);
}