 * otherwise it will form small read RPCs (< 1M), which hurt server
* performance a lot.
*/
- ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), len);
+ ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
GOTO(out, ret = 0);
- if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
- atomic_sub(ret, &ra->ra_cur_pages);
+ if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
+ cfs_atomic_sub(ret, &ra->ra_cur_pages);
ret = 0;
}
out:
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
- atomic_sub(len, &ra->ra_cur_pages);
+ cfs_atomic_sub(len, &ra->ra_cur_pages);
}
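
The two hunks above implement a reserve-and-rollback budget: grab up to "len" pages
from the global readahead quota, but refuse grants too small to fill one full-sized
read RPC, and undo the optimistic atomic_add_return() if a racing thread pushed the
count past ra_max_pages. Below is a minimal standalone sketch of that pattern for
reference only, written against plain Linux atomics (on Linux the cfs_atomic_*
wrappers map onto these); it assumes <linux/atomic.h> and the kernel min() macro,
and the names ra_budget_get/ra_budget_put, in_use, budget, want and min_chunk are
hypothetical, not identifiers from this patch.

static unsigned long ra_budget_get(atomic_t *in_use, unsigned long budget,
                                   unsigned long want, unsigned long min_chunk)
{
        unsigned long grant;

        grant = min(budget - atomic_read(in_use), want);
        /* refuse grants too small to fill one full-sized read RPC */
        if ((int)grant < 0 || grant < min(min_chunk, want))
                return 0;
        /* optimistically commit, then roll back if we raced past the budget */
        if (atomic_add_return(grant, in_use) > budget) {
                atomic_sub(grant, in_use);
                grant = 0;
        }
        return grant;
}

static void ra_budget_put(atomic_t *in_use, unsigned long grant)
{
        /* give the reserved pages back, mirroring ll_ra_count_put() */
        atomic_sub(grant, in_use);
}
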
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
rar->lrr_reader = current;
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ cfs_spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ cfs_spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
{
struct ll_ra_read *scan;
- list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
+ cfs_list_for_each_entry(scan, &ras->ras_read_beads, lrr_linkage) {
if (scan->lrr_reader == current)
return scan;
}
ras = ll_ras_get(f);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
bead = ll_ra_read_get_locked(ras);
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
return bead;
}
RETURN(0);
}
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (vio->cui_ra_window_set)
bead = &vio->cui_bead;
else
ria->ria_length = ras->ras_stride_length;
ria->ria_pages = ras->ras_stride_pages;
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
ras->ras_window_len)) {
ras->ras_next_readahead = ra_end;
RAS_CDEBUG(ras);
}
- spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&ras->ras_lock);
}
RETURN(ret);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- spin_lock_init(&ras->ras_lock);
+ cfs_spin_lock_init(&ras->ras_lock);
ras_reset(ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
int zero = 0, stride_detect = 0, ra_miss = 0;
ENTRY;
- spin_lock(&sbi->ll_lock);
- spin_lock(&ras->ras_lock);
+ cfs_spin_lock(&sbi->ll_lock);
+ cfs_spin_lock(&ras->ras_lock);
ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
out_unlock:
RAS_CDEBUG(ras);
ras->ras_request_index++;
- spin_unlock(&ras->ras_lock);
- spin_unlock(&sbi->ll_lock);
+ cfs_spin_unlock(&ras->ras_lock);
+ cfs_spin_unlock(&sbi->ll_lock);
return;
}