 * ll_ra_count_get at exactly the same time. All of them will get a zero ra
 * window, although the global window is 100M. -jay
*/
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
+ unsigned long len)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
unsigned long ret;
 /* If read-ahead pages left are less than 1M, do not do read-ahead,
 * otherwise it will form small read RPCs (< 1M), which hurt server
 * performance a lot.
*/
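+ /* Bail out early if the global read-ahead budget is already
+ * exhausted; this also keeps the unsigned subtraction below
+ * from underflowing. */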
+ if (ra->ra_max_pages < cfs_atomic_read(&ra->ra_cur_pages))
+ GOTO(out, ret = 0);
+
ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
GOTO(out, ret = 0);
+ if (ria->ria_pages == 0)
+ /* the window needs to be 1M-aligned again after being
+ * trimmed by ra_max_pages */
+ if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
+ ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
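+ /* e.g. assuming 4K pages (PTLRPC_MAX_BRW_PAGES == 256): with
+ * ria_start = 100 and ret = 300, (100 + 300) % 256 = 144, so
+ * ret becomes 156 and the window ends on a 1M RPC boundary. */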
+
if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
cfs_atomic_sub(ret, &ra->ra_cur_pages);
ret = 0;
}
+
out:
RETURN(ret);
}
if (len == 0)
RETURN(0);
- reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
if (reserved < len)
ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
ENTRY;
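+ /* Update the read-ahead state only when read-ahead is enabled
+ * both per file and globally. */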
- if (sbi->ll_ra_info.ra_max_pages_per_file)
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
ras_update(sbi, inode, ras, page->cp_index,
cp->cpg_defer_uptodate);
* this will unlock it automatically as part of cl_page_list_disown().
*/
cl_2queue_add(queue, page);
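+ /* Kick off read-ahead only when it is enabled both per file
+ * and globally. */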
- if (sbi->ll_ra_info.ra_max_pages_per_file)
+ if (sbi->ll_ra_info.ra_max_pages_per_file &&
+ sbi->ll_ra_info.ra_max_pages)
ll_readahead(env, io, ras,
vmpage->mapping, &queue->c2_qin, fd->fd_flags);