From 93b6adce588ab0aa6203f980cdfca35a19887d00 Mon Sep 17 00:00:00 2001
From: yangsheng
Date: Thu, 18 Aug 2011 02:43:49 +0800
Subject: [PATCH] LU-15 slow IO with read-intensive application

Align the readahead extent to a 1M boundary after it is trimmed by
ra_max_pages.

Change-Id: I4102d2fe956fd01457949f0eb7c63654b5c2d095
Signed-off-by: Wang Di
Signed-off-by: Yang Sheng
Reviewed-on: http://review.whamcloud.com/1255
Tested-by: Hudson
Reviewed-by: Jinshan Xiong
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/llite/rw.c     | 15 ++++++++++++---
 lustre/llite/vvp_io.c |  6 ++++--
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index d5e012e..f8fab63 100644
--- a/lustre/llite/rw.c
+++ b/lustre/llite/rw.c
@@ -338,7 +338,8 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
  * ll_ra_count_get at the exactly same time. All of them will get a zero ra
  * window, although the global window is 100M. -jay
  */
-static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
+static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, struct ra_io_arg *ria,
+                                     unsigned long len)
 {
         struct ll_ra_info *ra = &sbi->ll_ra_info;
         unsigned long ret;
@@ -349,14 +350,23 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
         /* If read-ahead pages left are less than 1M, do not do read-ahead,
          * otherwise it will form small read RPC(< 1M), which hurt server
          * performance a lot. */
+        if (ra->ra_max_pages < cfs_atomic_read(&ra->ra_cur_pages))
+                GOTO(out, ret = 0);
+
         ret = min(ra->ra_max_pages - cfs_atomic_read(&ra->ra_cur_pages), len);
         if ((int)ret < 0 || ret < min((unsigned long)PTLRPC_MAX_BRW_PAGES, len))
                 GOTO(out, ret = 0);
 
+        if (ria->ria_pages == 0)
+                /* align to 1M again after trimming by ra_max_pages */
+                if (ret >= ((ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES))
+                        ret -= (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;
+
         if (cfs_atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
                 cfs_atomic_sub(ret, &ra->ra_cur_pages);
                 ret = 0;
         }
+
 out:
         RETURN(ret);
 }
@@ -787,8 +797,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
         if (len == 0)
                 RETURN(0);
 
-        reserved = ll_ra_count_get(ll_i2sbi(inode), len);
-
+        reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
         if (reserved < len)
                 ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
 
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index cbf02cd..9f92f9e 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -792,7 +792,8 @@ static int vvp_io_read_page(const struct lu_env *env,
 
         ENTRY;
 
-        if (sbi->ll_ra_info.ra_max_pages_per_file)
+        if (sbi->ll_ra_info.ra_max_pages_per_file &&
+            sbi->ll_ra_info.ra_max_pages)
                 ras_update(sbi, inode, ras, page->cp_index,
                            cp->cpg_defer_uptodate);
 
@@ -815,7 +816,8 @@ static int vvp_io_read_page(const struct lu_env *env,
          * this will unlock it automatically as part of cl_page_list_disown().
          */
         cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages_per_file)
+        if (sbi->ll_ra_info.ra_max_pages_per_file &&
+            sbi->ll_ra_info.ra_max_pages)
                 ll_readahead(env, io, ras, vmpage->mapping,
                              &queue->c2_qin, fd->fd_flags);
 
-- 
1.8.3.1
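
For reference, the new alignment logic in ll_ra_count_get() can be exercised
in isolation. The following is a minimal sketch, assuming 4K pages so that
PTLRPC_MAX_BRW_PAGES is 256 pages (one 1M RPC); align_ra_extent() and the
sample values are hypothetical and only mirror the modulo arithmetic added
by the hunk above, not the surrounding Lustre accounting:

/*
 * Hypothetical standalone sketch of the 1M re-alignment performed in
 * ll_ra_count_get().  Assumes 4K pages, so PTLRPC_MAX_BRW_PAGES is
 * 256 pages (one 1M RPC).  Not part of the patch.
 */
#include <stdio.h>

#define PTLRPC_MAX_BRW_PAGES 256UL

static unsigned long align_ra_extent(unsigned long ria_start,
                                     unsigned long ret)
{
        /* Pages by which the extent end overshoots the last 1M boundary. */
        unsigned long tail = (ria_start + ret) % PTLRPC_MAX_BRW_PAGES;

        /* Shrink only if the window survives the trim; a window smaller
         * than the tail would drop to zero, so it is left unaligned. */
        if (ret >= tail)
                ret -= tail;
        return ret;
}

int main(void)
{
        /* A reservation capped by ra_max_pages to 300 pages starting at
         * page 100 would end at page 400, leaving a 144-page (<1M) final
         * RPC; alignment trims the extent to end at page 256 instead. */
        printf("%lu\n", align_ra_extent(100, 300));  /* prints 156 */
        printf("%lu\n", align_ra_extent(0, 300));    /* prints 256 */
        return 0;
}

The ret >= tail guard keeps a reservation that is already smaller than the
overshoot as-is rather than reducing it to zero. Without the trim, a window
capped by ra_max_pages can end mid-RPC and issue a final read RPC smaller
than 1M, which is the server-side cost the commit message describes.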