From 1058867c004bf19774218945631a691e8210b502 Mon Sep 17 00:00:00 2001
From: Wang Shilong
Date: Wed, 17 Mar 2021 17:58:00 +0800
Subject: [PATCH 1/1] LU-12142 readahead: limit over reservation

For performance reason, exceeding @ra_max_pages are allowed to
cover current read window, but this should be limited with RPC
size in case a large block size read is issued. Trim to RPC
boundary. Otherwise, too many read ahead pages might be issued
and make the client short of LRU pages.

Fixes: 777b04a093 ("LU-13386 llite: allow current readahead to exceed reservation")
Signed-off-by: Wang Shilong
Change-Id: Icf74b5fbc75cf836fedcad5184fcdf45c7b037b4
Reviewed-on: https://review.whamcloud.com/42060
Reviewed-by: Andreas Dilger
Tested-by: jenkins
Reviewed-by: Bobi Jam
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 lustre/llite/lproc_llite.c | 10 ++++++++--
 lustre/llite/rw.c          |  8 ++++++++
 2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/lustre/llite/lproc_llite.c b/lustre/llite/lproc_llite.c
index 73f5e1a..23b67d4 100644
--- a/lustre/llite/lproc_llite.c
+++ b/lustre/llite/lproc_llite.c
@@ -458,6 +458,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 	struct cl_client_cache *cache = sbi->ll_cache;
+	struct ll_ra_info *ra = &sbi->ll_ra_info;
 	long max_cached_mb;
 	long unused_mb;
 
@@ -465,16 +466,21 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 	max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
 	unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
 	mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
 	seq_printf(m, "users: %d\n"
 		   "max_cached_mb: %ld\n"
 		   "used_mb: %ld\n"
 		   "unused_mb: %ld\n"
-		   "reclaim_count: %u\n",
+		   "reclaim_count: %u\n"
+		   "max_read_ahead_mb: %lu\n"
+		   "used_read_ahead_mb: %d\n",
 		   atomic_read(&cache->ccc_users),
 		   max_cached_mb,
 		   max_cached_mb - unused_mb,
 		   unused_mb,
-		   cache->ccc_lru_shrinkers);
+		   cache->ccc_lru_shrinkers,
+		   PAGES_TO_MiB(ra->ra_max_pages),
+		   PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
 	return 0;
 }
 
diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c
index 9202524..743e2c1 100644
--- a/lustre/llite/rw.c
+++ b/lustre/llite/rw.c
@@ -777,6 +777,14 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io,
 			vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
 		pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
 				ria->ria_start_idx;
+		/**
+		 * For performance reason, exceeding @ra_max_pages
+		 * are allowed, but this should be limited with RPC
+		 * size in case a large block size read issued. Trim
+		 * to RPC boundary.
+		 */
+		pages_min = min(pages_min, ras->ras_rpc_pages -
+				(ria->ria_start_idx % ras->ras_rpc_pages));
 	}
 
 	/* don't over reserved for mmap range read */
-- 
1.8.3.1