For performance reasons, readahead is allowed to exceed @ra_max_pages to
cover the current read window, but this should be limited by the RPC
size in case a large block size read is issued. Trim to the RPC
boundary. Otherwise, too many readahead pages might be issued and
leave the client short of LRU pages.
Fixes: 777b04a093 ("LU-13386 llite: allow current readahead to exceed reservation")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Change-Id: Icf74b5fbc75cf836fedcad5184fcdf45c7b037b4
Reviewed-on: https://review.whamcloud.com/42060
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
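
To make the trim concrete, here is a minimal stand-alone sketch of the
arithmetic (the helper name trim_to_rpc and the sample numbers are
hypothetical; the real logic is the min() added in the diff below):

	#include <stdio.h>

	/* Hypothetical stand-alone illustration: cap the pages reserved
	 * beyond @ra_max_pages so the excess never crosses the current
	 * RPC boundary.
	 */
	static unsigned long trim_to_rpc(unsigned long pages_min,
					 unsigned long rpc_pages,
					 unsigned long start_idx)
	{
		unsigned long to_boundary = rpc_pages - (start_idx % rpc_pages);

		return pages_min < to_boundary ? pages_min : to_boundary;
	}

	int main(void)
	{
		/* e.g. a read starting 64 pages into a 256-page (1 MiB) RPC
		 * that would otherwise reserve 4096 extra pages is capped
		 * at 192 pages.
		 */
		printf("%lu\n", trim_to_rpc(4096, 256, 64));
		return 0;
	}
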
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = sbi->ll_cache;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
long max_cached_mb;
long unused_mb;
max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
mutex_unlock(&cache->ccc_max_cache_mb_lock);
seq_printf(m, "users: %d\n"
"max_cached_mb: %ld\n"
"used_mb: %ld\n"
"unused_mb: %ld\n"
seq_printf(m, "users: %d\n"
"max_cached_mb: %ld\n"
"used_mb: %ld\n"
"unused_mb: %ld\n"
+ "reclaim_count: %u\n"
+ "max_read_ahead_mb: %lu\n"
+ "used_read_ahead_mb: %d\n",
atomic_read(&cache->ccc_users),
max_cached_mb,
max_cached_mb - unused_mb,
unused_mb,
- cache->ccc_lru_shrinkers);
+ cache->ccc_lru_shrinkers,
+ PAGES_TO_MiB(ra->ra_max_pages),
+ PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
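
With this change, the llite max_cached_mb parameter also reports the
readahead quota and its current usage. As a rough illustration (values
are made up), lctl get_param llite.*.max_cached_mb would print something
like:

	users: 5
	max_cached_mb: 7392
	used_mb: 512
	unused_mb: 6880
	reclaim_count: 0
	max_read_ahead_mb: 1024
	used_read_ahead_mb: 16
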
vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
ria->ria_start_idx;
+ /**
+ * For performance reasons, exceeding @ra_max_pages
+ * is allowed, but this should be limited by the RPC
+ * size in case a large block size read is issued.
+ * Trim to the RPC boundary.
+ */
+ pages_min = min(pages_min, ras->ras_rpc_pages -
+ (ria->ria_start_idx % ras->ras_rpc_pages));
}
/* don't over reserved for mmap range read */
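
As a rough worked example (numbers are illustrative): with
ras_rpc_pages = 256 and ria_start_idx = 1088, the offset within the
current RPC is 1088 % 256 = 64, so pages_min is capped at 256 - 64 = 192
pages. Without the cap, a large block size read could reserve thousands
of pages beyond @ra_max_pages in one pass; with it, the excess is
bounded by at most one RPC worth of pages, which is what keeps the
client from running short of LRU pages.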