LU-12142 readahead: limit over reservation
author Wang Shilong <wshilong@ddn.com>
Wed, 17 Mar 2021 09:58:00 +0000 (17:58 +0800)
committer Andreas Dilger <adilger@whamcloud.com>
Mon, 19 Jul 2021 18:05:47 +0000 (18:05 +0000)
For performance reasons, readahead is allowed to exceed
@ra_max_pages to cover the current read window, but the excess
should be limited to the RPC size in case a large block size read
is issued. Trim it to the RPC boundary.

Otherwise, too many readahead pages might be issued, leaving the
client short of LRU pages.
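
As a rough illustration of the trimming arithmetic (a minimal
userspace sketch, not the kernel code; ras_rpc_pages, ria_start_idx
and pages_min are the only names taken from the patch, everything
else is hypothetical):

#include <stdio.h>

/* Simplified stand-ins for the kernel readahead state. */
struct ra_sketch {
        unsigned long ras_rpc_pages;  /* pages per RPC, e.g. 256 = 1 MiB of 4 KiB pages */
        unsigned long ria_start_idx;  /* first page index of the read window */
};

/* Trim the extra reservation so it never crosses the current RPC boundary. */
static unsigned long trim_to_rpc_boundary(const struct ra_sketch *ra,
                                          unsigned long pages_min)
{
        unsigned long to_boundary = ra->ras_rpc_pages -
                                    (ra->ria_start_idx % ra->ras_rpc_pages);

        return pages_min < to_boundary ? pages_min : to_boundary;
}

int main(void)
{
        /* 1 MiB RPCs (256 x 4 KiB pages), read window starting at page 1000:
         * 1000 % 256 = 232, so only 24 pages remain before the RPC boundary
         * and a 512-page over-reservation is trimmed down to 24. */
        struct ra_sketch ra = { .ras_rpc_pages = 256, .ria_start_idx = 1000 };

        printf("pages_min = %lu\n", trim_to_rpc_boundary(&ra, 512));
        return 0;
}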

Lustre-commit: 1058867c004bf19774218945631a691e8210b502
Lustre-change: https://review.whamcloud.com/42060

Fixes: 777b04a093 ("LU-13386 llite: allow current readahead to exceed reservation")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Change-Id: Icf74b5fbc75cf836fedcad5184fcdf45c7b037b4
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-on: https://review.whamcloud.com/43455
Reviewed-by: Wang Shilong <wshilong@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
lustre/llite/lproc_llite.c
lustre/llite/rw.c

lustre/llite/lproc_llite.c
index 5ad3d62..094b12f 100644
@@ -450,6 +450,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        struct super_block     *sb    = m->private;
        struct ll_sb_info      *sbi   = ll_s2sbi(sb);
        struct cl_client_cache *cache = sbi->ll_cache;
+       struct ll_ra_info *ra = &sbi->ll_ra_info;
        long max_cached_mb;
        long unused_mb;
 
@@ -457,16 +458,21 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
        unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
        mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
        seq_printf(m, "users: %d\n"
                      "max_cached_mb: %ld\n"
                      "used_mb: %ld\n"
                      "unused_mb: %ld\n"
-                     "reclaim_count: %u\n",
+                     "reclaim_count: %u\n"
+                     "max_read_ahead_mb: %lu\n"
+                     "used_read_ahead_mb: %d\n",
                   atomic_read(&cache->ccc_users),
                   max_cached_mb,
                   max_cached_mb - unused_mb,
                   unused_mb,
-                  cache->ccc_lru_shrinkers);
+                  cache->ccc_lru_shrinkers,
+                  PAGES_TO_MiB(ra->ra_max_pages),
+                  PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
        return 0;
 }
 
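With the lproc_llite.c change above, the readahead reservation shows
up next to the page-cache accounting in the max_cached_mb parameter.
A sample query (lctl get_param is the standard interface; all values
below are purely illustrative):

$ lctl get_param llite.*.max_cached_mb
llite.testfs-ffff8c0000000000.max_cached_mb=
users: 5
max_cached_mb: 7491
used_mb: 32
unused_mb: 7459
reclaim_count: 0
max_read_ahead_mb: 1024
used_read_ahead_mb: 64
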
lustre/llite/rw.c
index 1e460f0..958d848 100644
@@ -812,6 +812,14 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io,
                        vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
                pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
                                ria->ria_start_idx;
+                /**
+                 * For performance reasons, exceeding @ra_max_pages is
+                 * allowed, but the excess should be limited to the RPC
+                 * size in case a large block size read is issued. Trim
+                 * to the RPC boundary.
+                 */
+               pages_min = min(pages_min, ras->ras_rpc_pages -
+                               (ria->ria_start_idx % ras->ras_rpc_pages));
        }
 
        /* don't over reserved for mmap range read */