LU-12142 readahead: limit over reservation
author Wang Shilong <wshilong@ddn.com>
Wed, 17 Mar 2021 09:58:00 +0000 (17:58 +0800)
committer Oleg Drokin <green@whamcloud.com>
Tue, 6 Apr 2021 03:01:47 +0000 (03:01 +0000)
For performance reasons, exceeding @ra_max_pages is allowed to
cover the current read window, but this should be limited to the
RPC size in case a large block size read is issued. Trim to the
RPC boundary.

Otherwise, too many readahead pages might be issued, leaving the
client short of LRU pages.
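As a rough sketch of the arithmetic (trim_to_rpc_boundary() below is a
hypothetical standalone helper, not the patched ll_readahead() code):
with 256 pages per RPC and a read window starting at page index 300,
only 256 - (300 % 256) = 212 pages remain before the RPC boundary, so
the minimum reservation is capped at 212 pages.

/* Minimal sketch of the RPC-boundary trim; the parameter names
 * mirror the patch, but this helper is illustrative only. */
#include <stdio.h>

static unsigned long trim_to_rpc_boundary(unsigned long pages_min,
                                          unsigned long start_idx,
                                          unsigned long rpc_pages)
{
        /* Pages remaining in the current RPC-sized window. */
        unsigned long remain = rpc_pages - (start_idx % rpc_pages);

        return pages_min < remain ? pages_min : remain;
}

int main(void)
{
        /* 256 pages per RPC (1 MiB with 4 KiB pages), read window
         * starts at page 300: cap is 256 - (300 % 256) = 212. */
        printf("%lu\n", trim_to_rpc_boundary(1024, 300, 256));
        return 0;
}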

Fixes: 777b04a093 ("LU-13386 llite: allow current readahead to exceed reservation")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Change-Id: Icf74b5fbc75cf836fedcad5184fcdf45c7b037b4
Reviewed-on: https://review.whamcloud.com/42060
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Tested-by: jenkins <devops@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/llite/lproc_llite.c
lustre/llite/rw.c

lustre/llite/lproc_llite.c
index 73f5e1a..23b67d4 100644
@@ -458,6 +458,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        struct super_block     *sb    = m->private;
        struct ll_sb_info      *sbi   = ll_s2sbi(sb);
        struct cl_client_cache *cache = sbi->ll_cache;
+       struct ll_ra_info *ra = &sbi->ll_ra_info;
        long max_cached_mb;
        long unused_mb;
 
@@ -465,16 +466,21 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
        max_cached_mb = PAGES_TO_MiB(cache->ccc_lru_max);
        unused_mb = PAGES_TO_MiB(atomic_long_read(&cache->ccc_lru_left));
        mutex_unlock(&cache->ccc_max_cache_mb_lock);
+
        seq_printf(m, "users: %d\n"
                      "max_cached_mb: %ld\n"
                      "used_mb: %ld\n"
                      "unused_mb: %ld\n"
-                     "reclaim_count: %u\n",
+                     "reclaim_count: %u\n"
+                     "max_read_ahead_mb: %lu\n"
+                     "used_read_ahead_mb: %d\n",
                   atomic_read(&cache->ccc_users),
                   max_cached_mb,
                   max_cached_mb - unused_mb,
                   unused_mb,
-                  cache->ccc_lru_shrinkers);
+                  cache->ccc_lru_shrinkers,
+                  PAGES_TO_MiB(ra->ra_max_pages),
+                  PAGES_TO_MiB(atomic_read(&ra->ra_cur_pages)));
        return 0;
 }
 
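With the two new fields, the max_cached_mb stats file backed by
ll_max_cached_mb_seq_show() (readable e.g. via
lctl get_param llite.*.max_cached_mb) reports readahead usage alongside
the LRU cache accounting. Sample output, with made-up values:

users: 5
max_cached_mb: 16000
used_mb: 432
unused_mb: 15568
reclaim_count: 0
max_read_ahead_mb: 1024
used_read_ahead_mb: 8

max_read_ahead_mb comes from ra_max_pages and used_read_ahead_mb from
ra_cur_pages, both converted with PAGES_TO_MiB().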
lustre/llite/rw.c
index 9202524..743e2c1 100644
@@ -777,6 +777,14 @@ static int ll_readahead(const struct lu_env *env, struct cl_io *io,
                        vio->vui_ra_start_idx + vio->vui_ra_pages - 1;
                pages_min = vio->vui_ra_start_idx + vio->vui_ra_pages -
                                ria->ria_start_idx;
+                /**
+                 * For performance reasons, exceeding @ra_max_pages
+                 * is allowed, but this should be limited to the RPC
+                 * size in case a large block size read is issued.
+                 * Trim to the RPC boundary.
+                 */
+               pages_min = min(pages_min, ras->ras_rpc_pages -
+                               (ria->ria_start_idx % ras->ras_rpc_pages));
        }
 
        /* don't over reserved for mmap range read */