LU-14616 readahead: fix reserving for unaligned read 77/43377/7
author Wang Shilong <wshilong@ddn.com>
Tue, 20 Apr 2021 01:47:25 +0000 (09:47 +0800)
committer Oleg Drokin <green@whamcloud.com>
Wed, 5 May 2021 02:50:57 +0000 (02:50 +0000)
If a read is [2K, 3K] on an x86 platform, we only need to
read one page, but it was calculated as 2 pages.

This can be a problem: we would reserve more page credits
than needed, while vvp_page_completion_read() only frees
the pages actually read, which causes @ra_cur_pages to
leak.
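
As a minimal standalone sketch of the page-count arithmetic for the
example above (assuming 4K pages; PG_SIZE, old_ra_pages() and
new_ra_pages() are illustrative names, not the Lustre helpers):

/*
 * Sketch of the readahead page-count math before and after the fix,
 * for a read of count bytes starting at file offset pos.
 */
#include <stdio.h>

#define PG_SIZE	4096UL
#define PG_MASK	(~(PG_SIZE - 1))

/* old formula: round the byte count up to pages, then add one more
 * page whenever both the start and the end offsets are unaligned */
static unsigned long old_ra_pages(unsigned long pos, unsigned long count)
{
	unsigned long pages = (count + PG_SIZE - 1) / PG_SIZE;

	if ((pos & ~PG_MASK) != 0 && ((pos + count) & ~PG_MASK) != 0)
		pages++;
	return pages;
}

/* new formula: count the partial first page explicitly, subtract the
 * bytes it covers, then round the remaining bytes up to whole pages */
static unsigned long new_ra_pages(unsigned long pos, unsigned long count)
{
	unsigned long page_offset = pos & ~PG_MASK;
	unsigned long pages = 0;

	if (page_offset) {
		pages++;
		if (count > PG_SIZE - page_offset)
			count -= PG_SIZE - page_offset;
		else
			count = 0;
	}
	return pages + (count + PG_SIZE - 1) / PG_SIZE;
}

int main(void)
{
	/* a 1K read at offset 2K stays inside a single 4K page */
	printf("old: %lu page(s), new: %lu page(s)\n",
	       old_ra_pages(2048, 1024), new_ra_pages(2048, 1024));
	/* prints: old: 2 page(s), new: 1 page(s) */
	return 0;
}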

Fixes: d4a54de84c0 ("LU-12367 llite: Fix page count for unaligned reads")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Change-Id: I3cf03965196c1af833184d9cfc16779f79f5722c
Reviewed-on: https://review.whamcloud.com/43377
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/llite/rw.c
lustre/llite/vvp_io.c
lustre/tests/sanity.sh

index 126c583..3f16b3e 100644 (file)
@@ -94,6 +94,13 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
         * LRU pages, otherwise, it could cause deadlock.
         */
        pages = min(sbi->ll_cache->ccc_lru_max >> 2, pages);
+       /**
+        * If this happens, we would reserve more pages than needed,
+        * which leaks @ra_cur_pages, because ll_ra_count_put()
+        * actually frees @pages.
+        */
+       if (WARN_ON_ONCE(pages_min > pages))
+               pages_min = pages;
 
        /*
         * If read-ahead pages left are less than 1M, do not do read-ahead,
index c001d85..b014b4f 100644 (file)
@@ -813,6 +813,7 @@ static int vvp_io_read_start(const struct lu_env *env,
        int exceed = 0;
        int result;
        struct iov_iter iter;
+       pgoff_t page_offset;
 
        ENTRY;
 
@@ -857,14 +858,20 @@ static int vvp_io_read_start(const struct lu_env *env,
        if (!vio->vui_ra_valid) {
                vio->vui_ra_valid = true;
                vio->vui_ra_start_idx = cl_index(obj, pos);
-               vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
-               /* If both start and end are unaligned, we read one more page
-                * than the index math suggests. */
-               if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+               vio->vui_ra_pages = 0;
+               page_offset = pos & ~PAGE_MASK;
+               if (page_offset) {
                        vio->vui_ra_pages++;
+                       if (tot > PAGE_SIZE - page_offset)
+                               tot -= (PAGE_SIZE - page_offset);
+                       else
+                               tot = 0;
+               }
+               vio->vui_ra_pages += (tot + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
                CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
-                      tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
+                      vio->vui_tot_count, vio->vui_ra_start_idx,
+                      vio->vui_ra_pages);
        }
 
        /* BUG: 5972 */
index 3b16190..cd00e69 100755 (executable)
@@ -9885,6 +9885,8 @@ test_101e() {
                dd if=$file.$i of=/dev/null bs=$bsize count=$size_KB 2>/dev/null
        done
 
+       $LCTL get_param llite.*.max_cached_mb
+       $LCTL get_param llite.*.read_ahead_stats
        local miss=$($LCTL get_param -n llite.*.read_ahead_stats |
                     get_named_value 'misses' | calc_total)