Whamcloud - gitweb
LU-14616 readahead: fix reserving for unaligned read
authorWang Shilong <wshilong@ddn.com>
Tue, 20 Apr 2021 01:47:25 +0000 (09:47 +0800)
committerAndreas Dilger <adilger@whamcloud.com>
Thu, 15 Jul 2021 08:56:59 +0000 (08:56 +0000)
If a read is [2K, 3K] on an x86 platform, we only need
to read one page, but it was calculated as 2 pages.

This could be a problem, as we would reserve more
page credits than needed; vvp_page_completion_read() will only
free the actually-read pages, which causes @ra_cur_pages
to be leaked.

Lustre-change: https://review.whamcloud.com/43377/
Lustre-commit: 5e7e9240d27a4b74127ea7a26d910ae41a6e1cb1

Fixes: d4a54de84c0 ("LU-12367 llite: Fix page count for unaligned reads")
Signed-off-by: Wang Shilong <wshilong@ddn.com>
Change-Id: I3cf03965196c1af833184d9cfc16779f79f5722c
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Bobi Jam <bobijam@hotmail.com>
Reviewed-on: https://review.whamcloud.com/44239
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
lustre/llite/rw.c
lustre/llite/vvp_io.c
lustre/tests/sanity.sh

index 15eb9ff..30d3207 100644 (file)
@@ -95,6 +95,13 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
         * LRU pages, otherwise, it could cause deadlock.
         */
        pages = min(sbi->ll_cache->ccc_lru_max >> 2, pages);
+       /**
+        * if this happens, we reserve more pages than needed,
+        * this will make us leak @ra_cur_pages, because
+        * ll_ra_count_put() actually freed @pages.
+        */
+       if (WARN_ON_ONCE(pages_min > pages))
+               pages_min = pages;
 
        /*
         * If read-ahead pages left are less than 1M, do not do read-ahead,
index 5879fc4..b7062b8 100644 (file)
@@ -804,6 +804,7 @@ static int vvp_io_read_start(const struct lu_env *env,
        int exceed = 0;
        int result;
        struct iov_iter iter;
+       pgoff_t page_offset;
 
        ENTRY;
 
@@ -848,14 +849,20 @@ static int vvp_io_read_start(const struct lu_env *env,
        if (!vio->vui_ra_valid) {
                vio->vui_ra_valid = true;
                vio->vui_ra_start_idx = cl_index(obj, pos);
-               vio->vui_ra_pages = cl_index(obj, tot + PAGE_SIZE - 1);
-               /* If both start and end are unaligned, we read one more page
-                * than the index math suggests. */
-               if ((pos & ~PAGE_MASK) != 0 && ((pos + tot) & ~PAGE_MASK) != 0)
+               vio->vui_ra_pages = 0;
+               page_offset = pos & ~PAGE_MASK;
+               if (page_offset) {
                        vio->vui_ra_pages++;
+                       if (tot > PAGE_SIZE - page_offset)
+                               tot -= (PAGE_SIZE - page_offset);
+                       else
+                               tot = 0;
+               }
+               vio->vui_ra_pages += (tot + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
                CDEBUG(D_READA, "tot %zu, ra_start %lu, ra_count %lu\n",
-                      tot, vio->vui_ra_start_idx, vio->vui_ra_pages);
+                      vio->vui_tot_count, vio->vui_ra_start_idx,
+                      vio->vui_ra_pages);
        }
 
        /* BUG: 5972 */
index fb59afb..1b7506b 100755 (executable)
@@ -9651,6 +9651,8 @@ test_101e() {
                dd if=$file.$i of=/dev/null bs=$bsize count=$size_KB 2>/dev/null
        done
 
+       $LCTL get_param llite.*.max_cached_mb
+       $LCTL get_param llite.*.read_ahead_stats
        local miss=$($LCTL get_param -n llite.*.read_ahead_stats |
                     get_named_value 'misses' | cut -d" " -f1 | calc_total)