From 994e7fd8a7988e31af11e4a0fc2eb17e61a9941f Mon Sep 17 00:00:00 2001 From: Artem Blagodarenko Date: Sun, 28 Jan 2024 20:24:31 +0000 Subject: [PATCH] EX-8598 tests: use alternative data source for rewriting Using the same file as input has disadvantages. It is not possible to understand that data was not rewritten at all. Alternative data source should be used. Let's shift source file data and use it as a source. To check rewriting result the same operation is performed on the destination file copy stored outside the Lustre FS. Signed-off-by: Artem Blagodarenko Test-Parameters: trivial testlist=sanity-compr env=ONLY=1004 Change-Id: I6ef400520359bfe9156c3f47e757064863bdf4e0 Reviewed-on: https://review.whamcloud.com/c/ex/lustre-release/+/53088 Reviewed-by: Andreas Dilger Reviewed-by: Jian Yu Tested-by: jenkins Tested-by: Maloo --- lustre/tests/sanity-compr.sh | 54 ++++++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/lustre/tests/sanity-compr.sh b/lustre/tests/sanity-compr.sh index 75a539c..06fa48d 100644 --- a/lustre/tests/sanity-compr.sh +++ b/lustre/tests/sanity-compr.sh @@ -608,6 +608,8 @@ test_1004() { local hdf=$LUSTRE/tests/AMSR_E_L3_DailyOcean_V05_20111003.hdf local tmp_hdf=$TMP/$tfile.hdf local source=$tmp_hdf + local cpy_hdf=$TMP/$tfile.cpy.hdf + local source=$cpy_hdf # Larger than arm page size local chunksize=128 @@ -621,9 +623,13 @@ test_1004() { return 0 fi + # Use shifted hdf as source of data + dd if=$tmp_hdf of=$cpy_hdf seek=7k || + error "failed creating an alternative data source" + # Fail test if source size changes so we catch this # Source should be a few MiB in size - $CHECKSTAT -s 14625450 $source || error "checkstat wrong size" + $CHECKSTAT -s 14625450 $tmp_hdf || error "checkstat wrong size" stack_trap "rm -f $tf; disable_compression" enable_compression @@ -633,47 +639,61 @@ test_1004() { error "set a compress component in $tf failed" # Create file and verify - trivial - dd if=$source 
bs=${chunksize}K of=$tf || error "(0) dd failed" - flush_and_compare $source $tf "(1)" + dd if=$tmp_hdf bs=${chunksize}K of=$tf || error "(0) dd failed" + flush_and_compare $tmp_hdf $tf "(1)" # Do a single 4K write - this will require a read-modify-write because # it is less than chunk size - dd if=$source bs=4K of=$tf count=1 conv=notrunc || error "(2) dd failed" - flush_and_compare $source $tf "(3)" + dd if=$source bs=4K of=$tf count=1 conv=notrunc || error "(2.1) dd failed" + dd if=$source bs=4K of=$tmp_hdf count=1 conv=notrunc || + error "(2.2) dd failed" + flush_and_compare $tmp_hdf $tf "(3)" # A single write of > chunk_size but < 2 chunks dd if=$source bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc || - error "(4) dd failed" - flush_and_compare $source $tf "(5)" + error "(4.1) dd failed" + dd if=$source bs=$((chunksize * 3/2))K of=$tmp_hdf count=1 \ + conv=notrunc || error "(4.2) dd failed" + flush_and_compare $tmp_hdf $tf "(5)" # Same test but offset slightly in to the file dd if=$source seek=1 skip=1 bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc || - error "(6) dd failed" - flush_and_compare $source $tf "(7)" + error "(6.1) dd failed" + dd if=$source seek=1 skip=1 bs=$((chunksize * 3/2))K of=$tmp_hdf \ + count=1 conv=notrunc || error "(6.2) dd failed" + flush_and_compare $tmp_hdf $tf "(7)" # Later tests use fsync to force read-modify-write # larger dd size with fsync (writing 4K at a time with fsync is slow) dd if=$source bs=$((chunksize / 2)) of=$tf conv=fsync,notrunc || - error "(8) dd failed" - flush_and_compare $source $tf "(9)" + error "(8.1) dd failed" + dd if=$source bs=$((chunksize / 2)) of=$tmp_hdf conv=fsync,notrunc || + error "(8.2) dd failed" + flush_and_compare $tmp_hdf $tf "(9)" # Larger than chunk size dd if=$source bs=$((chunksize * 3/2)) of=$tf conv=fsync,notrunc || - error "(10) dd failed" - flush_and_compare $source $tf "(11)" + error "(10.1) dd failed" + dd if=$source bs=$((chunksize * 3/2)) of=$tmp_hdf conv=fsync,notrunc || 
+ error "(10.2) dd failed" + flush_and_compare $tmp_hdf $tf "(11)" # The above test is full pages on x86, sometimes partial on ARM # This will explicitly test readup of partial chunks as part of a write # This writes one full page, then part of the next page # This forces a partial page read before we can do the write dd if=$source bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc || - error "(12) dd failed" - flush_and_compare $source $tf "(13)" + error "(12.1) dd failed" + dd if=$source bs=$((PAGE_SIZE * 2 - 1024)) of=$tmp_hdf conv=notrunc || + error "(12.2) dd failed" + flush_and_compare $tmp_hdf $tf "(13)" # Do the same test at an offset in the file dd if=$source seek=4 skip=4 bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc || - error "(14) dd failed" - flush_and_compare $source $tf "(15)" + error "(14.1) dd failed" + dd if=$source seek=4 skip=4 bs=$((PAGE_SIZE * 2 - 1024)) of=$tmp_hdf conv=notrunc || + error "(14.2) dd failed" + flush_and_compare $tmp_hdf $tf "(15)" } run_test 1004 "initial test for write updating" -- 1.8.3.1