skip "Need MDS version at least 2.14.0.91"
local tf=$DIR/$tfile
- # We read from $tfile to this
- local tf_copy=$DIR/$tfile.copy
# Larger than arm page size
local chunksize=128
local read_ahead_mb
}
run_test 1003 "mmap test for compression"
+test_1004() {
+ (( MDS1_VERSION >= $(version_code 2.14.0.91) )) ||
+ skip "Need MDS version at least 2.14.0.91"
+
+ local tf=$DIR/$tfile
+ local hdf=$LUSTRE/tests/AMSR_E_L3_DailyOcean_V05_20111003.hdf
+ local tmp_hdf=$TMP/$tfile.hdf
+ local source=$tmp_hdf
+ # Larger than arm page size
+ local chunksize=128
+
+ if [[ -f $hdf.bz2 ]] && type -p bzcat >/dev/null; then
+ bzcat $hdf.bz2 > $tmp_hdf
+ elif [[ -f $hdf.bz2 ]] && type -p bunzip2 >/dev/null; then
+ cp $hdf.bz2 $tmp_hdf.bz2 || error "cp $tmp_hdf.bz2"
+ bunzip2 $tmp_hdf.bz2 || error "bunzip2 $tmp_hdf.bz2"
+ else
+		echo "bunzip2 is not installed or $hdf.bz2 is missing, skipping"
+ return 0
+ fi
+
+	# Fail the test if the source file size ever changes, so we notice
+	# The source should be about 14 MiB in size
+ $CHECKSTAT -s 14625450 $source || error "checkstat wrong size"
+
+ stack_trap "rm -f $tf; disable_compression"
+ enable_compression
+
+ # Simple compressed layout
+ $LFS setstripe -E -1 -Z lz4:0 --compress-chunk=$chunksize $tf ||
+ error "set a compress component in $tf failed"
+
+ # Create file and verify - trivial
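+	# bs=128K matches the chunk size, so every write is a full, aligned
+	# chunk (except the final partial block at EOF)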
+ dd if=$source bs=${chunksize}K of=$tf || error "(0) dd failed"
+ flush_and_compare $source $tf "(1)"
+
+ # Do a single 4K write - this will require a read-modify-write because
+ # it is less than chunk size
+ dd if=$source bs=4K of=$tf count=1 conv=notrunc || error "(2) dd failed"
+ flush_and_compare $source $tf "(3)"
+
+ # A single write of > chunk_size but < 2 chunks
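+	# 3/2 * 128K = 192K, i.e. one full chunk plus half of the next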
+ dd if=$source bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc ||
+ error "(4) dd failed"
+ flush_and_compare $source $tf "(5)"
+
+	# Same test, but offset slightly into the file
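+	# seek=1 with bs=192K puts this write at offset 192K, the middle of
+	# the second chunk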
+ dd if=$source seek=1 skip=1 bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc ||
+ error "(6) dd failed"
+ flush_and_compare $source $tf "(7)"
+
+	# Later tests use fsync to force read-modify-write
+	# Use a larger dd block size with fsync (writing 4K at a time with
+	# fsync is slow)
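+	# chunksize/2 = 64K, so each write covers exactly half of a chunk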
+	dd if=$source bs=$((chunksize / 2))K of=$tf conv=fsync,notrunc ||
+ error "(8) dd failed"
+ flush_and_compare $source $tf "(9)"
+
+ # Larger than chunk size
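+	# 3/2 * 128K = 192K, so each write crosses a chunk boundary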
+	dd if=$source bs=$((chunksize * 3/2))K of=$tf conv=fsync,notrunc ||
+ error "(10) dd failed"
+ flush_and_compare $source $tf "(11)"
+
+	# The above test uses full pages on x86, but is sometimes partial on
+	# ARM. This explicitly tests the read-up of partial chunks as part of
+	# a write: it writes one full page, then part of the next page, which
+	# forces a partial page read before the write can be done
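+	# e.g. with 4K pages bs is 7168 bytes, so after the first write every
+	# write starts mid-page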
+ dd if=$source bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc ||
+ error "(12) dd failed"
+ flush_and_compare $source $tf "(13)"
+
+ # Do the same test at an offset in the file
+ dd if=$source seek=4 skip=4 bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc ||
+ error "(14) dd failed"
+ flush_and_compare $source $tf "(15)"
+}
+run_test 1004 "initial test for write updating"
+
+test_1005() {
+ (( MDS1_VERSION >= $(version_code 2.14.0.91) )) ||
+ skip "Need MDS version at least 2.14.0.91"
+
+ local tf=$DIR/$tfile
+	# Larger than arm page size
+ local chunksize=128
+ local read_ahead_mb
+ local hdf=$LUSTRE/tests/AMSR_E_L3_DailyOcean_V05_20111003.hdf
+ local tmp_hdf=$TMP/$tfile.hdf
+ local source=$tmp_hdf
+
+ if [[ -f $hdf.bz2 ]] && type -p bzcat >/dev/null; then
+ bzcat $hdf.bz2 > $tmp_hdf
+ elif [[ -f $hdf.bz2 ]] && type -p bunzip2 >/dev/null; then
+ cp $hdf.bz2 $tmp_hdf.bz2 || error "cp $tmp_hdf.bz2"
+ bunzip2 $tmp_hdf.bz2 || error "bunzip2 $tmp_hdf.bz2"
+ else
+		echo "bunzip2 is not installed or $hdf.bz2 is missing, skipping"
+ return 0
+ fi
+
+	# Fail the test if the source file size ever changes, so we notice
+	# The source should be about 14 MiB in size
+ $CHECKSTAT -s 14625450 $source || error "checkstat wrong size"
+
+	stack_trap "rm -f $tf $tf.2 $tf.3; disable_compression"
+ enable_compression
+
+	# Disable readahead so reads are not expanded to full chunks
+ $LCTL set_param osc.*.rpc_stats=c
+ read_ahead_mb=$($LCTL get_param -n llite.*.max_read_ahead_mb)
+ $LCTL set_param llite.*.max_read_ahead_mb=0
+ stack_trap "$LCTL set_param llite.*.max_read_ahead_mb=$read_ahead_mb" EXIT
+
+ # Simple compressed layout
+ $LFS setstripe -E -1 -Z lz4:0 --compress-chunk=$chunksize $tf ||
+ error "set a compress component in $tf failed"
+
+ # These tests will deliberately create unusual files, using the sample
+ # hdf5 file as a reference, but cannot compare to it because they
+ # copy only part of it. So all of them will also create an identical
+ # uncompressed file to compare against.
+ # Create the simplest possible example of a file with an incomplete
+ # chunk, just 4K at the beginning (this won't be compressed, but it's
+ # still a valid test)
+ dd if=$source bs=4K of=$tf count=1 || error "(0) dd failed"
+ dd if=$source bs=4K of=$tf.2 count=1 || error "(1) dd failed"
+
+ flush_and_compare $tf $tf.2 "(2)"
+
+	# Now write adjacent to the existing data in this first chunk,
+	# confirming that updating it works
+ dd if=$source bs=4K of=$tf conv=notrunc count=2 seek=1 skip=1 || error "(3) dd failed"
+ dd if=$source bs=4K of=$tf.2 conv=notrunc count=2 seek=1 skip=1 || error "(4) dd failed"
+
+ flush_and_compare $tf $tf.2 "(5)"
+
+ # Now we jump a bit further on and do a write
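+	# seek=4 * 8K = 32K, leaving a gap after the 12K written so far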
+ dd if=$source bs=8K of=$tf conv=notrunc count=1 seek=4 skip=4 || error "(6) dd failed"
+ dd if=$source bs=8K of=$tf.2 conv=notrunc count=1 seek=4 skip=4 || error "(7) dd failed"
+ sync
+
+ flush_and_compare $tf $tf.2 "(8)"
+
+ # None of the above was compressed, because the first write was too
+ # small and the later writes weren't chunk aligned. Confirm the server
+ # didn't do any decompression/compression
+ #FIXME: Stats check goes here once available
+
+ # OK, now we truncate the file back to zero and start with a compressed
+ # chunk at the beginning.
+ # 16K - this will be compressed
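+	# dd without conv=notrunc truncates the output file to zero first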
+ dd if=$source bs=16K of=$tf count=1 || error "(9) dd failed"
+ dd if=$source bs=16K of=$tf.2 count=1 || error "(10) dd failed"
+
+ flush_and_compare $tf $tf.2 "(11)"
+
+ # Now we do the same size of write slightly further down
+	# 16K, into an existing chunk - leads to read-modify-write
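+	# seek=2 * 16K = 32K, which lands inside the first 128K chunk that
+	# already holds compressed data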
+ dd if=$source bs=16K of=$tf count=1 seek=2 skip=2 conv=notrunc || error "(12) dd failed"
+ dd if=$source bs=16K of=$tf.2 count=1 seek=2 skip=2 conv=notrunc || error "(13) dd failed"
+
+ flush_and_compare $tf $tf.2 "(14)"
+
+ # OK, now we're going to create a complete compressed chunk further
+ # along in the file
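+	# skip=1 seek=1 with bs=128K writes exactly the second chunk,
+	# 128K-256K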
+ dd if=$source bs=128K count=1 skip=1 seek=1 conv=notrunc of=$tf ||
+ error "(15) dd failed"
+ dd if=$source bs=128K count=1 skip=1 seek=1 conv=notrunc of=$tf.2 ||
+ error "(16) dd failed"
+
+ flush_and_compare $tf $tf.2 "(17)"
+
+ # Now we're going to write *into* the middle of this compressed chunk
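+	# skip=5 seek=5 with bs=32K writes 160K-192K, inside that chunk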
+ dd if=$source bs=32K count=1 skip=5 seek=5 conv=notrunc of=$tf ||
+ error "(18) dd failed"
+ dd if=$source bs=32K count=1 skip=5 seek=5 conv=notrunc of=$tf.2 ||
+ error "(19) dd failed"
+
+ flush_and_compare $tf $tf.2 "(20)"
+
+ # OK, now we're going to add another incomplete chunk after those
+	# two - starting in the third chunk, i.e. at an offset of 256K
+ # This will be compressed
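+	# skip=4 seek=4 with bs=64K writes 256K-320K, the first half of the
+	# third chunk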
+ dd if=$source bs=64K count=1 skip=4 seek=4 conv=notrunc of=$tf ||
+ error "(21) dd failed"
+ dd if=$source bs=64K count=1 skip=4 seek=4 conv=notrunc of=$tf.2 ||
+ error "(22) dd failed"
+
+ flush_and_compare $tf $tf.2 "(23)"
+
+ # Now we're going to write the second 'half' of this chunk
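+	# skip=5 seek=5 with bs=64K writes 320K-384K, completing that chunk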
+ dd if=$source bs=64K count=1 skip=5 seek=5 conv=notrunc of=$tf ||
+ error "(24) dd failed"
+ dd if=$source bs=64K count=1 skip=5 seek=5 conv=notrunc of=$tf.2 ||
+ error "(25) dd failed"
+
+ flush_and_compare $tf $tf.2 "(26)"
+
+	# And now we're going to add a chunk whose data doesn't start at the
+	# chunk boundary, so it won't be compressed. This is the fourth chunk,
+	# which starts at 384K; we'll start writing at 448K instead
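+	# skip=7 seek=7 with bs=64K writes 448K-512K, the second half of the
+	# 384K-512K chunk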
+ dd if=$source bs=64K count=1 skip=7 seek=7 conv=notrunc of=$tf ||
+ error "(27) dd failed"
+ dd if=$source bs=64K count=1 skip=7 seek=7 conv=notrunc of=$tf.2 ||
+ error "(28) dd failed"
+
+ flush_and_compare $tf $tf.2 "(29)"
+
+	# Now we'll write a few KiB into the middle of the first part of this
+ # chunk - starting at 400K
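+	# skip=50 seek=50 with bs=8K count=2 writes 400K-416K, in the still
+	# empty first half of that chunk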
+ dd if=$source bs=8K count=2 skip=50 seek=50 conv=notrunc of=$tf ||
+ error "(30) dd failed"
+ dd if=$source bs=8K count=2 skip=50 seek=50 conv=notrunc of=$tf.2 ||
+ error "(31) dd failed"
+
+ flush_and_compare $tf $tf.2 "(32)"
+
+ # Now let's skip an entire chunk and do a full chunk at 640K
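+	# skip=5 seek=5 with bs=128K writes the full 640K-768K chunk, leaving
+	# 512K-640K as a hole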
+ dd if=$source bs=128K count=1 skip=5 seek=5 conv=notrunc of=$tf ||
+ error "(33) dd failed"
+ dd if=$source bs=128K count=1 skip=5 seek=5 conv=notrunc of=$tf.2 ||
+ error "(34) dd failed"
+
+ flush_and_compare $tf $tf.2 "(35)"
+
+ # Then one more partial, but compressible, chunk after that at
+ # 768K
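+	# skip=12 seek=12 with bs=64K writes 768K-832K, the first half of the
+	# 768K-896K chunk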
+ dd if=$source bs=64K count=1 skip=12 seek=12 conv=notrunc of=$tf ||
+ error "(36) dd failed"
+ dd if=$source bs=64K count=1 skip=12 seek=12 conv=notrunc of=$tf.2 ||
+ error "(37) dd failed"
+
+ flush_and_compare $tf $tf.2 "(38)"
+
+	# OK, now we have this complex file; let's test reading it back at a
+	# number of block sizes to give us unusual offsets
+	for bs in 3 4 7 32 97 128 130 192; do
+		echo "copying from compressed file with bs=${bs}K"
+ dd if=$tf bs=${bs}K of=$tf.3 ||
+ error "(39) dd with block size ${bs}K failed"
+
+ flush_and_compare $tf $tf.3 "(40)"
+ rm -f $tf.3
+ done
+}
+run_test 1005 "test for write updating with partial chunks"
+
complete_test $SECONDS
check_and_cleanup_lustre
declare -a logs=($ONLY)