local hdf=$LUSTRE/tests/AMSR_E_L3_DailyOcean_V05_20111003.hdf
local tmp_hdf=$TMP/$tfile.hdf
local source=$tmp_hdf
+ local cpy_hdf=$TMP/$tfile.cpy.hdf
+ local source=$cpy_hdf
# Larger than arm page size
local chunksize=128
return 0
fi
+	# Use a copy of the hdf shifted 7k blocks into the file as the data
+	# source, so every offset in $source differs from $tmp_hdf
+ dd if=$tmp_hdf of=$cpy_hdf seek=7k ||
+ error "failed creating an alternative data source"
+
# Fail test if source size changes so we catch this
# Source should be a few MiB in size
- $CHECKSTAT -s 14625450 $source || error "checkstat wrong size"
+ $CHECKSTAT -s 14625450 $tmp_hdf || error "checkstat wrong size"
stack_trap "rm -f $tf; disable_compression"
enable_compression
error "set a compress component in $tf failed"
# Create file and verify - trivial
- dd if=$source bs=${chunksize}K of=$tf || error "(0) dd failed"
- flush_and_compare $source $tf "(1)"
+ dd if=$tmp_hdf bs=${chunksize}K of=$tf || error "(0) dd failed"
+ flush_and_compare $tmp_hdf $tf "(1)"
# Do a single 4K write - this will require a read-modify-write because
# it is less than chunk size
- dd if=$source bs=4K of=$tf count=1 conv=notrunc || error "(2) dd failed"
- flush_and_compare $source $tf "(3)"
+ dd if=$source bs=4K of=$tf count=1 conv=notrunc || error "(2.1) dd failed"
+ dd if=$source bs=4K of=$tmp_hdf count=1 conv=notrunc ||
+ error "(2.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(3)"
# A single write of > chunk_size but < 2 chunks
dd if=$source bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc ||
- error "(4) dd failed"
- flush_and_compare $source $tf "(5)"
+ error "(4.1) dd failed"
+ dd if=$source bs=$((chunksize * 3/2))K of=$tmp_hdf count=1 \
+ conv=notrunc || error "(4.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(5)"
# Same test but offset slightly in to the file
dd if=$source seek=1 skip=1 bs=$((chunksize * 3/2))K of=$tf count=1 conv=notrunc ||
- error "(6) dd failed"
- flush_and_compare $source $tf "(7)"
+ error "(6.1) dd failed"
+ dd if=$source seek=1 skip=1 bs=$((chunksize * 3/2))K of=$tmp_hdf \
+ count=1 conv=notrunc || error "(6.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(7)"
# Later tests use fsync to force read-modify-write
# larger dd size with fsync (writing 4K at a time with fsync is slow)
dd if=$source bs=$((chunksize / 2)) of=$tf conv=fsync,notrunc ||
- error "(8) dd failed"
- flush_and_compare $source $tf "(9)"
+ error "(8.1) dd failed"
+ dd if=$source bs=$((chunksize / 2)) of=$tmp_hdf conv=fsync,notrunc ||
+ error "(8.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(9)"
# Larger than chunk size
dd if=$source bs=$((chunksize * 3/2)) of=$tf conv=fsync,notrunc ||
- error "(10) dd failed"
- flush_and_compare $source $tf "(11)"
+ error "(10.1) dd failed"
+ dd if=$source bs=$((chunksize * 3/2)) of=$tmp_hdf conv=fsync,notrunc ||
+ error "(10.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(11)"
# The above test is full pages on x86, sometimes partial on ARM
# This will explicitly test readup of partial chunks as part of a write
# This writes one full page, then part of the next page
# This forces a partial page read before we can do the write
dd if=$source bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc ||
- error "(12) dd failed"
- flush_and_compare $source $tf "(13)"
+ error "(12.1) dd failed"
+ dd if=$source bs=$((PAGE_SIZE * 2 - 1024)) of=$tmp_hdf conv=notrunc ||
+ error "(12.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(13)"
# Do the same test at an offset in the file
dd if=$source seek=4 skip=4 bs=$((PAGE_SIZE * 2 - 1024)) of=$tf conv=notrunc ||
- error "(14) dd failed"
- flush_and_compare $source $tf "(15)"
+ error "(14.1) dd failed"
+ dd if=$source seek=4 skip=4 bs=$((PAGE_SIZE * 2 - 1024)) of=$tmp_hdf conv=notrunc ||
+ error "(14.2) dd failed"
+ flush_and_compare $tmp_hdf $tf "(15)"
}
run_test 1004 "initial test for write updating"