X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Fsanity.sh;h=f78552203e1297d6ceb187b67eb9d354908cffa7;hp=42789ad323863de6880d786a5d4ee199768071eb;hb=6bbae72c6900dbd2b853d716bc4d456dc7fd586e;hpb=8deea7888c0c242e549e6a567f9aa5d1c7c3558e diff --git a/lustre/tests/sanity.sh b/lustre/tests/sanity.sh index 42789ad..f785522 100755 --- a/lustre/tests/sanity.sh +++ b/lustre/tests/sanity.sh @@ -20,6 +20,13 @@ if $SHARED_KEY; then ALWAYS_EXCEPT="$ALWAYS_EXCEPT 17n 60a 133g 300f" fi +if [[ $(uname -m) = aarch64 ]]; then + # bug number: LU-11596 (all below) + ALWAYS_EXCEPT+=" 42d 42e 63a 63b 64a 64b 64c" + # bug number: LU-11671 LU-11665 LU-11594 LU-11667 LU-11729 + ALWAYS_EXCEPT+=" 45 101c 103a 317 810" +fi + # Check Grants after these tests GRANT_CHECK_LIST="$GRANT_CHECK_LIST 42a 42b 42c 42d 42e 63a 63b 64a 64b 64c" SRCDIR=$(cd $(dirname $0); echo $PWD) @@ -110,10 +117,8 @@ elif [ -r /etc/os-release ]; then if [[ $ubuntu_version -gt $(version_code 16.0.0) ]]; then # bug number for skipped test: - # LU-10334 LU-10335 LU-10335 LU-10335 - ALWAYS_EXCEPT+=" 103a 130a 130b 130c" - # LU-10335 LU-10335 LU-10366 - ALWAYS_EXCEPT+=" 130d 130e 410" + # LU-10334 LU-10366 + ALWAYS_EXCEPT+=" 103a 410" fi fi fi @@ -1139,12 +1144,6 @@ test_24u() { # bug12192 } run_test 24u "create stripe file" -page_size() { - local size - size=$(getconf PAGE_SIZE 2>/dev/null) - echo -n ${size:-4096} -} - simple_cleanup_common() { local rc=0 trap 0 @@ -1202,7 +1201,7 @@ test_24v() { # take into account of overhead in lu_dirpage header and end mark in # each page, plus one in rpc_num calculation. local dirent_size=$((32 + (${#tfile} | 7) + 1 + 8)) - local page_entries=$((($(page_size) - 24) / dirent_size)) + local page_entries=$(((PAGE_SIZE - 24) / dirent_size)) local mdt_idx=$($LFS getdirstripe -i $(dirname $fname)) local rpc_pages=$(max_pages_per_rpc $mdt_idx) local rpc_max=$((nrfiles / (page_entries * rpc_pages) + stripes)) @@ -3209,7 +3208,7 @@ test_34h() { # Since just timed wait is not good enough, let's do a sync write # that way we are sure enough time for a roundtrip + processing # passed + 2 seconds of extra margin. - dd if=/dev/zero of=$DIR/${tfile}-1 bs=4096 oflag=direct count=1 + dd if=/dev/zero of=$DIR/${tfile}-1 bs=$PAGE_SIZE oflag=direct count=1 rm $DIR/${tfile}-1 sleep 2 @@ -3995,7 +3994,6 @@ test_42e() { # bug22074 [ $PARALLEL == "yes" ] && skip "skip parallel run" local TDIR=$DIR/${tdir}e - local pagesz=$(page_size) local pages=16 # hardcoded 16 pages, don't change it. 
local files=$((OSTCOUNT * 500)) # hopefully 500 files on each OST local proc_osc0="osc.${FSNAME}-OST0000-osc-[^MDT]*" @@ -4041,7 +4039,7 @@ test_42e() { # bug22074 $LCTL set_param $proc_osc0/rpc_stats 0 for ((;i<$files; i++)); do [ $($GETSTRIPE -i $TDIR/f$i) -eq 0 ] || continue - dd if=/dev/zero of=$TDIR/f$i bs=$pagesz count=$pages 2>/dev/null + dd if=/dev/zero of=$TDIR/f$i bs=$PAGE_SIZE count=$pages 2>/dev/null done sync $LCTL get_param $proc_osc0/rpc_stats @@ -4220,10 +4218,10 @@ test_46() { f="$DIR/f46" stop_writeback sync - dd if=/dev/zero of=$f bs=`page_size` seek=511 count=1 + dd if=/dev/zero of=$f bs=$PAGE_SIZE seek=511 count=1 sync - dd conv=notrunc if=/dev/zero of=$f bs=`page_size` seek=1023 count=1 - dd conv=notrunc if=/dev/zero of=$f bs=`page_size` seek=511 count=1 + dd conv=notrunc if=/dev/zero of=$f bs=$PAGE_SIZE seek=1023 count=1 + dd conv=notrunc if=/dev/zero of=$f bs=$PAGE_SIZE seek=511 count=1 sync start_writeback } @@ -4664,7 +4662,7 @@ test_54b() { f="$DIR/f54b" mknod $f c 1 3 chmod 0666 $f - dd if=/dev/zero of=$f bs=$(page_size) count=1 + dd if=/dev/zero of=$f bs=$PAGE_SIZE count=1 } run_test 54b "char device works in lustre ======================" @@ -4703,17 +4701,17 @@ test_54c() { trap cleanup_54c EXIT mknod $loopdev b 7 $LOOPNUM echo "make a loop file system with $DIR/$tfile on $loopdev ($LOOPNUM)." - dd if=/dev/zero of=$DIR/$tfile bs=$(get_page_size client) seek=1024 count=1 > /dev/null + dd if=/dev/zero of=$DIR/$tfile bs=$PAGE_SIZE seek=1024 count=1 > /dev/null losetup $loopdev $DIR/$tfile || error "can't set up $loopdev for $DIR/$tfile" mkfs.ext2 $loopdev || error "mke2fs on $loopdev" test_mkdir $DIR/$tdir mount -t ext2 $loopdev $DIR/$tdir || error "error mounting $loopdev on $DIR/$tdir" - dd if=/dev/zero of=$DIR/$tdir/tmp bs=$(get_page_size client) count=30 || + dd if=/dev/zero of=$DIR/$tdir/tmp bs=$PAGE_SIZE count=30 || error "dd write" df $DIR/$tdir - dd if=$DIR/$tdir/tmp of=/dev/zero bs=$(get_page_size client) count=30 || + dd if=$DIR/$tdir/tmp of=/dev/zero bs=$PAGE_SIZE count=30 || error "dd read" cleanup_54c } @@ -6350,11 +6348,41 @@ test_60e() { } run_test 60e "no space while new llog is being created" +test_60g() { + local pid + + test_mkdir -c $MDSCOUNT $DIR/$tdir + $LFS setdirstripe -D -i -1 -c $MDSCOUNT $DIR/$tdir + + ( + local index=0 + while true; do + mkdir $DIR/$tdir/subdir$index 2>/dev/null + rmdir $DIR/$tdir/subdir$index 2>/dev/null + index=$((index + 1)) + done + ) & + + pid=$! 
+ + for i in $(seq 100); do + # define OBD_FAIL_OSD_TXN_START 0x19a + do_facet mds1 lctl set_param fail_loc=0x8000019a + usleep 100 + done + + kill -9 $pid + + mkdir $DIR/$tdir/new || error "mkdir failed" + rmdir $DIR/$tdir/new || error "rmdir failed" +} +run_test 60g "transaction abort won't cause MDT hung" + test_61() { [ $PARALLEL == "yes" ] && skip "skip parallel run" f="$DIR/f61" - dd if=/dev/zero of=$f bs=$(page_size) count=1 || error "dd $f failed" + dd if=/dev/zero of=$f bs=$PAGE_SIZE count=1 || error "dd $f failed" cancel_lru_locks osc $MULTIOP $f OSMWUc || error "$MULTIOP $f failed" sync @@ -6445,8 +6473,6 @@ run_test 64c "verify grant shrink" want_grant() { local tgt=$1 - local page_size=$(get_page_size client) - local nrpages=$($LCTL get_param -n osc.${tgt}.max_pages_per_rpc) local rpc_in_flight=$($LCTL get_param -n osc.${tgt}.max_rpcs_in_flight) @@ -6455,15 +6481,15 @@ want_grant() { local dirty_max_pages=$($LCTL get_param -n osc.${tgt}.max_dirty_mb) - dirty_max_pages=$((dirty_max_pages * 1024 * 1024 / page_size)) + dirty_max_pages=$((dirty_max_pages * 1024 * 1024 / PAGE_SIZE)) [[ $dirty_max_pages -gt $nrpages ]] && nrpages=$dirty_max_pages - local undirty=$((nrpages * page_size)) + local undirty=$((nrpages * PAGE_SIZE)) local max_extent_pages max_extent_pages=$($LCTL get_param osc.${tgt}.import | grep grant_max_extent_size | awk '{print $2}') - max_extent_pages=$((max_extent_pages / page_size)) + max_extent_pages=$((max_extent_pages / PAGE_SIZE)) local nrextents=$(((nrpages + max_extent_pages - 1) / max_extent_pages)) local grant_extent_tax grant_extent_tax=$($LCTL get_param osc.${tgt}.import | @@ -7812,7 +7838,7 @@ run_test 101f "check mmap read performance" test_101g_brw_size_test() { local mb=$1 - local pages=$((mb * 1048576 / $(page_size))) + local pages=$((mb * 1048576 / PAGE_SIZE)) local file=$DIR/$tfile $LCTL set_param osc.*.max_pages_per_rpc=${mb}M || @@ -8604,7 +8630,6 @@ test_104b() { [ $RUNAS_ID -eq $UID ] && skip_env "RUNAS_ID = UID = $UID -- skipping" - chmod 666 /dev/obd denied_cnt=$(($($RUNAS $LFS check servers 2>&1 | grep "Permission denied" | wc -l))) if [ $denied_cnt -ne 0 ]; then @@ -10158,38 +10183,44 @@ run_test 127a "verify the client stats are sane" test_127b() { # bug LU-333 [ $PARALLEL == "yes" ] && skip "skip parallel run" + local name count samp unit min max sum sumsq $LCTL set_param llite.*.stats=0 - FSIZE=65536 # sized fixed to match PAGE_SIZE for most clients # perform 2 reads and writes so MAX is different from SUM. 
- dd if=/dev/zero of=$DIR/$tfile bs=$FSIZE count=1 - dd if=/dev/zero of=$DIR/$tfile bs=$FSIZE count=1 + dd if=/dev/zero of=$DIR/$tfile bs=$PAGE_SIZE count=1 + dd if=/dev/zero of=$DIR/$tfile bs=$PAGE_SIZE count=1 cancel_lru_locks osc - dd if=$DIR/$tfile of=/dev/null bs=$FSIZE count=1 - dd if=$DIR/$tfile of=/dev/null bs=$FSIZE count=1 - - $LCTL get_param llite.*.stats | grep samples > $TMP/${tfile}.tmp - while read NAME COUNT SAMP UNIT MIN MAX SUM SUMSQ; do - echo "got $COUNT $NAME" - eval $NAME=$COUNT || error "Wrong proc format" - - case $NAME in - read_bytes) - [ $COUNT -ne 2 ] && error "count is not 2: $COUNT" - [ $MIN -ne $FSIZE ] && error "min is not $FSIZE: $MIN" - [ $MAX -ne $FSIZE ] && error "max is incorrect: $MAX" - [ $SUM -ne $((FSIZE * 2)) ] && error "sum is wrong: $SUM" - ;; - write_bytes) - [ $COUNT -ne 2 ] && error "count is not 2: $COUNT" - [ $MIN -ne $FSIZE ] && error "min is not $FSIZE: $MIN" - [ $MAX -ne $FSIZE ] && error "max is incorrect: $MAX" - [ $SUM -ne $((FSIZE * 2)) ] && error "sum is wrong: $SUM" - ;; - *) ;; - esac - done < $TMP/${tfile}.tmp + dd if=$DIR/$tfile of=/dev/null bs=$PAGE_SIZE count=1 + dd if=$DIR/$tfile of=/dev/null bs=$PAGE_SIZE count=1 + + $LCTL get_param llite.*.stats | grep samples > $TMP/$tfile.tmp + while read name count samp unit min max sum sumsq; do + echo "got $count $name" + eval $name=$count || error "Wrong proc format" + + case $name in + read_bytes) + [ $count -ne 2 ] && error "count is not 2: $count" + [ $min -ne $PAGE_SIZE ] && + error "min is not $PAGE_SIZE: $min" + [ $max -ne $PAGE_SIZE ] && + error "max is incorrect: $max" + [ $sum -ne $((PAGE_SIZE * 2)) ] && + error "sum is wrong: $sum" + ;; + write_bytes) + [ $count -ne 2 ] && error "count is not 2: $count" + [ $min -ne $PAGE_SIZE ] && + error "min is not $PAGE_SIZE: $min" + [ $max -ne $PAGE_SIZE ] && + error "max is incorrect: $max" + [ $sum -ne $((PAGE_SIZE * 2)) ] && + error "sum is wrong: $sum" + ;; + *) ;; + esac + done < $TMP/$tfile.tmp #check that we actually got some stats [ "$read_bytes" ] || error "Missing read_bytes stats" @@ -13959,7 +13990,7 @@ test_205() { # Job stats local old_jobenv=$($LCTL get_param -n jobid_var) [ $old_jobenv != $JOBENV ] && jobstats_set $JOBENV - if [[ $PERM_CMD = *"set_param -P"* ]]; then + if [[ $PERM_CMD == *"set_param -P"* ]]; then stack_trap "do_facet mgs $PERM_CMD jobid_var=$old_jobenv" EXIT else stack_trap "do_facet mgs $PERM_CMD \ @@ -15436,12 +15467,74 @@ test_230j() { } run_test 230j "DoM file data not changed after dir migration" +test_230k() { + [ $MDSCOUNT -lt 4 ] && skip "needs >= 4 MDTs" + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] && + skip "Need MDS version at least 2.11.56" + + local total=20 + local files_on_starting_mdt=0 + + $LFS mkdir -i -1 -c 2 $DIR/$tdir || error "mkdir failed" + $LFS getdirstripe $DIR/$tdir + for i in $(seq $total); do + echo $((i*i - i)) > $DIR/$tdir/$tfile.$i || error "write failed" + [[ $($LFS getstripe -m $DIR/$tdir/$tfile.$i) -eq 0 ]] && + files_on_starting_mdt=$((files_on_starting_mdt + 1)) + done + + echo "$files_on_starting_mdt files on MDT0" + + $LFS migrate -m 1,3 $DIR/$tdir || error "migrate -m 1,3 failed" + $LFS getdirstripe $DIR/$tdir + + files_on_starting_mdt=0 + for i in $(seq $total); do + $(echo $((i*i - i)) | cmp $DIR/$tdir/$tfile.$i -) || + error "file $tfile.$i mismatch after migration" + [[ $($LFS getstripe -m $DIR/$tdir/$tfile.$i) -eq 1 ]] && + files_on_starting_mdt=$((files_on_starting_mdt + 1)) + done + + echo "$files_on_starting_mdt files on MDT1 after 
migration" + [[ $files_on_starting_mdt -eq $total ]] && error "all files on MDT1" + + $LFS migrate -m 0 -c 2 $DIR/$tdir || error "migrate -m 0 -c 2 failed" + $LFS getdirstripe $DIR/$tdir + + files_on_starting_mdt=0 + for i in $(seq $total); do + $(echo $((i*i - i)) | cmp $DIR/$tdir/$tfile.$i -) || + error "file $tfile.$i mismatch after 2nd migration" + [[ $($LFS getstripe -m $DIR/$tdir/$tfile.$i) -eq 0 ]] && + files_on_starting_mdt=$((files_on_starting_mdt + 1)) + done + + echo "$files_on_starting_mdt files on MDT0 after 2nd migration" + [[ $files_on_starting_mdt -eq $total ]] && error "all files on MDT0" + + true +} +run_test 230k "file data not changed after dir migration" + +test_230l() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] && + skip "Need MDS version at least 2.11.56" + + $LFS mkdir -i 0 -c 1 $DIR/$tdir || error "mkdir failed" + createmany -o $DIR/$tdir/f___________________________________ 1000 || + error "create files under remote dir failed $i" + $LFS migrate -m 1 $DIR/$tdir || error "migrate failed" +} +run_test 230l "readdir between MDTs won't crash" + test_231a() { # For simplicity this test assumes that max_pages_per_rpc # is the same across all OSCs local max_pages=$($LCTL get_param -n osc.*.max_pages_per_rpc | head -n1) - local bulk_size=$((max_pages * 4096)) + local bulk_size=$((max_pages * PAGE_SIZE)) local brw_size=$(do_facet ost1 $LCTL get_param -n obdfilter.*.brw_size | head -n 1) @@ -15720,36 +15813,48 @@ test_240() { run_test 240 "race between ldlm enqueue and the connection RPC (no ASSERT)" test_241_bio() { - for LOOP in $(seq $1); do - dd if=$DIR/$tfile of=/dev/null bs=40960 count=1 2>/dev/null + local count=$1 + local bsize=$2 + + for LOOP in $(seq $count); do + dd if=$DIR/$tfile of=/dev/null bs=$bsize count=1 2>/dev/null cancel_lru_locks $OSC || true done } test_241_dio() { + local count=$1 + local bsize=$2 + for LOOP in $(seq $1); do - dd if=$DIR/$tfile of=/dev/null bs=40960 count=1 \ - iflag=direct 2>/dev/null + dd if=$DIR/$tfile of=/dev/null bs=$bsize count=1 iflag=direct \ + 2>/dev/null done } test_241a() { # was test_241 - dd if=/dev/zero of=$DIR/$tfile count=1 bs=40960 + local bsize=$PAGE_SIZE + + (( bsize < 40960 )) && bsize=40960 + dd if=/dev/zero of=$DIR/$tfile count=1 bs=$bsize ls -la $DIR/$tfile cancel_lru_locks $OSC - test_241_bio 1000 & + test_241_bio 1000 $bsize & PID=$! - test_241_dio 1000 + test_241_dio 1000 $bsize wait $PID } run_test 241a "bio vs dio" test_241b() { - dd if=/dev/zero of=$DIR/$tfile count=1 bs=40960 + local bsize=$PAGE_SIZE + + (( bsize < 40960 )) && bsize=40960 + dd if=/dev/zero of=$DIR/$tfile count=1 bs=$bsize ls -la $DIR/$tfile - test_241_dio 1000 & + test_241_dio 1000 $bsize & PID=$! 
- test_241_dio 1000 + test_241_dio 1000 $bsize wait $PID } run_test 241b "dio vs dio" @@ -16739,7 +16844,7 @@ run_test 260 "Check mdc_close fail" ### Data-on-MDT sanity tests ### test_270a() { [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.10.55) ] && - skip "Need MDS version at least 2.10.55" + skip "Need MDS version at least 2.10.55 for DoM" # create DoM file local dom=$DIR/$tdir/dom_file @@ -16748,14 +16853,13 @@ test_270a() { mkdir -p $DIR/$tdir # basic checks for DoM component creation - $LFS setstripe -E 1024K -E 1024K -L mdt $dom 2>/dev/null && + $LFS setstripe -E 1024K -E 2048K -L mdt $dom 2>/dev/null && error "Can set MDT layout to non-first entry" - $LFS setstripe -E 1024K -L mdt -E 1024K -L mdt $dom 2>/dev/null && + $LFS setstripe -E 1024K -L mdt -E 2048K -L mdt $dom 2>/dev/null && error "Can define multiple entries as MDT layout" - $LFS setstripe -E 1M -L mdt $dom || - error "Can't create DoM layout" + $LFS setstripe -E 1M -L mdt $dom || error "Can't create DoM layout" [ $($LFS getstripe -L $dom) == "mdt" ] || error "bad pattern" [ $($LFS getstripe -c $dom) == 0 ] || error "bad stripe count" @@ -16767,46 +16871,61 @@ test_270a() { local space_check=1 # Skip free space checks with ZFS - if [ "$(facet_fstype $facet)" == "zfs" ]; then - space_check=0 - fi + [ "$(facet_fstype $facet)" == "zfs" ] && space_check=0 # write sync + local size_tmp=$((65536 * 3)) local mdtfree1=$(do_facet $facet \ - lctl get_param -n osd*.*$mdtname.kbytesfree) - dd if=/dev/urandom of=$tmp bs=1024 count=100 + lctl get_param -n osd*.*$mdtname.kbytesfree) + + dd if=/dev/urandom of=$tmp bs=1024 count=$((size_tmp / 1024)) # check also direct IO along write - dd if=$tmp of=$dom bs=102400 count=1 oflag=direct + # IO size must be a multiple of PAGE_SIZE on all platforms (ARM=64KB) + dd if=$tmp of=$dom bs=65536 count=$((size_tmp / 65536)) oflag=direct sync cmp $tmp $dom || error "file data is different" - [ $(stat -c%s $dom) == 102400 ] || error "bad size after write" + [ $(stat -c%s $dom) == $size_tmp ] || + error "bad size after write: $(stat -c%s $dom) != $size_tmp" if [ $space_check == 1 ]; then local mdtfree2=$(do_facet $facet \ - lctl get_param -n osd*.*$mdtname.kbytesfree) - [ $(($mdtfree1 - $mdtfree2)) -ge 102 ] || - error "MDT free space is wrong after write" + lctl get_param -n osd*.*$mdtname.kbytesfree) + + # increase in usage by $size_tmp + [ $(($mdtfree1 - $mdtfree2)) -ge $((size_tmp / 1024)) ] || + error "MDT free space wrong after write: " \ + "$mdtfree1 >= $mdtfree2 + $size_tmp/1024" fi # truncate - $TRUNCATE $dom 10000 - [ $(stat -c%s $dom) == 10000 ] || error "bad size after truncate" + local size_dom=10000 + + $TRUNCATE $dom $size_dom + [ $(stat -c%s $dom) == $size_dom ] || + error "bad size after truncate: $(stat -c%s $dom) != $size_dom" if [ $space_check == 1 ]; then mdtfree1=$(do_facet $facet \ lctl get_param -n osd*.*$mdtname.kbytesfree) - [ $(($mdtfree1 - $mdtfree2)) -ge 92 ] || - error "MDT free space is wrong after truncate" + # decrease in usage from $size_tmp to new $size_dom + [ $(($mdtfree1 - $mdtfree2)) -ge \ + $(((size_tmp - size_dom) / 1024)) ] || + error "MDT free space is wrong after truncate: " \ + "$mdtfree1 >= $mdtfree2 + ($size_tmp - $size_dom) / 1024" fi # append cat $tmp >> $dom sync - [ $(stat -c%s $dom) == 112400 ] || error "bad size after append" + size_dom=$((size_dom + size_tmp)) + [ $(stat -c%s $dom) == $size_dom ] || + error "bad size after append: $(stat -c%s $dom) != $size_dom" if [ $space_check == 1 ]; then mdtfree2=$(do_facet $facet \ lctl
get_param -n osd*.*$mdtname.kbytesfree) - [ $(($mdtfree1 - $mdtfree2)) -ge 102 ] || - error "MDT free space is wrong after append" + # increase in usage by $size_tmp from previous + [ $(($mdtfree1 - $mdtfree2)) -ge $((size_tmp / 1024)) ] || + error "MDT free space is wrong after append: " \ + "$mdtfree1 >= $mdtfree2 + $size_tmp/1024" fi # delete @@ -16814,22 +16933,25 @@ test_270a() { if [ $space_check == 1 ]; then mdtfree1=$(do_facet $facet \ lctl get_param -n osd*.*$mdtname.kbytesfree) - [ $(($mdtfree1 - $mdtfree2)) -ge 112 ] || - error "MDT free space is wrong after removal" + # decrease in usage by $size_dom from previous + [ $(($mdtfree1 - $mdtfree2)) -ge $((size_dom / 1024)) ] || + error "MDT free space is wrong after removal: " \ + "$mdtfree1 >= $mdtfree2 + $size_dom/1024" fi # combined striping $LFS setstripe -E 1024K -L mdt -E EOF $dom || error "Can't create DoM + OST striping" - dd if=/dev/urandom of=$tmp bs=1024 count=2000 + size_tmp=2031616 # must be a multiple of PAGE_SIZE=65536 on ARM + dd if=/dev/urandom of=$tmp bs=1024 count=$((size_tmp / 1024)) # check also direct IO along write - dd if=$tmp of=$dom bs=102400 count=20 oflag=direct + dd if=$tmp of=$dom bs=65536 count=$((size_tmp / 65536)) oflag=direct sync cmp $tmp $dom || error "file data is different" - [ $(stat -c%s $dom) == 2048000 ] || error "bad size after write" - rm $dom - rm $tmp + [ $(stat -c%s $dom) == $size_tmp ] || + error "bad size after write: $(stat -c%s $dom) != $size_tmp" + rm $dom $tmp return 0 } @@ -18369,7 +18491,6 @@ test_312() { # LU-4856 local max_blksz=$(do_facet ost1 \ $ZFS get -p recordsize $(facet_device ost1) | awk '!/VALUE/{print $3}') - local min_blksz=$(getconf PAGE_SIZE) # to make life a little bit easier $LFS mkdir -c 1 -i 0 $DIR/$tdir @@ -18381,30 +18502,31 @@ test_312() { # LU-4856 # Get ZFS object id local zfs_objid=$(zfs_oid_to_objid ost1 $oid) + # block size change by sequential overwrite + local bs - # block size change by sequential over write - local blksz - for ((bs=$min_blksz; bs <= max_blksz; bs <<= 2)); do + for ((bs=$PAGE_SIZE; bs <= max_blksz; bs *= 4)) ; do dd if=/dev/zero of=$tf bs=$bs count=1 oflag=sync conv=notrunc - blksz=$(zfs_object_blksz ost1 $zfs_objid) + local blksz=$(zfs_object_blksz ost1 $zfs_objid) [ $blksz -eq $bs ] || error "blksz error: $blksz, expected: $bs" done rm -f $tf # block size change by sequential append write - dd if=/dev/zero of=$tf bs=$min_blksz count=1 oflag=sync conv=notrunc + dd if=/dev/zero of=$tf bs=$PAGE_SIZE count=1 oflag=sync conv=notrunc oid=$($LFS getstripe $tf | awk '/obdidx/{getline; print $2}') zfs_objid=$(zfs_oid_to_objid ost1 $oid) + local count - for ((count = 1; count < $((max_blksz / min_blksz)); count *= 2)); do - dd if=/dev/zero of=$tf bs=$min_blksz count=$count seek=$count \ + for ((count = 1; count < $((max_blksz / PAGE_SIZE)); count *= 2)); do + dd if=/dev/zero of=$tf bs=$PAGE_SIZE count=$count seek=$count \ oflag=sync conv=notrunc blksz=$(zfs_object_blksz ost1 $zfs_objid) - [ $blksz -eq $((2 * count * min_blksz)) ] || - error "blksz error, actual $blksz, " \ - "expected: 2 * $count * $min_blksz" + [ $blksz -eq $((2 * count * PAGE_SIZE)) ] || + error "blksz error, actual $blksz, " \ + "expected: 2 * $count * $PAGE_SIZE" done rm -f $tf @@ -18415,8 +18537,8 @@ test_312() { # LU-4856 dd if=/dev/zero of=$tf bs=1K count=1 oflag=sync conv=notrunc blksz=$(zfs_object_blksz ost1 $zfs_objid) - [ $blksz -eq $min_blksz ] || - error "blksz error: $blksz, expected: $min_blksz" + [ $blksz -eq $PAGE_SIZE ] || + error "blksz error: $blksz, 
expected: $PAGE_SIZE" dd if=/dev/zero of=$tf bs=64K count=1 oflag=sync conv=notrunc seek=128 blksz=$(zfs_object_blksz ost1 $zfs_objid) @@ -18432,12 +18554,13 @@ test_313() { remote_ost_nodsh && skip "remote OST with nodsh" local file=$DIR/$tfile + rm -f $file $SETSTRIPE -c 1 -i 0 $file || error "setstripe failed" # define OBD_FAIL_TGT_RCVD_EIO 0x720 do_facet ost1 "$LCTL set_param fail_loc=0x720" - dd if=/dev/zero of=$file bs=4096 oflag=direct count=1 && + dd if=/dev/zero of=$file bs=$PAGE_SIZE oflag=direct count=1 && error "write should failed" do_facet ost1 "$LCTL set_param fail_loc=0" rm -f $file @@ -18459,8 +18582,9 @@ test_315() { # LU-618 local file=$DIR/$tfile rm -f $file - $MULTIOP $file oO_CREAT:O_DIRECT:O_RDWR:w4096000c - $MULTIOP $file oO_RDONLY:r4096000_c & + $MULTIOP $file oO_CREAT:O_DIRECT:O_RDWR:w4063232c || + error "multiop file write failed" + $MULTIOP $file oO_RDONLY:r4063232_c & PID=$! sleep 2 @@ -18966,7 +19090,7 @@ test_407() { run_test 407 "transaction fail should cause operation fail" test_408() { - dd if=/dev/zero of=$DIR/$tfile bs=4096 count=1 oflag=direct + dd if=/dev/zero of=$DIR/$tfile bs=$PAGE_SIZE count=1 oflag=direct #define OBD_FAIL_OSC_BRW_PREP_REQ2 0x40a lctl set_param fail_loc=0x8000040a @@ -19176,7 +19300,6 @@ test_415() { } run_test 415 "lock revoke is not missing" - test_416() { [ $(lustre_version_code mds1) -lt $(version_code 2.11.55) ] && skip "Need server version at least 2.11.55" @@ -19190,6 +19313,42 @@ test_416() { } run_test 416 "transaction start failure won't cause system hung" +cleanup_417() { + trap 0 + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_dir_migration=1" + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_remote_dir=1" + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_striped_dir=1" +} + +test_417() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return + [[ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ]] && + skip "Need MDS version at least 2.11.56" && return + + trap cleanup_417 RETURN EXIT + + $LFS mkdir -i 1 $DIR/$tdir.1 || error "create remote dir $tdir.1 failed" + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_dir_migration=0" + $LFS migrate -m 0 $DIR/$tdir.1 && + error "migrate dir $tdir.1 should fail" + + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_remote_dir=0" + $LFS mkdir -i 1 $DIR/$tdir.2 && + error "create remote dir $tdir.2 should fail" + + do_nodes $(comma_list $(mdts_nodes)) \ + "$LCTL set_param -n mdt.*MDT*.enable_striped_dir=0" + $LFS mkdir -c 2 $DIR/$tdir.3 && + error "create striped dir $tdir.3 should fail" + true +} +run_test 417 "disable remote dir, striped dir and dir migration" + prep_801() { [[ $(lustre_version_code mds1) -lt $(version_code 2.9.55) ]] || [[ $(lustre_version_code ost1) -lt $(version_code 2.9.55) ]] && @@ -19475,11 +19634,12 @@ test_803() { done sync; sleep 3 + wait_delete_completed # ensure old test cleanups are finished echo "before create:" $LFS df -i $MOUNT local before_used=$($LFS df -i | grep MDT0000_UUID | awk '{print $3}') - for ((i=0; i<10; i++)); do + for i in {1..10}; do $LFS mkdir -c 1 -i 1 $DIR/$tdir/foo$i || error "Fail to create $DIR/$tdir/foo$i" done @@ -19489,10 +19649,11 @@ test_803() { $LFS df -i $MOUNT local after_used=$($LFS df -i | grep MDT0000_UUID | awk '{print $3}') - [ $after_used -ge $((before_used + 10)) ] || + # allow for an llog to be cleaned up during the test + [ $after_used -ge 
$((before_used + 10 - 1)) ] || error "before ($before_used) + 10 > after ($after_used)" - for ((i=0; i<10; i++)); do + for i in {1..10}; do rm -rf $DIR/$tdir/foo$i || error "Fail to remove $DIR/$tdir/foo$i" done @@ -19501,11 +19662,11 @@ wait_delete_completed echo "after unlink:" $LFS df -i $MOUNT - before_used=$after_used after_used=$($LFS df -i | grep MDT0000_UUID | awk '{print $3}') - [ $after_used -le $((before_used - 8)) ] || - error "before ($before_used) - 8 < after ($after_used)" + # allow for an llog to be created during the test + [ $after_used -le $((before_used + 1)) ] || + error "after ($after_used) > before ($before_used) + 1" } run_test 803 "verify agent object for remote object" @@ -19687,7 +19848,7 @@ test_806() { local offset=0 local i - echo "Test SOM for single client muti-threaded($num) write" + echo "Test SOM for single client multi-threaded($num) write" $TRUNCATE $DIR/$tfile 0 for ((i = 0; i < $num; i++)); do $MULTIOP $DIR/$tfile Oz${offset}w${bs}c & @@ -19716,7 +19877,7 @@ test_806() { offset=0 i=0 - echo "Test SOM for muti-client ($num) writes" + echo "Test SOM for multi-client ($num) writes" $TRUNCATE $DIR/$tfile 0 for client in ${CLIENTS//,/ }; do do_node $client $MULTIOP $DIR/$tfile Oz${offset}w${bs}c & @@ -19789,7 +19950,7 @@ test_807() { local offset=0 local i=0 - echo "Test SOM for muti-client ($num) writes" + echo "Test SOM for multi-client ($num) writes" touch $DIR/$tfile || error "touch $tfile failed" $TRUNCATE $DIR/$tfile 0 for client in ${CLIENTS//,/ }; do @@ -19846,6 +20007,51 @@ test_808() { } run_test 808 "Check trusted.som xattr not logged in Changelogs" +check_som_nodata() +{ + $LFS getsom $1 + [[ $? -eq 61 ]] || error "DoM-only file $1 has SOM xattr" +} + +test_809() { + [ $(lustre_version_code $SINGLEMDS) -lt $(version_code 2.11.56) ] && + skip "Need MDS version at least 2.11.56" && return + + $LFS setstripe -E 1M -L mdt $DIR/$tfile || + error "failed to create DoM-only file $DIR/$tfile" + touch $DIR/$tfile || error "touch $tfile failed" + check_som_nodata $DIR/$tfile + + dd if=/dev/zero of=$DIR/$tfile bs=2048 count=1 || + error "write $tfile failed" + check_som_nodata $DIR/$tfile + + $TRUNCATE $DIR/$tfile 1234 + check_som_nodata $DIR/$tfile + + $TRUNCATE $DIR/$tfile 4097 + check_som_nodata $DIR/$tfile +} +run_test 809 "Verify no SOM xattr store for DoM-only files" + +test_810() { + local ORIG + local CSUM + + # t10 checksum types seem to dislike partial pages + lctl set_param osc.*.checksum_type=adler + lctl set_param fail_loc=0x411 + dd if=/dev/urandom of=$DIR/$tfile bs=10240 count=2 + ORIG=$(md5sum $DIR/$tfile) + lctl set_param ldlm.namespaces.*osc*.lru_size=clear + CSUM=$(md5sum $DIR/$tfile) + set_checksum_type adler + if [ "$ORIG" != "$CSUM" ]; then + error "$ORIG != $CSUM" + fi +} +run_test 810 "partial page writes on ZFS (LU-11663)" + # # tests that do cleanup/setup should be run at the end #