X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lustre%2Ftests%2Fost-pools.sh;h=dd13c6b900b3ef60ab058224f0b2a26364f0cb61;hb=38e67bc99804b98f88fcfd1adc0ef54f79549676;hp=3f924e4d482fae7debd5e90b8c018c38a7fd8c99;hpb=d6f2a9fcfd8d3b14e05e3f1f660ba2ead9f81879;p=fs%2Flustre-release.git

diff --git a/lustre/tests/ost-pools.sh b/lustre/tests/ost-pools.sh
index 3f924e4..dd13c6b 100644
--- a/lustre/tests/ost-pools.sh
+++ b/lustre/tests/ost-pools.sh
@@ -29,7 +29,7 @@ init_logging
 
 check_and_setup_lustre
 
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="23b"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="5b 18 22 23b 25"
 
 DIR=${DIR:-$MOUNT}
 assert_DIR
@@ -576,18 +576,18 @@ sub_test_5() {
         destroy_pool $POOL2
 }
 
-test_5() {
+test_5a() {
         set_cleanup_trap
         # Issue commands from client
-        sub_test_5 $LCTL
         sub_test_5 $LFS
+}
+run_test 5a "lfs pool_list from client"
 
-        # Issue commands from MDS
+test_5b() {
+        set_cleanup_trap
         sub_test_5 "do_facet $SINGLEMDS lctl"
-        sub_test_5 "do_facet $SINGLEMDS lfs"
-
 }
-run_test 5 "lfs/lctl pool_list"
+run_test 5b "lctl pool_list from MDS"
 
 test_6() {
         set_cleanup_trap
@@ -998,17 +998,17 @@ test_18() {
         # is / should be
         max=30
         diff=$((($files1 - $files2) * 100 / $files1))
-        echo "No pool / wide pool: $diff %."
-        [ $diff -gt $max ] &&
-                error_ignore 23408 "Degradation with wide pool is $diff% > $max%"
+        echo "No pool / wide pool: $diff %."
+        [ $diff -gt $max ] &&
+                error_ignore bz23408 "Degradation with wide pool is $diff% > $max%"
 
-        max=30
-        diff=$((($files1 - $files3) * 100 / $files1))
-        echo "No pool / missing pool: $diff %."
-        [ $diff -gt $max ] &&
-                error_ignore 23408 "Degradation with wide pool is $diff% > $max%"
+        max=30
+        diff=$((($files1 - $files3) * 100 / $files1))
+        echo "No pool / missing pool: $diff %."
+        [ $diff -gt $max ] &&
+                error_ignore bz23408 "Degradation with wide pool is $diff% > $max%"
 
-        return 0
+        return 0
 }
 run_test 18 "File create in a directory which references a deleted pool"
 
@@ -1159,9 +1159,6 @@ test_22() {
 run_test 22 "Simultaneous manipulation of a pool"
 
 test_23a() {
-        # XXX remove this once all quota code landed
-        skip_env "quota isn't functional" && return
-
         set_cleanup_trap
         local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
         [[ $OSTCOUNT -le 1 ]] && skip_env "Need at least 2 OSTs" && return
@@ -1228,9 +1225,6 @@ test_23a() {
 run_test 23a "OST pools and quota"
 
 test_23b() {
-        # XXX remove this once all quota code landed
-        skip_env "quota isn't functional" && return
-
         set_cleanup_trap
         local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
         [[ $OSTCOUNT -le 1 ]] && skip_env "Need at least 2 OSTs" && return 0
@@ -1243,8 +1237,6 @@ test_23b() {
 
         local i=0
         local TGT
-        local BUNIT_SZ=1024 # min block quota unit(kB)
-        local LIMIT=$((BUNIT_SZ * (OSTCOUNT + 1)))
         local dir=$POOL_ROOT/dir
         local file="$dir/$tfile-quota"
 
@@ -1255,12 +1247,14 @@ test_23b() {
         add_pool $POOL "$FSNAME-OST[$TGT_FIRST-$TGT_MAX/3]" "$TGT"
         create_dir $dir $POOL
 
-        AVAIL=$($LFS df -p $POOL $dir | awk '/summary/ { print $4 }')
-        [ $AVAIL -gt $MAXFREE ] &&
-                skip_env "Filesystem space $AVAIL is larger than $MAXFREE limit" &&
-                return 0
-        log "OSTCOUNT=$OSTCOUNT, OSTSIZE=$OSTSIZE"
-        log "MAXFREE=$MAXFREE, AVAIL=$AVAIL, SLOW=$SLOW"
+        local maxfree=$((1024 * 1024 * 30)) # 30G
+        local AVAIL=$(lfs_df -p $POOL $dir | awk '/summary/ { print $4 }')
+        [ $AVAIL -gt $maxfree ] &&
+                skip_env "Filesystem space $AVAIL is larger than " \
+                        "$maxfree limit" && return 0
+
+        echo "OSTCOUNT=$OSTCOUNT, OSTSIZE=$OSTSIZE, AVAIL=$AVAIL"
+        echo "MAXFREE=$maxfree, SLOW=$SLOW"
 
         # XXX remove the interoperability code once we drop the old server
         # (< 2.3.50) support.
@@ -1271,28 +1265,47 @@ test_23b() {
                 sleep 5
         fi
 
-        chown $RUNAS_ID.$RUNAS_ID $dir
-        i=0
-        RC=0
-        while [ $RC -eq 0 ]; do
-                i=$((i + 1))
-                stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file}$i bs=1M \
-                        count=$((LIMIT * 4)) 2>&1)
-                RC=$?
-                echo "$i: $stat"
-                if [ $RC -eq 1 ]; then
-                        echo $stat | grep -q "Disk quota exceeded"
-                        [[ $? -eq 0 ]] && error "dd failed with EDQUOT with quota off"
-
-                        echo $stat | grep -q "No space left on device"
-                        [[ $? -ne 0 ]] &&
-                                error "dd did not fail with ENOSPC"
-                fi
-        done
-
-        df -h
-
-        rm -rf $POOL_ROOT
+        chown $RUNAS_ID.$RUNAS_ID $dir
+        i=0
+        local RC=0
+        local TOTAL=0 # KB
+        local stime=$(date +%s)
+        local stat
+        local etime
+        local elapsed
+        local maxtime=300 # minimum speed: 5GB / 300sec ~= 17MB/s
+        while [ $RC -eq 0 ]; do
+                i=$((i + 1))
+                stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file}$i bs=1M \
+                        count=$((5 * 1024)) 2>&1)
+                RC=$?
+                TOTAL=$((TOTAL + 1024 * 1024 * 5))
+                echo "[$i iteration] $stat"
+                echo "total written: $TOTAL"
+
+                etime=$(date +%s)
+                elapsed=$((etime - stime))
+                echo "stime=$stime, etime=$etime, elapsed=$elapsed"
+
+                if [ $RC -eq 1 ]; then
+                        echo $stat | grep -q "Disk quota exceeded"
+                        [[ $? -eq 0 ]] &&
+                                error "dd failed with EDQUOT with quota off"
+
+                        echo $stat | grep -q "No space left on device"
+                        [[ $? -ne 0 ]] &&
+                                error "dd did not fail with ENOSPC"
+                elif [ $TOTAL -gt $AVAIL ]; then
+                        error "dd didn't fail with ENOSPC ($TOTAL > $AVAIL)"
+                elif [ $i -eq 1 -a $elapsed -gt $maxtime ]; then
+                        log "The first 5G write used $elapsed (> $maxtime) " \
+                                "seconds, terminated"
+                        RC=1
+                fi
+        done
+
+        df -h
+        rm -rf $POOL_ROOT
 }
 run_test 23b "OST pools and OOS"
 
@@ -1375,7 +1388,6 @@ run_test 24 "Independence of pool from other setstripe parameters"
 
 test_25() {
         set_cleanup_trap
-        local dev=$(mdsdevname ${SINGLEMDS//mds/})
         local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
 
         mkdir -p $POOL_ROOT
@@ -1387,11 +1399,9 @@ test_25() {
                         sort -u | tr '\n' ' ' " "$FSNAME-OST0000_UUID " >/dev/null ||
                         error "pool_add failed: $1; $2"
 
-                stop $SINGLEMDS || return 1
-                start $SINGLEMDS ${dev} $MDS_MOUNT_OPTS ||
-                        { error "Failed to start $SINGLEMDS after stopping" && break; }
-                wait_osc_import_state mds ost FULL
-                clients_up
+                facet_failover $SINGLEMDS || error "failed to failover $SINGLEMDS"
+                wait_osc_import_state $SINGLEMDS ost FULL
+                clients_up
 
                 wait_mds_ost_sync
                 # Veriy that the pool got created and is usable
@@ -1453,7 +1463,7 @@ run_test 26 "Choose other OSTs in the pool first in the creation remedy"
 
 cd $ORIG_PWD
 
-complete $(basename $0) $SECONDS
+complete $SECONDS
 cleanup_pools $FSNAME
 check_and_cleanup_lustre
 exit_status
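
For reference, a minimal sketch of how the tests touched by this patch could be exercised once it is applied, assuming the usual Lustre test-framework conventions (scripts run from lustre/tests with a configured cfg/$NAME.sh, and the ONLY/SLOW environment variables honored by run_test); nothing below is part of the patch itself:

# Run only the split pool_list tests: 5a from the client, 5b via the MDS facet.
cd lustre/tests
ONLY="5a 5b" bash ost-pools.sh

# 5b, 18, 22, 23b and 25 are now listed in EXCEPT_SLOW, so they are skipped
# when SLOW=no (the usual default); force SLOW=yes to run them, e.g. the
# out-of-space test 23b on its own.
ONLY=23b SLOW=yes bash ost-pools.sh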