X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Fost-pools.sh;h=c3a560952ef11f390832083cfc4095ca271c59ba;hb=28806162c234a178365853a393aa401e180256e2;hp=c9c3f22ce89539a41ff040d50ea4e03f7de038dd;hpb=9680dabde70a9ec5f211e26d7c2f2988f296f455;p=fs%2Flustre-release.git

diff --git a/lustre/tests/ost-pools.sh b/lustre/tests/ost-pools.sh
index c9c3f22..c3a5609 100644
--- a/lustre/tests/ost-pools.sh
+++ b/lustre/tests/ost-pools.sh
@@ -29,7 +29,7 @@ init_logging
 
 check_and_setup_lustre
 
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="23b"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="5b 18 22 23b 25"
 
 DIR=${DIR:-$MOUNT}
 assert_DIR
@@ -576,18 +576,18 @@ sub_test_5() {
 	destroy_pool $POOL2
 }
 
-test_5() {
+test_5a() {
 	set_cleanup_trap
 	# Issue commands from client
-	sub_test_5 $LCTL
 	sub_test_5 $LFS
+}
+run_test 5a "lfs pool_list from client"
 
-	# Issue commands from MDS
+test_5b() {
+	set_cleanup_trap
 	sub_test_5 "do_facet $SINGLEMDS lctl"
-	sub_test_5 "do_facet $SINGLEMDS lfs"
-
 }
-run_test 5 "lfs/lctl pool_list"
+run_test 5b "lctl pool_list from MDS"
 
 test_6() {
 	set_cleanup_trap
@@ -1237,8 +1237,6 @@ test_23b() {
 
 	local i=0
 	local TGT
-	local BUNIT_SZ=1024 # min block quota unit(kB)
-	local LIMIT=$((BUNIT_SZ * (OSTCOUNT + 1)))
 	local dir=$POOL_ROOT/dir
 	local file="$dir/$tfile-quota"
 
@@ -1249,12 +1247,14 @@ test_23b() {
 	add_pool $POOL "$FSNAME-OST[$TGT_FIRST-$TGT_MAX/3]" "$TGT"
 	create_dir $dir $POOL
 
-	AVAIL=$($LFS df -p $POOL $dir | awk '/summary/ { print $4 }')
-	[ $AVAIL -gt $MAXFREE ] &&
-		skip_env "Filesystem space $AVAIL is larger than $MAXFREE limit" &&
-		return 0
-	log "OSTCOUNT=$OSTCOUNT, OSTSIZE=$OSTSIZE"
-	log "MAXFREE=$MAXFREE, AVAIL=$AVAIL, SLOW=$SLOW"
+	local maxfree=$((1024 * 1024 * 30)) # 30G
+	local AVAIL=$(lfs_df -p $POOL $dir | awk '/summary/ { print $4 }')
+	[ $AVAIL -gt $maxfree ] &&
+		skip_env "Filesystem space $AVAIL is larger than " \
+			"$maxfree limit" && return 0
+
+	echo "OSTCOUNT=$OSTCOUNT, OSTSIZE=$OSTSIZE, AVAIL=$AVAIL"
+	echo "MAXFREE=$maxfree, SLOW=$SLOW"
 
 	# XXX remove the interoperability code once we drop the old server
 	# ( < 2.3.50) support.
@@ -1265,28 +1265,47 @@ test_23b() {
 		sleep 5
 	fi
 
-	chown $RUNAS_ID.$RUNAS_ID $dir
-	i=0
-	RC=0
-	while [ $RC -eq 0 ]; do
-		i=$((i + 1))
-		stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file}$i bs=1M \
-			count=$((LIMIT * 4)) 2>&1)
-		RC=$?
-		echo "$i: $stat"
-		if [ $RC -eq 1 ]; then
-			echo $stat | grep -q "Disk quota exceeded"
-			[[ $? -eq 0 ]] && error "dd failed with EDQUOT with quota off"
-
-			echo $stat | grep -q "No space left on device"
-			[[ $? -ne 0 ]] &&
-				error "dd did not fail with ENOSPC"
-		fi
-	done
-
-	df -h
-
-	rm -rf $POOL_ROOT
+	chown $RUNAS_ID.$RUNAS_ID $dir
+	i=0
+	local RC=0
+	local TOTAL=0 # KB
+	local stime=$(date +%s)
+	local stat
+	local etime
+	local elapsed
+	local maxtime=300 # minimum speed: 5GB / 300sec ~= 17MB/s
+	while [ $RC -eq 0 ]; do
+		i=$((i + 1))
+		stat=$(LOCALE=C $RUNAS2 dd if=/dev/zero of=${file}$i bs=1M \
+			count=$((5 * 1024)) 2>&1)
+		RC=$?
+		TOTAL=$((TOTAL + 1024 * 1024 * 5))
+		echo "[$i iteration] $stat"
+		echo "total written: $TOTAL"
+
+		etime=$(date +%s)
+		elapsed=$((etime - stime))
+		echo "stime=$stime, etime=$etime, elapsed=$elapsed"
+
+		if [ $RC -eq 1 ]; then
+			echo $stat | grep -q "Disk quota exceeded"
+			[[ $? -eq 0 ]] &&
+				error "dd failed with EDQUOT with quota off"
+
+			echo $stat | grep -q "No space left on device"
+			[[ $? -ne 0 ]] &&
+				error "dd did not fail with ENOSPC"
+		elif [ $TOTAL -gt $AVAIL ]; then
+			error "dd didn't fail with ENOSPC ($TOTAL > $AVAIL)"
+		elif [ $i -eq 1 -a $elapsed -gt $maxtime ]; then
+			log "The first 5G write used $elapsed (> $maxtime) " \
+				"seconds, terminated"
+			RC=1
+		fi
+	done
+
+	df -h
+	rm -rf $POOL_ROOT
 }
 run_test 23b "OST pools and OOS"
 
@@ -1369,7 +1388,6 @@ run_test 24 "Independence of pool from other setstripe parameters"
 
 test_25() {
 	set_cleanup_trap
-	local dev=$(mdsdevname ${SINGLEMDS//mds/})
 	local POOL_ROOT=${POOL_ROOT:-$DIR/$tdir}
 
 	mkdir -p $POOL_ROOT
@@ -1381,11 +1399,9 @@ test_25() {
 			sort -u | tr '\n' ' ' " "$FSNAME-OST0000_UUID " >/dev/null ||
 			error "pool_add failed: $1; $2"
 
-		stop $SINGLEMDS || return 1
-		start $SINGLEMDS ${dev} $MDS_MOUNT_OPTS ||
-			{ error "Failed to start $SINGLEMDS after stopping" && break; }
-		wait_osc_import_state mds ost FULL
-		clients_up
+		facet_failover $SINGLEMDS || error "failed to failover $SINGLEMDS"
+		wait_osc_import_state $SINGLEMDS ost FULL
+		clients_up
 
 		wait_mds_ost_sync
 		# Veriy that the pool got created and is usable
@@ -1447,7 +1463,7 @@ run_test 26 "Choose other OSTs in the pool first in the creation remedy"
 
 cd $ORIG_PWD
 
-complete $(basename $0) $SECONDS
+complete $SECONDS
 cleanup_pools $FSNAME
 check_and_cleanup_lustre
 exit_status