X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Finsanity.sh;h=cadaaea0f267d246b105c95cc931b12ff3f341c4;hp=ebf5cb89c44389e99c70da97d1f9deb92a534e09;hb=0585b0fb5895a24f07ca32e830d1fa72b75f4f2b;hpb=efe37534401d6a195f895ebbf7640fdfd595d024

diff --git a/lustre/tests/insanity.sh b/lustre/tests/insanity.sh
index ebf5cb8..cadaaea 100755
--- a/lustre/tests/insanity.sh
+++ b/lustre/tests/insanity.sh
@@ -1,4 +1,7 @@
-#!/bin/sh
+#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
+#
 # Test multiple failures, AKA Test 17
 
 set -e
@@ -9,17 +12,16 @@
 LUSTRE=${LUSTRE:-`dirname $0`/..}
 init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
 
 #
-ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+ALWAYS_EXCEPT="15 $INSANITY_EXCEPT"
 
 if [ "$FAILURE_MODE" = "HARD" ]; then
-    mixed_ost_devs && CONFIG_EXCEPTIONS="0 2 4 5 6 8" && \
-    echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. " && \
-    echo "Except the tests: $CONFIG_EXCEPTIONS" && \
-    ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+    skip_env "$TESTSUITE: is not functional with FAILURE_MODE = HARD, " \
+        "please use recovery-double-scale, bz20407"
+    exit 0
 fi
 
-#
 [ "$SLOW" = "no" ] && EXCEPT_SLOW=""
 
 SETUP=${SETUP:-""}
@@ -31,17 +33,18 @@ SINGLECLIENT=${SINGLECLIENT:-$HOSTNAME}
 LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
 FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
 
-assert_env mds_HOST MDS_MKFS_OPTS
-assert_env ost_HOST OST_MKFS_OPTS OSTCOUNT
+assert_env mds_HOST MDSCOUNT
+assert_env ost_HOST OSTCOUNT
 assert_env LIVE_CLIENT FSNAME
 
-remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
 
 # FAIL_CLIENTS list should not contain the LIVE_CLIENT
 FAIL_CLIENTS=$(echo " $FAIL_CLIENTS " | sed -re "s/\s+$LIVE_CLIENT\s+/ /g")
 
 DIR=${DIR:-$MOUNT}
+TESTDIR=$DIR/d0.$TESTSUITE
 
 #####
 # fail clients round robin
@@ -60,26 +63,13 @@ set_fail_client() {
     echo "fail $FAIL_CLIENT, next is $FAIL_NEXT"
 }
 
-shutdown_client() {
-    client=$1
-    if [ "$FAILURE_MODE" = HARD ]; then
-        $POWER_DOWN $client
-        while ping -w 3 -c 1 $client > /dev/null 2>&1; do
-            echo "waiting for node $client to fail"
-            sleep 1
-        done
-    elif [ "$FAILURE_MODE" = SOFT ]; then
-        zconf_umount $client $MOUNT -f
-    fi
-}
 
 fail_clients() {
-    num=$1
+    num=$1
 
-    log "Request clients to fail: ${num}. Num of clients to fail: ${FAIL_NUM}, already failed: $DOWN_NUM"
-    if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
-        num=$((FAIL_NUM - DOWN_NUM))
-    fi
+    log "Request fail clients: $num, to fail: $FAIL_NUM, failed: $DOWN_NUM"
+    if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
+        num=$((FAIL_NUM - DOWN_NUM))
+    fi
 
     if [ -z "$num" ] || [ "$num" -le 0 ]; then
         log "No clients failed!"
@@ -97,57 +87,61 @@ fail_clients() {
 
     echo "down clients: $DOWN_CLIENTS"
 
-    for client in $DOWN_CLIENTS; do
-        boot_node $client
-    done
-    DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
-    client_rmdirs
+    for client in $DOWN_CLIENTS; do
+        boot_node $client
+    done
+    DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
+    client_rmdirs
 }
 
 reintegrate_clients() {
-    for client in $DOWN_CLIENTS; do
-        wait_for_host $client
-        echo "Restarting $client"
-        zconf_mount $client $MOUNT || return 1
-    done
-    DOWN_CLIENTS=""
-    DOWN_NUM=0
+    for client in $DOWN_CLIENTS; do
+        wait_for_host $client
+        echo "Restarting $client"
+        zconf_mount $client $MOUNT || return 1
+    done
+
+    DOWN_CLIENTS=""
+    DOWN_NUM=0
 }
 
 start_ost() {
-    start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
+    start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
+}
+
+start_mdt() {
+    start mds$1 $(mdsdevname $1) $MDS_MOUNT_OPTS
 }
 
 trap exit INT
 
 client_touch() {
-    file=$1
-    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        if echo $DOWN_CLIENTS | grep -q $c; then continue; fi
-        $PDSH $c touch $MOUNT/${c}_$file || return 1
-    done
+    file=$1
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo $DOWN_CLIENTS | grep -q $c && continue
+        $PDSH $c touch $TESTDIR/${c}_$file || return 1
+    done
}
 
 client_rm() {
-    file=$1
-    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        $PDSH $c rm $MOUNT/${c}_$file
-    done
+    file=$1
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        $PDSH $c rm $TESTDIR/${c}_$file
+    done
 }
 
 client_mkdirs() {
-    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        echo "$c mkdir $MOUNT/$c"
-        $PDSH $c "mkdir $MOUNT/$c"
-        $PDSH $c "ls -l $MOUNT/$c"
-    done
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo "$c mkdir $TESTDIR/$c"
+        $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
+    done
 }
 
 client_rmdirs() {
-    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        echo "rmdir $MOUNT/$c"
-        $PDSH $LIVE_CLIENT "rmdir $MOUNT/$c"
-    done
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo "rmdir $TESTDIR/$c"
+        $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
+    done
 }
 
 clients_recover_osts() {
@@ -157,18 +151,19 @@ clients_recover_osts() {
 
 check_and_setup_lustre
 
+rm -rf $TESTDIR
+mkdir -p $TESTDIR
+
 # 9 Different Failure Modes Combinations
 echo "Starting Test 17 at `date`"
 
 test_0() {
-    facet_failover $SINGLEMDS
-    echo "Waiting for df pid: $DFPID"
-    wait $DFPID || { echo "df returned $?" && return 1; }
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
     for i in $(seq $OSTCOUNT) ; do
-        facet_failover ost$i || return 4
-        echo "Waiting for df pid: $DFPID"
-        wait $DFPID || { echo "df returned $?" && return 3; }
+        fail ost$i
     done
     return 0
 }
@@ -176,27 +171,59 @@ run_test 0 "Fail all nodes, independently"
 
 ############### First Failure Mode ###############
 test_1() {
-echo "Don't do a MDS - MDS Failure Case"
-echo "This makes no sense"
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+    [ "$(facet_fstype mds2)" = "zfs" ] &&
+        skip "LU-2059: no local config for ZFS MDTs" && return
+
+    clients_up
+
+    shutdown_facet mds1
+    reboot_facet mds1
+
+    # prepare for MDS failover
+    change_active mds1
+    reboot_facet mds1
+
+    clients_up &
+    DFPID=$!
+    sleep 5
+
+    shutdown_facet mds2
+
+    echo "Reintegrating MDS2"
+    reboot_facet mds2
+    wait_for_facet mds2
+    start_mdt 2 || return 2
+
+    wait_for_facet mds1
+    start_mdt 1 || return $?
+
+    #Check FS
+    wait $DFPID
+    echo "Verify reintegration"
+    clients_up || return 1
 }
 run_test 1 "MDS/MDS failure"
 ###################################################
 
 ############### Second Failure Mode ###############
 test_2() {
-    echo "Verify Lustre filesystem is up and running"
-    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
 
-    shutdown_facet $SINGLEMDS
-    reboot_facet $SINGLEMDS
+    for i in $(seq $MDSCOUNT) ; do
+        shutdown_facet mds$i
+        reboot_facet mds$i
 
-    # prepare for MDS failover
-    change_active $SINGLEMDS
-    reboot_facet $SINGLEMDS
+        # prepare for MDS failover
+        change_active mds$i
+        reboot_facet mds$i
+    done
 
-    client_df &
+    clients_up &
     DFPID=$!
     sleep 5
 
@@ -204,36 +231,38 @@ test_2() {
 
     echo "Reintegrating OST"
     reboot_facet ost1
-    wait_for ost1
+    wait_for_facet ost1
     start_ost 1 || return 2
 
-    wait_for $SINGLEMDS
-    start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || return $?
+    for i in $(seq $MDSCOUNT) ; do
+        wait_for_facet mds$i
+        start_mdt $i || return $?
+    done
 
     #Check FS
     wait $DFPID
     clients_recover_osts ost1
     echo "Verify reintegration"
-    client_df || return 1
+    clients_up || return 1
 }
 run_test 2 "Second Failure Mode: MDS/OST `date`"
 ###################################################
 
-
 ############### Third Failure Mode ###############
 test_3() {
     #Create files
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
-    
+
     #MDS Portion
-    facet_failover $SINGLEMDS
-    wait $DFPID || echo df failed: $?
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
     #Check FS
     echo "Test Lustre stability after MDS failover"
-    client_df
+    clients_up
 
     #CLIENT Portion
     echo "Failing 2 CLIENTS"
@@ -241,13 +270,13 @@ test_3() {
 
     #Check FS
     echo "Test Lustre stability after CLIENT failure"
-    client_df
+    clients_up
 
     #Reintegration
     echo "Reintegrating CLIENTS"
     reintegrate_clients || return 1
 
-    client_df || return 3
+    clients_up || return 3
     sleep 2 # give it a little time for fully recovered before next test
 }
 run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
@@ -255,51 +284,54 @@ run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
 
 ############### Fourth Failure Mode ###############
 test_4() {
-    echo "Fourth Failure Mode: OST/MDS `date`"
+    echo "Fourth Failure Mode: OST/MDS `date`"
 
     #OST Portion
     shutdown_facet ost1
-    
+
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df &
+    clients_up &
     DFPIDA=$!
     sleep 5
 
-    #MDS Portion
-    shutdown_facet $SINGLEMDS
-    reboot_facet $SINGLEMDS
+    for i in $(seq $MDSCOUNT) ; do
+        shutdown_facet mds$i
+        reboot_facet mds$i
 
-    # prepare for MDS failover
-    change_active $SINGLEMDS
-    reboot_facet $SINGLEMDS
+        # prepare for MDS failover
+        change_active mds$i
+        reboot_facet mds$i
+    done
 
-    client_df &
+    clients_up &
     DFPIDB=$!
     sleep 5
 
     #Reintegration
     echo "Reintegrating OST"
     reboot_facet ost1
-    wait_for ost1
+    wait_for_facet ost1
     start_ost 1
-    
-    wait_for $SINGLEMDS
-    start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS
+
+    for i in $(seq $MDSCOUNT) ; do
+        wait_for_facet mds$i
+        start_mdt $i || return $?
+    done
 
     #Check FS
-    
+
     wait $DFPIDA
     wait $DFPIDB
     clients_recover_osts ost1
     echo "Test Lustre stability after MDS failover"
-    client_df || return 1
+    clients_up || return 1
 }
 run_test 4 "Fourth Failure Mode: OST/MDS `date`"
 ###################################################
 
 ############### Fifth Failure Mode ###############
 test_5() {
-    [ $OSTCOUNT -lt 2 ] && skip "$OSTCOUNT < 2, not enough OSTs" && return 0
+    [ $OSTCOUNT -lt 2 ] && skip_env "$OSTCOUNT < 2, not enough OSTs" && return 0
 
     echo "Fifth Failure Mode: OST/OST `date`"
 
@@ -307,7 +339,7 @@ test_5() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
 
     #OST Portion
     shutdown_facet ost1
@@ -315,7 +347,7 @@ test_5() {
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df &
+    clients_up &
     DFPIDA=$!
     sleep 5
 
@@ -325,15 +357,15 @@ test_5() {
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df &
+    clients_up &
     DFPIDB=$!
     sleep 5
 
     #Reintegration
     echo "Reintegrating OSTs"
-    wait_for ost1
+    wait_for_facet ost1
     start_ost 1
-    wait_for ost2
+    wait_for_facet ost2
     start_ost 2
 
     clients_recover_osts ost1
@@ -342,7 +374,7 @@
     wait $DFPIDA
     wait $DFPIDB
 
-    client_df || return 2
+    clients_up || return 2
 }
 run_test 5 "Fifth Failure Mode: OST/OST `date`"
 ###################################################
 
@@ -355,7 +387,7 @@ test_6() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
     client_touch testfile || return 2
 
     #OST Portion
@@ -364,7 +396,7 @@
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df &
+    clients_up &
     DFPIDA=$!
     echo DFPIDA=$DFPIDA
     sleep 5
 
@@ -375,25 +407,25 @@
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df &
+    clients_up &
     DFPIDB=$!
     echo DFPIDB=$DFPIDB
     sleep 5
 
     #Reintegration
     echo "Reintegrating OST/CLIENTs"
-    wait_for ost1
+    wait_for_facet ost1
     start_ost 1
     reintegrate_clients || return 1
     sleep 5
 
-    wait_remote_prog df $((TIMEOUT * 3 + 10))
+    wait_remote_prog "stat -f" $((TIMEOUT * 3 + 20))
     wait $DFPIDA
     wait $DFPIDB
 
     echo "Verifying mount"
     [ -z "$(mounted_lustre_filesystems)" ] && return 3
-    client_df
+    clients_up
 }
 run_test 6 "Sixth Failure Mode: OST/CLIENT `date`"
 ###################################################
 
@@ -407,7 +439,7 @@ test_7() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
     client_touch testfile || return 1
 
     #CLIENT Portion
@@ -416,9 +448,9 @@ test_7() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Sleep
     echo "Wait 1 minutes"
@@ -428,22 +460,21 @@ test_7() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && return 2
 
-    client_df
+    clients_up
     client_rm testfile
 
     #MDS Portion
-    facet_failover $SINGLEMDS
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
-    #Check FS
-    echo "Test Lustre stability after MDS failover"
-    wait $DFPID || echo "df on down clients fails " || return 1
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Reintegration
     echo "Reintegrating CLIENTs"
     reintegrate_clients || return 2
-    client_df
+    clients_up
 
     #Sleep
     echo "wait 1 minutes"
@@ -461,7 +492,7 @@ test_8() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
     client_touch testfile
 
     #CLIENT Portion
@@ -470,9 +501,9 @@ test_8() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Sleep
     echo "Wait 1 minutes"
@@ -482,7 +513,7 @@ test_8() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
     client_touch testfile
 
 
@@ -492,20 +523,20 @@ test_8() {
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df &
+    clients_up &
     DFPID=$!
     sleep 5
     #non-failout hangs forever here
-    #$PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    #$PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    #$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    #$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Reintegration
     echo "Reintegrating CLIENTs/OST"
     reintegrate_clients || return 3
-    wait_for ost1
+    wait_for_facet ost1
     start_ost 1
     wait $DFPID
-    client_df || return 1
+    clients_up || return 1
     client_touch testfile2 || return 2
 
     #Sleep
@@ -524,7 +555,7 @@ test_9() {
     echo "Verify Lustre filesystem is up and running"
     [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-    client_df
+    clients_up
     client_touch testfile || return 1
 
     #CLIENT Portion
@@ -533,9 +564,9 @@ test_9() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 1
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 2
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 1
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 2
 
     #Sleep
     echo "Wait 1 minutes"
@@ -543,7 +574,7 @@ test_9() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    $PDSH $LIVE_CLIENT df $MOUNT || return 3
+    client_up $LIVE_CLIENT || return 3
     client_touch testfile || return 4
 
     #CLIENT Portion
@@ -552,14 +583,14 @@ test_9() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 5
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 6
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 5
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 6
 
     #Reintegration
     echo "Reintegrating CLIENTs/CLIENTs"
     reintegrate_clients || return 7
-    client_df
+    clients_up
 
     #Sleep
     echo "Wait 1 minutes"
@@ -568,14 +599,184 @@ run_test 9 "Ninth Failure Mode: CLIENT/CLIENT `date`"
 ###################################################
 
+############### Tenth Failure Mode ###############
 test_10() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+    shutdown_facet mds1
+    reboot_facet mds1
+
+    # prepare for MDS failover
+    change_active mds1
+    reboot_facet mds1
+
+    clients_up &
+    DFPID=$!
+    sleep 5
+
+    shutdown_facet ost1
+
+    echo "Reintegrating OST"
+    reboot_facet ost1
+    wait_for_facet ost1
+    start_ost 1 || return 2
+
+    shutdown_facet mds2
+    reboot_facet mds2
+
+    # prepare for MDS failover
+    change_active mds2
+    reboot_facet mds2
+
+    wait_for_facet mds1
+    start_mdt 1 || return $?
+
+    wait_for_facet mds2
+    start_mdt 2 || return $?
+
+    #Check FS
+    wait $DFPID
+    clients_recover_osts ost1
+    echo "Verify reintegration"
+    clients_up || return 1
+}
+run_test 10 "Tenth Failure Mode: MDT0/OST/MDT1 `date`"
+###################################################
+
+############### Seventh Failure Mode ###############
+test_11() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1
+    #Check FS
+
+    echo "Test Lustre stability after MDS failover"
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    fail mds2
+
+    clients_up || return 3
+    sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 11 "Eleventh Failure Mode: MDS0/CLIENT/MDS1 `date`"
+###################################################
+
+test_12() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1,mds2
+    clients_up
+
+    #OSS Portion
+    fail ost1,ost2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 12 "Twelve Failure Mode: MDS0,MDS1/OST0, OST1/CLIENTS `date`"
+###################################################
+
+test_13() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1,mds2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time for fully recovered before next test
+
+    #OSS Portion
+    fail ost1,ost2
+    clients_up || return 4
+}
+run_test 13 "Thirteen Failure Mode: MDS0,MDS1/CLIENTS/OST0,OST1 `date`"
+###################################################
+
+test_14() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #OST Portion
+    fail ost1,ost2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time for fully recovered before next test
+
+    #OSS Portion
+    fail mds1,mds2
+    clients_up || return 4
+}
+run_test 14 "Fourteen Failure Mode: OST0,OST1/CLIENTS/MDS0,MDS1 `date`"
+###################################################
+
+test_15() {
     #Run availability after all failures
     DURATION=${DURATION:-$((2 * 60 * 60))} # 6 hours default
     LOADTEST=${LOADTEST:-metadata-load.py}
     $PWD/availability.sh $CONFIG $DURATION $CLIENTS || return 1
 }
-run_test 10 "Running Availability for 6 hours..."
+run_test 15 "Running Availability for 6 hours..."
 
-equals_msg `basename $0`: test complete, cleaning up
+complete $SECONDS
 check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true
+exit_status