X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Finsanity.sh;h=32701f94d25d77eca6304d602af53e2efa6f0c3b;hb=20dfdeda5ea3f679b68c4a9fb6924c90a0a79cac;hp=1838cea7907775d8611aa6f4609d6e15a4f2f3e9;hpb=c190866d0098b4c4d653168bcf3f7094b4ac7abd;p=fs%2Flustre-release.git

diff --git a/lustre/tests/insanity.sh b/lustre/tests/insanity.sh
index 1838cea..32701f9 100755
--- a/lustre/tests/insanity.sh
+++ b/lustre/tests/insanity.sh
@@ -1,4 +1,7 @@
-#!/bin/sh
+#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
+#
 # Test multiple failures, AKA Test 17
 
 set -e
@@ -8,20 +11,40 @@ LUSTRE=${LUSTRE:-`dirname $0`/..}
 init_test_env $@
 
-. ${CONFIG:=$LUSTRE/tests/cfg/insanity-local.sh}
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+#
+ALWAYS_EXCEPT="15 $INSANITY_EXCEPT"
 
-ALWAYS_EXCEPT="10"
+if [ "$FAILURE_MODE" = "HARD" ]; then
+    skip_env "$TESTSUITE: is not functional with FAILURE_MODE = HARD, " \
+        "please use recovery-double-scale, bz20407"
+    exit 0
+fi
+
+[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
+
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}
 
 build_test_filter
 
-assert_env mds_HOST ost1_HOST ost2_HOST client_HOST LIVE_CLIENT
+SINGLECLIENT=${SINGLECLIENT:-$HOSTNAME}
+LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
+FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
+
+assert_env mds_HOST MDSCOUNT
+assert_env ost_HOST OSTCOUNT
+assert_env LIVE_CLIENT FSNAME
 
-# This can be a regexp, to allow more clients
-CLIENTS=${CLIENTS:-"`comma_list $LIVE_CLIENT $FAIL_CLIENTS`"}
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
 
-CLIENTLIST="$LIVE_CLIENT $FAIL_CLIENTS"
+# FAIL_CLIENTS list should not contain the LIVE_CLIENT
+FAIL_CLIENTS=$(echo " $FAIL_CLIENTS " | sed -re "s/\s+$LIVE_CLIENT\s+/ /g")
 
 DIR=${DIR:-$MOUNT}
+TESTDIR=$DIR/d0.$TESTSUITE
 
 #####
 # fail clients round robin
 
@@ -30,211 +53,216 @@ DIR=${DIR:-$MOUNT}
 FAIL_LIST=($FAIL_CLIENTS)
 FAIL_NUM=${#FAIL_LIST[*]}
 FAIL_NEXT=0
+typeset -i FAIL_NEXT
 DOWN_NUM=0   # number of nodes currently down
 
-# return next client to fail
-fail_client() {
-    ret=${FAIL_LIST[$FAIL_NEXT]}
+# set next client to fail
+set_fail_client() {
+    FAIL_CLIENT=${FAIL_LIST[$FAIL_NEXT]}
     FAIL_NEXT=$(( (FAIL_NEXT+1) % FAIL_NUM ))
-    echo $ret
-}
-
-shutdown_client() {
-    client=$1
-    if [ "$FAILURE_MODE" = HARD ]; then
-        $POWER_DOWN $client
-    elif [ "$FAILURE_MODE" = SOFT ]; then
-        $PDSH $client $LCONF --clenaup --force --nomod $XMLCONFIG
-    fi
-}
-
-reboot_node() {
-    NODE=$1
-    if [ "$FAILURE_MODE" = HARD ]; then
-        $POWER_UP $NODE
-    fi
+    echo "fail $FAIL_CLIENT, next is $FAIL_NEXT"
 }
 
 fail_clients() {
-    num=$1
-    if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
-        num=$((FAIL_NUM - DOWN_NUM))
-    fi
+    num=$1
+
+    log "Request fail clients: $num, to fail: $FAIL_NUM, failed: $DOWN_NUM"
+    if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
+        num=$((FAIL_NUM - DOWN_NUM))
+    fi
     if [ -z "$num" ] || [ "$num" -le 0 ]; then
+        log "No clients failed!"
         return
     fi
 
+    client_mkdirs
+
     for i in `seq $num`; do
-        client=`fail_client`
+        set_fail_client
+        client=$FAIL_CLIENT
         DOWN_CLIENTS="$DOWN_CLIENTS $client"
-        client_mkdirs
         shutdown_client $client
     done
 
-    for client in $DOWN_CLIENTS; do
-        reboot_node $client
-    done
-    DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
-    $PDSH $LIVE_CLIENT "cd $MOUNT && rmdir $CLIENTLIST"
+    echo "down clients: $DOWN_CLIENTS"
+
+    for client in $DOWN_CLIENTS; do
+        boot_node $client
+    done
+    DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
+    client_rmdirs
 }
 
 reintegrate_clients() {
-    for client in $DOWN_CLIENTS; do
-        wait_for_host $client
-        $PDSH $client "$LCONF --node client --select mds_svc=`facet_active mds` $CLIENTOPTS $XMLCONFIG"
-    done
-    DOWN_CLIENTS=""
-    DOWN_NUM=0
+    for client in $DOWN_CLIENTS; do
+        wait_for_host $client
+        echo "Restarting $client"
+        zconf_mount $client $MOUNT || return 1
+    done
+
+    DOWN_CLIENTS=""
+    DOWN_NUM=0
 }
 
-gen_config() {
-    rm -f $XMLCONFIG
-    add_mds mds --dev $MDSDEV --size $MDSSIZE
-
-    if [ ! -z "$mdsfailover_HOST" ]; then
-        add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE
-    fi
-
-    add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
-        --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
-    add_ost ost1 --lov lov1 --dev $OSTDEV --size $OSTSIZE
-    add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
-    add_client client mds --lov lov1 --path $MOUNT
+start_ost() {
+    start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
 }
 
-setup() {
-    wait_for ost1
-    start ost1 ${REFORMAT} $OSTLCONFARGS
-    wait_for ost2
-    start ost2 ${REFORMAT} $OSTLCONFARGS
-    [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
-    wait_for mds
-    start mds $MDSLCONFARGS ${REFORMAT}
-    while ! do_node $HOST "$CHECKSTAT -t dir $LUSTRE"; do sleep 5; done
-    do_node $CLIENTS lconf --node client_facet \
-        --select mds_service=$ACTIVEMDS $XMLCONFIG
+start_mdt() {
+    start mds$1 $(mdsdevname $1) $MDS_MOUNT_OPTS
 }
 
-cleanup() {
-    # make sure we are using the primary MDS, so the config log will
-    # be able to clean up properly.
-    activemds=`facet_active mds`
-#    if [ $activemds != "mds" ]; then
-#        fail mds
-#    fi
-    for node in $CLIENTS; do
-        do_node $node lconf ${FORCE} --select mds_svc=${activemds}_facet --cleanup --node client_facet $XMLCONFIG || true
-    done
+trap exit INT
 
-    stop mds ${FORCE} $MDSLCONFARGS
-    stop ost1 ${FORCE}
-    stop ost2 ${FORCE} --dump cleanup.log
+client_touch() {
+    file=$1
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo $DOWN_CLIENTS | grep -q $c && continue
+        $PDSH $c touch $TESTDIR/${c}_$file || return 1
+    done
 }
 
-trap exit INT
+client_rm() {
+    file=$1
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        $PDSH $c rm $TESTDIR/${c}_$file
+    done
+}
 
 client_mkdirs() {
-    $PDSH $CLIENTS "mkdir $MOUNT/\`hostname\`; ls $MOUNT/\`hostname\` > /dev/null"
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo "$c mkdir $TESTDIR/$c"
+        $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
+    done
+}
+
+client_rmdirs() {
+    for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+        echo "rmdir $TESTDIR/$c"
+        $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
+    done
 }
 
 clients_recover_osts() {
     facet=$1
-    $PDSH $CLIENTS "$LCTL "'--device %OSC_`hostname`_'"${facet}_svc_MNT_client_facet recover"
+#    do_node $CLIENTS "$LCTL "'--device %OSC_`hostname`_'"${facet}_svc_MNT_client_facet recover"
 }
 
-if [ "$ONLY" == "cleanup" ]; then
-    cleanup
-    exit
-fi
-
-gen_config
-setup
+check_and_setup_lustre
 
-if [ "$ONLY" == "setup" ]; then
-    exit 0
-fi
+rm -rf $TESTDIR
+mkdir -p $TESTDIR
 
 # 9 Different Failure Modes Combinations
 echo "Starting Test 17 at `date`"
 
 test_0() {
-    echo "Failover MDS"
-    facet_failover mds
-    wait $DFPID || return 1
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
-    echo "Failing OST1"
-    facet_failover ost1
-    wait $DFPID || return 2
-
-    echo "Failing OST2"
-    facet_failover ost2
-    wait $DFPID || return 3
+    for i in $(seq $OSTCOUNT) ; do
+        fail ost$i
+    done
 
     return 0
 }
run_test 0 "Fail all nodes, independently"
 
 ############### First Failure Mode ###############
 test_1() {
-echo "Don't do a MDS - MDS Failure Case"
-echo "This makes no sense"
-# FIXME every test makes sense
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+    clients_up
+
+    shutdown_facet mds1
+    reboot_facet mds1
+
+    # prepare for MDS failover
+    change_active mds1
+    reboot_facet mds1
+
+    clients_up &
+    DFPID=$!
+    sleep 5
+
+    shutdown_facet mds2
+
+    echo "Reintegrating MDS2"
+    reboot_facet mds2
+    wait_for_facet mds2
+    start_mdt 2 || return 2
+
+    wait_for_facet mds1
+    start_mdt 1 || return $?
+
+    #Check FS
+    wait $DFPID
+    echo "Verify reintegration"
+    clients_up || return 1
 }
 run_test 1 "MDS/MDS failure"
 ###################################################
 
 ############### Second Failure Mode ###############
 test_2() {
-    echo "Verify Lustre filesystem is up and running"
-    client_df
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    [ "$(facet_fstype ost1)" = "zfs" ] &&
+        skip "LU-2059: no local config for ZFS OSTs" && return
 
-    echo "Failing MDS"
-    shutdown_facet mds
-    reboot_facet mds
+    clients_up
 
-    # prepare for MDS failover
-    change_active mds
-    reboot_facet mds
+    for i in $(seq $MDSCOUNT) ; do
+        shutdown_facet mds$i
+        reboot_facet mds$i
 
-    client_df &
+        # prepare for MDS failover
+        change_active mds$i
+        reboot_facet mds$i
+    done
+
+    clients_up &
     DFPID=$!
     sleep 5
 
-    echo "Failing OST"
     shutdown_facet ost1
 
     echo "Reintegrating OST"
     reboot_facet ost1
-    wait_for ost1
-    start ost1
+    wait_for_facet ost1
+    start_ost 1 || return 2
 
-    echo "Failover MDS"
-    wait_for mds
-    start mds
+    for i in $(seq $MDSCOUNT) ; do
+        wait_for_facet mds$i
+        start_mdt $i || return $?
+    done
 
     #Check FS
     wait $DFPID
     clients_recover_osts ost1
     echo "Verify reintegration"
-    client_df || return 1
+    clients_up || return 1
 }
 run_test 2 "Second Failure Mode: MDS/OST `date`"
 ###################################################
 
-
 ############### Third Failure Mode ###############
 test_3() {
     #Create files
     echo "Verify Lustre filesystem is up and running"
-
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     #MDS Portion
-    facet_failover mds
-    wait $DFPID || echo df failed: $?
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
     #Check FS
     echo "Test Lustre stability after MDS failover"
-    client_df
+    clients_up
 
     #CLIENT Portion
     echo "Failing 2 CLIENTS"
@@ -242,98 +270,114 @@ test_3() {
 
     #Check FS
     echo "Test Lustre stability after CLIENT failure"
-    client_df
+    clients_up
 
     #Reintegration
     echo "Reintegrating CLIENTS"
-    reintegrate_clients
+    reintegrate_clients || return 1
 
-    client_df || return 1
+    clients_up || return 3
+    sleep 2 # give it a little time to fully recover before the next test
 }
 run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
 ###################################################
 
 ############### Fourth Failure Mode ###############
 test_4() {
-    echo "Fourth Failure Mode: OST/MDS `date`"
+    echo "Fourth Failure Mode: OST/MDS `date`"
+
+    [ "$(facet_fstype ost1)" = "zfs" ] &&
+        skip "LU-2059: no local config for ZFS OSTs" && return
 
     #OST Portion
-    echo "Failing OST ost1"
     shutdown_facet ost1
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df
+    clients_up &
+    DFPIDA=$!
+    sleep 5
 
-    #MDS Portion
-    echo "Failing MDS"
-    shutdown_facet mds
-    reboot_facet mds
+    for i in $(seq $MDSCOUNT) ; do
+        shutdown_facet mds$i
+        reboot_facet mds$i
 
-    # prepare for MDS failover
-    change_active mds
-    reboot_facet mds
+        # prepare for MDS failover
+        change_active mds$i
+        reboot_facet mds$i
+    done
 
-    client_df &
-    DFPID=$!
+    clients_up &
+    DFPIDB=$!
     sleep 5
 
     #Reintegration
     echo "Reintegrating OST"
     reboot_facet ost1
-    wait_for ost1
-    start ost1
-
-    echo "Failover MDS"
-    wait_for mds
-    start mds
+    wait_for_facet ost1
+    start_ost 1
+
+    for i in $(seq $MDSCOUNT) ; do
+        wait_for_facet mds$i
+        start_mdt $i || return $?
+    done
 
     #Check FS
-
-    wait $DFPID
+
+    wait $DFPIDA
+    wait $DFPIDB
     clients_recover_osts ost1
     echo "Test Lustre stability after MDS failover"
-    client_df || return 1
+    clients_up || return 1
 }
 run_test 4 "Fourth Failure Mode: OST/MDS `date`"
 ###################################################
 
 ############### Fifth Failure Mode ###############
 test_5() {
+    [ $OSTCOUNT -lt 2 ] && skip_env "$OSTCOUNT < 2, not enough OSTs" && return 0
+
     echo "Fifth Failure Mode: OST/OST `date`"
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
 
     #OST Portion
-    echo "Failing OST"
     shutdown_facet ost1
     reboot_facet ost1
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df
+    clients_up &
+    DFPIDA=$!
+    sleep 5
 
     #OST Portion
-    echo "Failing OST"
     shutdown_facet ost2
     reboot_facet ost2
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df
+    clients_up &
+    DFPIDB=$!
+    sleep 5
 
     #Reintegration
     echo "Reintegrating OSTs"
-    wait_for ost1
-    wait_for ost1
-    start ost1
-    start ost2
+    wait_for_facet ost1
+    start_ost 1
+    wait_for_facet ost2
+    start_ost 2
 
     clients_recover_osts ost1
     clients_recover_osts ost2
-    sleep 5
 
-    client_df || return 1
+    sleep $TIMEOUT
+
+    wait $DFPIDA
+    wait $DFPIDB
+    clients_up || return 2
 }
 run_test 5 "Fifth Failure Mode: OST/OST `date`"
 ###################################################
 
@@ -344,17 +388,21 @@ test_6() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df || return 1
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile" || return 2
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
+    client_touch testfile || return 2
 
     #OST Portion
-    echo "Failing OST"
     shutdown_facet ost1
     reboot_facet ost1
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df
+    clients_up &
+    DFPIDA=$!
+    echo DFPIDA=$DFPIDA
+    sleep 5
 
     #CLIENT Portion
     echo "Failing CLIENTs"
@@ -362,17 +410,25 @@ test_6() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
+    clients_up &
+    DFPIDB=$!
+    echo DFPIDB=$DFPIDB
+    sleep 5
 
     #Reintegration
     echo "Reintegrating OST/CLIENTs"
-    wait_for ost1
-    start ost1
-    reintegrate_clients
+    wait_for_facet ost1
+    start_ost 1
+    reintegrate_clients || return 1
     sleep 5
 
+    wait_remote_prog "stat -f" $((TIMEOUT * 3 + 20))
+    wait $DFPIDA
+    wait $DFPIDB
+
     echo "Verifying mount"
-    client_df || return 3
+    [ -z "$(mounted_lustre_filesystems)" ] && return 3
+    clients_up
 }
 run_test 6 "Sixth Failure Mode: OST/CLIENT `date`"
 ###################################################
 
@@ -384,8 +440,10 @@ test_7() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
+    client_touch testfile || return 1
 
     #CLIENT Portion
     echo "Part 1: Failing CLIENT"
@@ -393,9 +451,9 @@ test_7() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Sleep
     echo "Wait 1 minutes"
@@ -403,23 +461,23 @@ test_7() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+    [ -z "$(mounted_lustre_filesystems)" ] && return 2
+
+    clients_up
+    client_rm testfile
 
     #MDS Portion
-    echo "Failing MDS"
-    facet_failover mds
+    for i in $(seq $MDSCOUNT) ; do
+        fail mds$i
+    done
 
-    #Check FS
-    echo "Test Lustre stability after MDS failover"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Reintegration
     echo "Reintegrating CLIENTs"
-    reintegrate_clients
-    client_df || return 1
+    reintegrate_clients || return 2
+    clients_up
 
     #Sleep
     echo "wait 1 minutes"
@@ -435,8 +493,10 @@ test_8() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
+    client_touch testfile
 
     #CLIENT Portion
     echo "Failing CLIENTs"
@@ -444,9 +504,9 @@ test_8() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Sleep
     echo "Wait 1 minutes"
@@ -454,26 +514,33 @@ test_8() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
+    client_touch testfile
+
     #OST Portion
-    echo "Failing OST"
     shutdown_facet ost1
     reboot_facet ost1
 
     #Check FS
     echo "Test Lustre stability after OST failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    clients_up &
+    DFPID=$!
+    sleep 5
+    #non-failout hangs forever here
+    #$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    #$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
 
     #Reintegration
     echo "Reintegrating CLIENTs/OST"
-    reintegrate_clients
-    start ost1
-    client_df || return 1
-    $PDSH $CLIENTS "/bin/touch $MOUNT/CLIENT_OST_2\`hostname\`_testfile" || return 2
+    reintegrate_clients || return 3
+    wait_for_facet ost1
+    start_ost 1
+    wait $DFPID
+    clients_up || return 1
+    client_touch testfile2 || return 2
 
     #Sleep
     echo "Wait 1 minutes"
@@ -489,8 +556,10 @@ test_9() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    clients_up
+    client_touch testfile || return 1
 
     #CLIENT Portion
    echo "Failing CLIENTs"
@@ -498,9 +567,9 @@ test_9() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 1
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 2
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 1
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 2
 
     #Sleep
     echo "Wait 1 minutes"
@@ -508,8 +577,8 @@ test_9() {
 
     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df || return 3
-    $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile" || return 4
+    client_up $LIVE_CLIENT || return 3
+    client_touch testfile || return 4
 
     #CLIENT Portion
     echo "Failing CLIENTs"
@@ -517,14 +586,14 @@ test_9() {
 
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
-    client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 5
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 6
+    clients_up
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 5
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 6
 
     #Reintegration
     echo "Reintegrating CLIENTs/CLIENTs"
-    reintegrate_clients
-    client_df || return 7
+    reintegrate_clients || return 7
+    clients_up
 
     #Sleep
     echo "Wait 1 minutes"
@@ -533,11 +602,184 @@ run_test 9 "Ninth Failure Mode: CLIENT/CLIENT `date`"
 ###################################################
 
+############### Tenth Failure Mode ###############
 test_10() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+    shutdown_facet mds1
+    reboot_facet mds1
+
+    # prepare for MDS failover
+    change_active mds1
+    reboot_facet mds1
+
+    clients_up &
+    DFPID=$!
+    sleep 5
+
+    shutdown_facet ost1
+
+    echo "Reintegrating OST"
+    reboot_facet ost1
+    wait_for_facet ost1
+    start_ost 1 || return 2
+
+    shutdown_facet mds2
+    reboot_facet mds2
+
+    # prepare for MDS failover
+    change_active mds2
+    reboot_facet mds2
+
+    wait_for_facet mds1
+    start_mdt 1 || return $?
+
+    wait_for_facet mds2
+    start_mdt 2 || return $?
+
+    #Check FS
+    wait $DFPID
+    clients_recover_osts ost1
+    echo "Verify reintegration"
+    clients_up || return 1
+}
+run_test 10 "Tenth Failure Mode: MDT0/OST/MDT1 `date`"
+###################################################
+
+############### Eleventh Failure Mode ###############
+test_11() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1
+    #Check FS
+
+    echo "Test Lustre stability after MDS failover"
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    fail mds2
+
+    clients_up || return 3
+    sleep 2 # give it a little time to fully recover before the next test
+}
+run_test 11 "Eleventh Failure Mode: MDS0/CLIENT/MDS1 `date`"
+###################################################
+
+test_12() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1,mds2
+    clients_up
+
+    #OSS Portion
+    fail ost1,ost2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time to fully recover before the next test
+}
+run_test 12 "Twelfth Failure Mode: MDS0,MDS1/OST0,OST1/CLIENTS `date`"
+###################################################
+
+test_13() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #MDS Portion
+    fail mds1,mds2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time to fully recover before the next test
+
+    #OSS Portion
+    fail ost1,ost2
+    clients_up || return 4
+}
+run_test 13 "Thirteenth Failure Mode: MDS0,MDS1/CLIENTS/OST0,OST1 `date`"
+###################################################
+
+test_14() {
+    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+    echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    #OST Portion
+    fail ost1,ost2
+    clients_up
+
+    #CLIENT Portion
+    echo "Failing 2 CLIENTS"
+    fail_clients 2
+
+    #Check FS
+    echo "Test Lustre stability after CLIENT failure"
+    clients_up
+
+    #Reintegration
+    echo "Reintegrating CLIENTS"
+    reintegrate_clients || return 1
+
+    clients_up || return 3
+    sleep 2 # give it a little time to fully recover before the next test
+
+    #MDS Portion
+    fail mds1,mds2
+    clients_up || return 4
+}
+run_test 14 "Fourteenth Failure Mode: OST0,OST1/CLIENTS/MDS0,MDS1 `date`"
+###################################################
+
+test_15() {
     #Run availability after all failures
-    ./availability.sh 21600
+    DURATION=${DURATION:-$((2 * 60 * 60))} # 2 hours by default
+    LOADTEST=${LOADTEST:-metadata-load.py}
+    $PWD/availability.sh $CONFIG $DURATION $CLIENTS || return 1
 }
-run_test 10 "Running Availability for 6 hours..."
10 "Running Availability for 6 hours..." +run_test 15 "Running Availability for 6 hours..." -equals_msg "Done, cleaning up" -cleanup +complete $SECONDS +check_and_cleanup_lustre +exit_status