X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Finsanity.sh;h=37f2f66ad6a792e110e64fe21fe8632ec1fcf2d0;hb=a05c6e3f5598adaa68f7ac52bf489354953b7c2c;hp=74ca4a24c360eeefc26f8891ea4e9788ecf1a2b5;hpb=dee5f24114531ec34fc56ce2826ada9e5690aabc;p=fs%2Flustre-release.git

diff --git a/lustre/tests/insanity.sh b/lustre/tests/insanity.sh
index 74ca4a2..37f2f66 100755
--- a/lustre/tests/insanity.sh
+++ b/lustre/tests/insanity.sh
@@ -14,12 +14,11 @@ init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging
 #
-ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+ALWAYS_EXCEPT="15 $INSANITY_EXCEPT"
 
 if [ "$FAILURE_MODE" = "HARD" ]; then
 	skip_env "$TESTSUITE: is not functional with FAILURE_MODE = HARD, " \
 		"please use recovery-double-scale, bz20407"
-	exit 0
 fi
 
 [ "$SLOW" = "no" ] && EXCEPT_SLOW=""
@@ -33,7 +32,7 @@ SINGLECLIENT=${SINGLECLIENT:-$HOSTNAME}
 LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
 FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
 
-assert_env mds_HOST
+assert_env mds_HOST MDSCOUNT
 assert_env ost_HOST OSTCOUNT
 assert_env LIVE_CLIENT FSNAME
 
@@ -109,6 +108,10 @@ start_ost() {
 	start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
 }
 
+start_mdt() {
+	start mds$1 $(mdsdevname $1) $MDS_MOUNT_OPTS
+}
+
 trap exit INT
 
 client_touch() {
@@ -154,7 +157,9 @@ mkdir -p $TESTDIR
 echo "Starting Test 17 at `date`"
 
 test_0() {
-	fail $SINGLEMDS
+	for i in $(seq $MDSCOUNT) ; do
+		fail mds$i
+	done
 
 	for i in $(seq $OSTCOUNT) ; do
 		fail ost$i
@@ -165,8 +170,35 @@ run_test 0 "Fail all nodes, independently"
 
 ############### First Failure Mode ###############
 test_1() {
-echo "Don't do a MDS - MDS Failure Case"
-echo "This makes no sense"
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+
+	clients_up
+
+	shutdown_facet mds1
+	reboot_facet mds1
+
+	# prepare for MDS failover
+	change_active mds1
+	reboot_facet mds1
+
+	clients_up &
+	DFPID=$!
+	sleep 5
+
+	shutdown_facet mds2
+
+	echo "Reintegrating MDS2"
+	reboot_facet mds2
+	wait_for_facet mds2
+	start_mdt 2 || return 2
+
+	wait_for_facet mds1
+	start_mdt 1 || return $?
+
+	#Check FS
+	wait $DFPID
+	echo "Verify reintegration"
+	clients_up || return 1
 }
 run_test 1 "MDS/MDS failure"
 ###################################################
@@ -176,17 +208,16 @@ test_2() {
 	echo "Verify Lustre filesystem is up and running"
 	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
 
-	[ "$(facet_fstype ost1)" = "zfs" ] &&
-		skip "LU-2059: no local config for ZFS OSTs" && return
-
-	clients_up
+	clients_up
 
-	shutdown_facet $SINGLEMDS
-	reboot_facet $SINGLEMDS
+	for i in $(seq $MDSCOUNT) ; do
+		shutdown_facet mds$i
+		reboot_facet mds$i
 
-	# prepare for MDS failover
-	change_active $SINGLEMDS
-	reboot_facet $SINGLEMDS
+		# prepare for MDS failover
+		change_active mds$i
+		reboot_facet mds$i
+	done
 
 	clients_up &
 	DFPID=$!
@@ -199,8 +230,10 @@ test_2() {
 	wait_for_facet ost1
 	start_ost 1 || return 2
 
-	wait_for_facet $SINGLEMDS
-	start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || return $?
+	for i in $(seq $MDSCOUNT) ; do
+		wait_for_facet mds$i
+		start_mdt $i || return $?
+	done
 
 	#Check FS
 	wait $DFPID
@@ -212,15 +245,16 @@ test_2() {
 run_test 2 "Second Failure Mode: MDS/OST `date`"
 ###################################################
 
-
 ############### Third Failure Mode ###############
 test_3() {
 	#Create files
 	echo "Verify Lustre filesystem is up and running"
 	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
-
+
 	#MDS Portion
-	fail $SINGLEMDS
+	for i in $(seq $MDSCOUNT) ; do
+		fail mds$i
+	done
 
 	#Check FS
 	echo "Test Lustre stability after MDS failover"
@@ -248,25 +282,23 @@ run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
 test_4() {
 	echo "Fourth Failure Mode: OST/MDS `date`"
 
-	[ "$(facet_fstype ost1)" = "zfs" ] &&
-		skip "LU-2059: no local config for ZFS OSTs" && return
-
 	#OST Portion
 	shutdown_facet ost1
-
+
 	#Check FS
 	echo "Test Lustre stability after OST failure"
 	clients_up &
 	DFPIDA=$!
 	sleep 5
 
-	#MDS Portion
-	shutdown_facet $SINGLEMDS
-	reboot_facet $SINGLEMDS
+	for i in $(seq $MDSCOUNT) ; do
+		shutdown_facet mds$i
+		reboot_facet mds$i
 
-	# prepare for MDS failover
-	change_active $SINGLEMDS
-	reboot_facet $SINGLEMDS
+		# prepare for MDS failover
+		change_active mds$i
+		reboot_facet mds$i
+	done
 
 	clients_up &
 	DFPIDB=$!
@@ -278,10 +310,12 @@ test_4() {
 	wait_for_facet ost1
 	start_ost 1
 
-	wait_for_facet $SINGLEMDS
-	start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS
+	for i in $(seq $MDSCOUNT) ; do
+		wait_for_facet mds$i
+		start_mdt $i || return $?
+	done
 	#Check FS
-
+
 	wait $DFPIDA
 	wait $DFPIDB
 	clients_recover_osts ost1
@@ -293,26 +327,25 @@ run_test 4 "Fourth Failure Mode: OST/MDS `date`"
 
 ############### Fifth Failure Mode ###############
 test_5() {
-    [ $OSTCOUNT -lt 2 ] && skip_env "$OSTCOUNT < 2, not enough OSTs" && return 0
+	[ $OSTCOUNT -lt 2 ] && skip_env "needs >= 2 OSTs"
 
-    echo "Fifth Failure Mode: OST/OST `date`"
+	echo "Fifth Failure Mode: OST/OST `date`"
 
-    #Create files
-    echo "Verify Lustre filesystem is up and running"
-    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+	#Create files
+	echo "Verify Lustre filesystem is up and running"
+	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+	clients_up
+	#OST Portion
+	shutdown_facet ost1
+	reboot_facet ost1
 
-    clients_up
-
-    #OST Portion
-    shutdown_facet ost1
-    reboot_facet ost1
-
 	#Check FS
 	echo "Test Lustre stability after OST failure"
 	clients_up &
 	DFPIDA=$!
 	sleep 5
-
+
 	#OST Portion
 	shutdown_facet ost2
 	reboot_facet ost2
@@ -329,7 +362,7 @@ test_5() {
 	start_ost 1
 	wait_for_facet ost2
 	start_ost 2
-
+
 	clients_recover_osts ost1
 	clients_recover_osts ost2
 	sleep $TIMEOUT
@@ -426,7 +459,9 @@ test_7() {
 	client_rm testfile
 
 	#MDS Portion
-	fail $SINGLEMDS
+	for i in $(seq $MDSCOUNT) ; do
+		fail mds$i
+	done
 
 	$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
 	$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
@@ -559,13 +594,183 @@ test_9() {
 run_test 9 "Ninth Failure Mode: CLIENT/CLIENT `date`"
 ###################################################
 
+############### Tenth Failure Mode ###############
 test_10() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+
+	shutdown_facet mds1
+	reboot_facet mds1
+
+	# prepare for MDS failover
+	change_active mds1
+	reboot_facet mds1
+
+	clients_up &
+	DFPID=$!
+	sleep 5
+
+	shutdown_facet ost1
+
+	echo "Reintegrating OST"
+	reboot_facet ost1
+	wait_for_facet ost1
+	start_ost 1 || return 2
+
+	shutdown_facet mds2
+	reboot_facet mds2
+
+	# prepare for MDS failover
+	change_active mds2
+	reboot_facet mds2
+
+	wait_for_facet mds1
+	start_mdt 1 || return $?
+
+	wait_for_facet mds2
+	start_mdt 2 || return $?
+
+	#Check FS
+	wait $DFPID
+	clients_recover_osts ost1
+	echo "Verify reintegration"
+	clients_up || return 1
+}
+run_test 10 "Tenth Failure Mode: MDT0/OST/MDT1 `date`"
+###################################################
+
+############### Eleventh Failure Mode ###############
+test_11() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	echo "Verify Lustre filesystem is up and running"
+	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+	#MDS Portion
+	fail mds1
+	#Check FS
+
+	echo "Test Lustre stability after MDS failover"
+	clients_up
+
+	#CLIENT Portion
+	echo "Failing 2 CLIENTS"
+	fail_clients 2
+
+	#Check FS
+	echo "Test Lustre stability after CLIENT failure"
+	clients_up
+
+	#Reintegration
+	echo "Reintegrating CLIENTS"
+	reintegrate_clients || return 1
+
+	fail mds2
+
+	clients_up || return 3
+	sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 11 "Eleventh Failure Mode: MDS0/CLIENT/MDS1 `date`"
+###################################################
+
+test_12() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	echo "Verify Lustre filesystem is up and running"
+	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+	#MDS Portion
+	fail mds1,mds2
+	clients_up
+
+	#OSS Portion
+	fail ost1,ost2
+	clients_up
+
+	#CLIENT Portion
+	echo "Failing 2 CLIENTS"
+	fail_clients 2
+
+	#Check FS
+	echo "Test Lustre stability after CLIENT failure"
+	clients_up
+
+	#Reintegration
+	echo "Reintegrating CLIENTS"
+	reintegrate_clients || return 1
+
+	clients_up || return 3
+	sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 12 "Twelve Failure Mode: MDS0,MDS1/OST0, OST1/CLIENTS `date`"
+###################################################
+
+test_13() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	echo "Verify Lustre filesystem is up and running"
+	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+	#MDS Portion
+	fail mds1,mds2
+	clients_up
+
+	#CLIENT Portion
+	echo "Failing 2 CLIENTS"
+	fail_clients 2
+
+	#Check FS
+	echo "Test Lustre stability after CLIENT failure"
+	clients_up
+
+	#Reintegration
+	echo "Reintegrating CLIENTS"
+	reintegrate_clients || return 1
+
+	clients_up || return 3
+	sleep 2 # give it a little time for fully recovered before next test
+
+	#OSS Portion
+	fail ost1,ost2
+	clients_up || return 4
+}
+run_test 13 "Thirteen Failure Mode: MDS0,MDS1/CLIENTS/OST0,OST1 `date`"
+###################################################
+
+test_14() {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs"
+	echo "Verify Lustre filesystem is up and running"
+	[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+	#OST Portion
+	fail ost1,ost2
+	clients_up
+
+	#CLIENT Portion
+	echo "Failing 2 CLIENTS"
+	fail_clients 2
+
+	#Check FS
+	echo "Test Lustre stability after CLIENT failure"
+	clients_up
+
+	#Reintegration
+	echo "Reintegrating CLIENTS"
+	reintegrate_clients || return 1
+
+	clients_up || return 3
+	sleep 2 # give it a little time for fully recovered before next test
+
+	#MDS Portion
+	fail mds1,mds2
+	clients_up || return 4
+}
+run_test 14 "Fourteen Failure Mode: OST0,OST1/CLIENTS/MDS0,MDS1 `date`"
+###################################################
+
+test_15() {
 	#Run availability after all failures
 	DURATION=${DURATION:-$((2 * 60 * 60))} # 6 hours default
 	LOADTEST=${LOADTEST:-metadata-load.py}
 	$PWD/availability.sh $CONFIG $DURATION $CLIENTS || return 1
 }
-run_test 10 "Running Availability for 6 hours..."
+run_test 15 "Running Availability for 6 hours..."
 
 complete $SECONDS
 check_and_cleanup_lustre