X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Finsanity.sh;h=690322c0eafe2a77f12f2ac74060522c959a7370;hb=2410a014a1895cf90a00f6d54ec600bf7d42134a;hp=86c46ac336823f0cf11459d35a9872e830e497f5;hpb=113303973ec9f8484eb2355a1a6ef3c4c7fd6a56;p=fs%2Flustre-release.git

diff --git a/lustre/tests/insanity.sh b/lustre/tests/insanity.sh
index 86c46ac..690322c 100755
--- a/lustre/tests/insanity.sh
+++ b/lustre/tests/insanity.sh
@@ -8,24 +8,41 @@
 LUSTRE=${LUSTRE:-`dirname $0`/..}
 init_test_env $@

-. ${CONFIG:=$LUSTRE/tests/cfg/insanity-local.sh}
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+#
+ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+
+if [ "$FAILURE_MODE" = "HARD" ]; then
+    mixed_ost_devs && CONFIG_EXCEPTIONS="0 2 4 5 6 8" && \
+        echo -n "Several OST services share a single OST node while FAILURE_MODE=$FAILURE_MODE. " && \
+        echo "Skipping tests: $CONFIG_EXCEPTIONS" && \
+        ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+fi

-ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+#
+[ "$SLOW" = "no" ] && EXCEPT_SLOW=""

-SETUP=${SETUP:-"setup"}
-CLEANUP=${CLEANUP:-"cleanup"}
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}

 build_test_filter

-assert_env mds_HOST MDS_MKFS_OPTS MDSDEV
+SINGLECLIENT=${SINGLECLIENT:-$HOSTNAME}
+LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
+FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
+
+assert_env mds_HOST MDS_MKFS_OPTS
 assert_env ost_HOST OST_MKFS_OPTS OSTCOUNT
 assert_env LIVE_CLIENT FSNAME

+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
+remote_ost_nodsh && skip "remote OST with nodsh" && exit 0

-# This can be a regexp, to allow more clients
-CLIENTS=${CLIENTS:-"`comma_list $LIVE_CLIENT $FAIL_CLIENTS $EXTRA_CLIENTS`"}
+# The FAIL_CLIENTS list should not contain LIVE_CLIENT
+FAIL_CLIENTS=$(echo " $FAIL_CLIENTS " | sed -re "s/\s+$LIVE_CLIENT\s+/ /g")

 DIR=${DIR:-$MOUNT}
+TESTDIR=$DIR/d0.$(basename $0 .sh)

 #####
 # fail clients round robin
@@ -44,33 +61,16 @@ set_fail_client() {
     echo "fail $FAIL_CLIENT, next is $FAIL_NEXT"
 }

-shutdown_client() {
-    client=$1
-    if [ "$FAILURE_MODE" = HARD ]; then
-        $POWER_DOWN $client
-        while ping -w 3 -c 1 $client > /dev/null 2>&1; do
-            echo "waiting for node $client to fail"
-            sleep 1
-        done
-    elif [ "$FAILURE_MODE" = SOFT ]; then
-        zconf_umount $client $MOUNT -f
-    fi
-}
-
-reboot_node() {
-    NODE=$1
-    if [ "$FAILURE_MODE" = HARD ]; then
-        $POWER_UP $NODE
-    fi
-}
-
 fail_clients() {
     num=$1
+
+    log "Requested clients to fail: ${num}. Clients available to fail: ${FAIL_NUM}, already failed: $DOWN_NUM"
     if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
         num=$((FAIL_NUM - DOWN_NUM))
     fi

     if [ -z "$num" ] || [ "$num" -le 0 ]; then
+        log "No clients failed!"
         return
     fi

@@ -86,7 +86,7 @@ fail_clients() {

     echo "down clients: $DOWN_CLIENTS"
     for client in $DOWN_CLIENTS; do
-        reboot_node $client
+        boot_node $client
     done
     DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
     client_rmdirs
@@ -106,51 +106,34 @@ start_ost() {
     start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
 }

-setup() {
-    cleanup
-    rm -rf logs/*
-    formatall
-    setupall
-
-    while ! do_node $CLIENTS "ls -d $LUSTRE" > /dev/null; do sleep 5; done
-    grep " $MOUNT " /proc/mounts || zconf_mount $CLIENTS $MOUNT
-}
-
-cleanup() {
-    zconf_umount $CLIENTS $MOUNT
-    cleanupall
-    cleanup_check
-}
-
 trap exit INT

 client_touch() {
     file=$1
     for c in $LIVE_CLIENT $FAIL_CLIENTS; do
         if echo $DOWN_CLIENTS | grep -q $c; then continue; fi
-        $PDSH $c touch $MOUNT/${c}_$file || return 1
+        $PDSH $c touch $TESTDIR/${c}_$file || return 1
     done
 }

 client_rm() {
     file=$1
     for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        $PDSH $c rm $MOUNT/${c}_$file
+        $PDSH $c rm $TESTDIR/${c}_$file
     done
 }

 client_mkdirs() {
     for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        echo "$c mkdir $MOUNT/$c"
-        $PDSH $c "mkdir $MOUNT/$c"
-        $PDSH $c "ls -l $MOUNT/$c"
+        echo "$c mkdir $TESTDIR/$c"
+        $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
     done
 }

 client_rmdirs() {
     for c in $LIVE_CLIENT $FAIL_CLIENTS; do
-        echo "rmdir $MOUNT/$c"
-        $PDSH $LIVE_CLIENT "rmdir $MOUNT/$c"
+        echo "rmdir $TESTDIR/$c"
+        $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
     done
 }

@@ -159,37 +142,20 @@ clients_recover_osts() {
 #    do_node $CLIENTS "$LCTL "'--device %OSC_`hostname`_'"${facet}_svc_MNT_client_facet recover"
 }

-if [ "$ONLY" == "cleanup" ]; then
-    $CLEANUP
-    exit
-fi
-
-if [ ! -z "$EVAL" ]; then
-    eval "$EVAL"
-    exit $?
-fi
-
-$SETUP
+check_and_setup_lustre

-if [ "$ONLY" == "setup" ]; then
-    exit 0
-fi
+rm -rf $TESTDIR
+mkdir -p $TESTDIR

 # 9 Different Failure Mode Combinations
 echo "Starting Test 17 at `date`"
 test_0() {
-    facet_failover mds
-    echo "Waiting for df pid: $DFPID"
-    wait $DFPID || { echo "df returned $?" && return 1; }
+    fail $SINGLEMDS

-    facet_failover ost1 || return 4
-    echo "Waiting for df pid: $DFPID"
-    wait $DFPID || { echo "df returned $?" && return 2; }
-
-    facet_failover ost2 || return 5
-    echo "Waiting for df pid: $DFPID"
-    wait $DFPID || { echo "df returned $?" && return 3; }
+    for i in $(seq $OSTCOUNT) ; do
+        fail ost$i
+    done

     return 0
 }
 run_test 0 "Fail all nodes, independently"
@@ -205,14 +171,16 @@ run_test 1 "MDS/MDS failure"
 ############### Second Failure Mode ###############
 test_2() {
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df

-    shutdown_facet mds
-    reboot_facet mds
+    shutdown_facet $SINGLEMDS
+    reboot_facet $SINGLEMDS

     # prepare for MDS failover
-    change_active mds
-    reboot_facet mds
+    change_active $SINGLEMDS
+    reboot_facet $SINGLEMDS

     client_df &
     DFPID=$!
@@ -225,8 +193,8 @@ test_2() {
     wait_for ost1
     start_ost 1 || return 2

-    wait_for mds
-    start mds $MDSDEV $MDS_MOUNT_OPTS || return $?
+    wait_for $SINGLEMDS
+    start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || return $?

     #Check FS
     wait $DFPID
@@ -243,10 +211,10 @@ run_test 2 "Second Failure Mode: MDS/OST `date`"
 test_3() {
     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"

     #MDS Portion
-    facet_failover mds
-    wait $DFPID || echo df failed: $?
+    fail $SINGLEMDS

     #Check FS
     echo "Test Lustre stability after MDS failover"
@@ -265,6 +233,7 @@ test_3() {

     reintegrate_clients || return 1
     client_df || return 3
+    sleep 2 # give it a little time to fully recover before the next test
 }
 run_test 3 "Third Failure Mode: MDS/CLIENT `date`"
 ###################################################
@@ -283,12 +252,12 @@ test_4() {
     sleep 5

     #MDS Portion
-    shutdown_facet mds
-    reboot_facet mds
+    shutdown_facet $SINGLEMDS
+    reboot_facet $SINGLEMDS

     # prepare for MDS failover
-    change_active mds
-    reboot_facet mds
+    change_active $SINGLEMDS
+    reboot_facet $SINGLEMDS

     client_df &
     DFPIDB=$!
@@ -300,8 +269,8 @@ test_4() {
     wait_for ost1
     start_ost 1

-    wait_for mds
-    start mds $MDSDEV $MDS_MOUNT_OPTS
+    wait_for $SINGLEMDS
+    start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS

     #Check FS
     wait $DFPIDA
@@ -315,10 +284,14 @@ run_test 4 "Fourth Failure Mode: OST/MDS `date`"

 ############### Fifth Failure Mode ###############
 test_5() {
+    [ $OSTCOUNT -lt 2 ] && skip_env "$OSTCOUNT < 2, not enough OSTs" && return 0
+
     echo "Fifth Failure Mode: OST/OST `date`"

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df

     #OST Portion
@@ -365,7 +338,9 @@ test_6() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
-    client_df || return 1
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+    client_df
     client_touch testfile || return 2

     #OST Portion
@@ -376,6 +351,7 @@ test_6() {
     echo "Test Lustre stability after OST failure"
     client_df &
     DFPIDA=$!
+    echo DFPIDA=$DFPIDA
     sleep 5

     #CLIENT Portion
@@ -386,19 +362,23 @@ test_6() {
     echo "Test Lustre stability after CLIENTs failure"
     client_df &
     DFPIDB=$!
+    echo DFPIDB=$DFPIDB
     sleep 5

     #Reintegration
     echo "Reintegrating OST/CLIENTs"
     wait_for ost1
     start_ost 1
-    reintegrate_clients
+    reintegrate_clients || return 1
     sleep 5

+    wait_remote_prog df $((TIMEOUT * 3 + 10))
     wait $DFPIDA
     wait $DFPIDB
+
     echo "Verifying mount"
-    client_df || return 3
+    [ -z "$(mounted_lustre_filesystems)" ] && return 3
+    client_df
 }
 run_test 6 "Sixth Failure Mode: OST/CLIENT `date`"
 ###################################################

@@ -410,6 +390,8 @@ test_7() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df
     client_touch testfile || return 1

@@ -420,8 +402,8 @@ test_7() {
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
     client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"

     #Sleep
     echo "Wait 1 minute"
@@ -429,22 +411,21 @@ test_7() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && return 2
+
     client_df
     client_rm testfile

     #MDS Portion
-    facet_failover mds
+    fail $SINGLEMDS

-    #Check FS
-    echo "Test Lustre stability after MDS failover"
-    wait $DFPID || echo "df on down clients fails " || return 1
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"

     #Reintegration
     echo "Reintegrating CLIENTs"
-    reintegrate_clients
-    client_df || return 2
+    reintegrate_clients || return 2
+    client_df

     #Sleep
     echo "wait 1 minute"
@@ -460,6 +441,8 @@ test_8() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df
     client_touch testfile

@@ -470,8 +453,8 @@ test_8() {
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
     client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"

     #Sleep
     echo "Wait 1 minute"
@@ -479,6 +462,8 @@ test_8() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df
     client_touch testfile

@@ -493,12 +478,12 @@ test_8() {
     DFPID=$!
     sleep 5
     #non-failout hangs forever here
-    #$PDSH $LIVE_CLIENT "ls -l $MOUNT"
-    #$PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+    #$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+    #$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"

     #Reintegration
     echo "Reintegrating CLIENTs/OST"
-    reintegrate_clients
+    reintegrate_clients || return 3
     wait_for ost1
     start_ost 1
     wait $DFPID
@@ -519,6 +504,8 @@ test_9() {

     #Create files
     echo "Verify Lustre filesystem is up and running"
+    [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
     client_df
     client_touch testfile || return 1

@@ -529,8 +516,8 @@ test_9() {
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
     client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 1
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 2
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 1
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 2

     #Sleep
     echo "Wait 1 minute"
@@ -548,13 +535,13 @@ test_9() {
     #Check FS
     echo "Test Lustre stability after CLIENTs failure"
     client_df
-    $PDSH $LIVE_CLIENT "ls -l $MOUNT" || return 5
-    $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile" || return 6
+    $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 5
+    $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 6

     #Reintegration
     echo "Reintegrating CLIENTs/CLIENTs"
-    reintegrate_clients
-    client_df || return 7
+    reintegrate_clients || return 7
+    client_df

     #Sleep
     echo "Wait 1 minute"
@@ -571,7 +558,6 @@ test_10() {
 }
 run_test 10 "Running Availability for 6 hours..."

-equals_msg "Done, cleaning up"
-$CLEANUP
-echo "$0: completed"
-
+equals_msg `basename $0`: test complete, cleaning up
+check_and_cleanup_lustre
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true