init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/insanity-local.sh}
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+# exclude test 10 (the long availability run) by default
+ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+
+if [ "$FAILURE_MODE" = "HARD" ]; then
+ mixed_ost_devs && CONFIG_EXCEPTIONS="0 2 4 5 6 8" && \
+ echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. " && \
+ echo "Except the tests: $CONFIG_EXCEPTIONS" && \
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+fi
+
+# no tests are excluded for SLOW=no runs
+[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
-ALWAYS_EXCEPT=""
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}
build_test_filter
-assert_env mds_HOST ost1_HOST ost2_HOST client_HOST LIVE_CLIENT
+SINGLECLIENT=${SINGLECLIENT:-$HOSTNAME}
+LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
+FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
+
+assert_env mds_HOST MDS_MKFS_OPTS
+assert_env ost_HOST OST_MKFS_OPTS OSTCOUNT
+assert_env LIVE_CLIENT FSNAME
-# This can be a regexp, to allow more clients
-CLIENTS=${CLIENTS:-"`comma_list $LIVE_CLIENT $FAIL_CLIENTS`"}
+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
+remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
-CLIENTLIST="$LIVE_CLIENT $FAIL_CLIENTS"
+# FAIL_CLIENTS list should not contain the LIVE_CLIENT
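+# (the echo below pads the list with spaces so the pattern only matches whole
+# hostnames, e.g. LIVE_CLIENT=node1 will not strip node10)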
+FAIL_CLIENTS=$(echo " $FAIL_CLIENTS " | sed -re "s/\s+$LIVE_CLIENT\s+/ /g")
DIR=${DIR:-$MOUNT}
+TESTDIR=$DIR/d0.$(basename $0 .sh)
#####
# fail clients round robin
FAIL_LIST=($FAIL_CLIENTS)
FAIL_NUM=${#FAIL_LIST[*]}
FAIL_NEXT=0
+typeset -i FAIL_NEXT
DOWN_NUM=0 # number of nodes currently down
-# return next client to fail
-fail_client() {
- ret=${FAIL_LIST[$FAIL_NEXT]}
+# set next client to fail
+set_fail_client() {
+ FAIL_CLIENT=${FAIL_LIST[$FAIL_NEXT]}
FAIL_NEXT=$(( (FAIL_NEXT+1) % FAIL_NUM ))
- echo $ret
-}
-
-shutdown_client() {
- client=$1
- if [ "$FAILURE_MODE" = HARD ]; then
- $POWER_DOWN $client
- elif [ "$FAILURE_MODE" = SOFT ]; then
- $PDSH $client $LCONF --clenaup --force --nomod $XMLCONFIG
- fi
-}
-
-reboot_node() {
- NODE=$1
- if [ "$FAILURE_MODE" = HARD ]; then
- $POWER_UP $NODE
- fi
+ echo "fail $FAIL_CLIENT, next is $FAIL_NEXT"
}
fail_clients() {
num=$1
- if [ -z "$num" -o $num -gt $((FAIL_NUM - DOWN_NUM)) ]; then
+
+ log "Request clients to fail: ${num}. Num of clients to fail: ${FAIL_NUM}, already failed: $DOWN_NUM"
+ if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
num=$((FAIL_NUM - DOWN_NUM))
fi
- if [ -z "$num" -o $num -le 0 ]; then
+ if [ -z "$num" ] || [ "$num" -le 0 ]; then
+ log "No clients failed!"
return
fi
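+ # create the per-client work directories while all clients are still up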
+ client_mkdirs
+
for i in `seq $num`; do
- client=`fail_client`
+ set_fail_client
+ client=$FAIL_CLIENT
DOWN_CLIENTS="$DOWN_CLIENTS $client"
- client_mkdirs
shutdown_client $client
done
+ echo "down clients: $DOWN_CLIENTS"
+
for client in $DOWN_CLIENTS; do
- reboot_node $client
+ boot_node $client
done
DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
- $PDSH $LIVE_CLIENT "cd $MOUNT && rmdir $CLIENTLIST"
+ client_rmdirs
}
reintegrate_clients() {
for client in $DOWN_CLIENTS; do
wait_for_host $client
- $PDSH $client "$LCONF --node client --select mds_svc=`facet_active mds` $CLIENTOPTS $XMLCONFIG"
+ echo "Restarting $client"
+ zconf_mount $client $MOUNT || return 1
done
DOWN_CLIENTS=""
DOWN_NUM=0
}
-gen_config() {
- rm -f $XMLCONFIG
- add_mds mds --dev $MDSDEV --size $MDSSIZE
+start_ost() {
+ start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
+}
- if [ ! -z "$mdsfailover_HOST" ]; then
- add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE
- fi
+trap exit INT
- add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- add_ost ost1 --lov lov1 --dev $OSTDEV --size $OSTSIZE
- add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
- add_client client mds --lov lov1 --path $MOUNT
+client_touch() {
+ file=$1
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ if echo $DOWN_CLIENTS | grep -qw $c; then continue; fi
+ $PDSH $c touch $TESTDIR/${c}_$file || return 1
+ done
}
-setup() {
- wait_for ost1
- start ost1 ${REFORMAT} $OSTLCONFARGS
- wait_for ost2
- start ost2 ${REFORMAT} $OSTLCONFARGS
- [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
- wait_for mds
- start mds $MDSLCONFARGS ${REFORMAT}
- while ! $PDSH $HOST "ls -ld $LUSTRE"; do sleep 5; done
- do_node $CLIENTS lconf --node client_facet \
- --select mds_service=$ACTIVEMDS $XMLCONFIG
+client_rm() {
+ file=$1
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ $PDSH $c rm $TESTDIR/${c}_$file
+ done
}
-cleanup() {
- # make sure we are using the primary MDS, so the config log will
- # be able to clean up properly.
- activemds=`facet_active mds`
-# if [ $activemds != "mds" ]; then
-# fail mds
-# fi
- for node in $CLIENTS; do
- do_node $node lconf ${FORCE} --select mds_svc=${activemds}_facet --cleanup --node client_facet $XMLCONFIG || true
+client_mkdirs() {
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ echo "$c mkdir $TESTDIR/$c"
+ $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
done
-
- stop mds ${FORCE} $MDSLCONFARGS
- stop ost1 ${FORCE}
- stop ost2 ${FORCE} --dump cleanup.log
}
-trap exit INT
-
-client_mkdirs() {
- $PDSH $CLIENTS "mkdir $MOUNT/\`hostname\`; ls $MOUNT/\`hostname\` > /dev/null"
+client_rmdirs() {
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ echo "rmdir $TESTDIR/$c"
+ $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
+ done
}
clients_recover_osts() {
facet=$1
- $PDSH $CLIENTS "$LCTL "'--device %OSC_`hostname`_OST_'"${facet}_svc_MNT_client recover"
+# do_node $CLIENTS "$LCTL "'--device %OSC_`hostname`_'"${facet}_svc_MNT_client_facet recover"
}
-if [ "$ONLY" == "cleanup" ]; then
- cleanup
- exit
-fi
-
-gen_config
-setup
+check_and_setup_lustre
-if [ "$ONLY" == "setup" ]; then
- exit 0
-fi
+rm -rf $TESTDIR
+mkdir -p $TESTDIR
# 9 Different Failure Mode Combinations
echo "Starting Test 17 at `date`"
test_0() {
- echo "Failover MDS"
- facet_failover mds
- wait $DFPID || return 1
+ fail $SINGLEMDS
- echo "Failing OST1"
- facet_failover ost1
- wait $DFPID || return 2
-
- echo "Failing OST2"
- facet_failover ost2
- wait $DFPID || return 3
+ for i in $(seq $OSTCOUNT) ; do
+ fail ost$i
+ done
return 0
}
run_test 0 "Fail all nodes, independently"
test_1() {
echo "Don't do a MDS - MDS Failure Case"
echo "This makes no sense"
-# FIXME every test makes sense
}
run_test 1 "MDS/MDS failure"
###################################################
############### Second Failure Mode ###############
test_2() {
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- echo "Failing MDS"
- shutdown_facet mds
- reboot_facet mds
+ shutdown_facet $SINGLEMDS
+ reboot_facet $SINGLEMDS
# prepare for MDS failover
- change_active mds
- reboot_facet mds
+ change_active $SINGLEMDS
+ reboot_facet $SINGLEMDS
client_df &
DFPID=$!
sleep 5
- echo "Failing OST"
shutdown_facet ost1
echo "Reintegrating OST"
reboot_facet ost1
wait_for ost1
- start ost1
+ start_ost 1 || return 2
- echo "Failover MDS"
- wait_for mds
- start mds
+ wait_for $SINGLEMDS
+ start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || return $?
#Check FS
wait $DFPID
clients_recover_osts ost1
echo "Verify reintegration"
- client_df
+ client_df || return 1
}
run_test 2 "Second Failure Mode: MDS/OST `date`"
test_3() {
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
#MDS Portion
- facet_failover mds
- wait $DFPID || echo df failed: $?
+ fail $SINGLEMDS
#Check FS
echo "Test Lustre stability after MDS failover"
#Reintegration
echo "Reintegrating CLIENTS"
- reintegrate_clients
+ reintegrate_clients || return 1
- client_df
+ client_df || return 3
+ sleep 2 # give it a little time to fully recover before the next test
}
run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
###################################################
echo "Fourth Failure Mode: OST/MDS `date`"
#OST Portion
- echo "Failing OST ost1"
shutdown_facet ost1
#Check FS
echo "Test Lustre stability after OST failure"
- client_df
+ client_df &
+ DFPIDA=$!
+ sleep 5
#MDS Portion
- echo "Failing MDS"
- shutdown_facet mds
- reboot_facet mds
+ shutdown_facet $SINGLEMDS
+ reboot_facet $SINGLEMDS
# prepare for MDS failover
- change_active mds
- reboot_facet mds
+ change_active $SINGLEMDS
+ reboot_facet $SINGLEMDS
client_df &
- DFPID=$!
+ DFPIDB=$!
sleep 5
#Reintegration
echo "Reintegrating OST"
reboot_facet ost1
wait_for ost1
- start ost1
+ start_ost 1
- echo "Failover MDS"
- wait_for mds
- start mds
+ wait_for $SINGLEMDS
+ start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS
#Check FS
- wait $DFPID
+ wait $DFPIDA
+ wait $DFPIDB
clients_recover_osts ost1
echo "Test Lustre stability after MDS failover"
- client_df
+ client_df || return 1
}
run_test 4 "Fourth Failure Mode: OST/MDS `date`"
###################################################
############### Fifth Failure Mode ###############
test_5() {
+ [ $OSTCOUNT -lt 2 ] && skip_env "$OSTCOUNT < 2, not enough OSTs" && return 0
+
echo "Fifth Failure Mode: OST/OST `date`"
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
#OST Portion
- echo "Failing OST"
shutdown_facet ost1
reboot_facet ost1
#Check FS
echo "Test Lustre stability after OST failure"
- client_df
+ client_df &
+ DFPIDA=$!
+ sleep 5
#OST Portion
- echo "Failing OST"
- shutdown_node ost2
+ shutdown_facet ost2
reboot_facet ost2
#Check FS
echo "Test Lustre stability after OST failure"
- client_df
+ client_df &
+ DFPIDB=$!
+ sleep 5
#Reintegration
echo "Reintegrating OSTs"
wait_for ost1
- wait_for ost1
- start ost1
- start ost2
+ start_ost 1
+ wait_for ost2
+ start_ost 2
clients_recover_osts ost1
clients_recover_osts ost2
- client_df
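+ # wait a full recovery window before the final check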
+ sleep $TIMEOUT
+
+ wait $DFPIDA
+ wait $DFPIDB
+ client_df || return 2
}
run_test 5 "Fifth Failure Mode: OST/OST `date`"
###################################################
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_touch testfile || return 2
#OST Portion
- echo "Failing OST"
- shutdown_node ost1
+ shutdown_facet ost1
reboot_facet ost1
#Check FS
echo "Test Lustre stability after OST failure"
- client_df
+ client_df &
+ DFPIDA=$!
+ echo DFPIDA=$DFPIDA
+ sleep 5
#CLIENT Portion
echo "Failing CLIENTs"
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df
+ client_df &
+ DFPIDB=$!
+ echo DFPIDB=$DFPIDB
+ sleep 5
#Reintegration
echo "Reintegrating OST/CLIENTs"
wait_for ost1
- start ost1
- reintegrate_clients
-
+ start_ost 1
+ reintegrate_clients || return 1
+ sleep 5
+
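+ # wait for df processes still running on the remote clients to finish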
+ wait_remote_prog df $((TIMEOUT * 3 + 10))
+ wait $DFPIDA
+ wait $DFPIDB
+
echo "Verifying mount"
+ [ -z "$(mounted_lustre_filesystems)" ] && return 3
client_df
}
run_test 6 "Sixth Failure Mode: OST/CLIENT `date`"
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_touch testfile || return 1
#CLIENT Portion
echo "Part 1: Failing CLIENT"
#Check FS
echo "Test Lustre stability after CLIENTs failure"
client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+ $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
#Sleep
echo "Wait 1 minutes"
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && return 2
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_rm testfile
#MDS Portion
- echo "Failing MDS"
- facet_failover mds
+ fail $SINGLEMDS
- #Check FS
- echo "Test Lustre stability after MDS failover"
- client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+ $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
#Reintegration
echo "Reintegrating CLIENTs"
- reintegrate_clients
+ reintegrate_clients || return 2
client_df
#Sleep
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_touch testfile
#CLIENT Portion
echo "Failing CLIENTs"
#Check FS
echo "Test Lustre stability after CLIENTs failure"
client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ $PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+ $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
#Sleep
echo "Wait 1 minutes"
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_touch testfile
+
#OST Portion
- echo "Failing OST"
- shutdown_node ost1
+ shutdown_facet ost1
reboot_facet ost1
#Check FS
echo "Test Lustre stability after OST failure"
- client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ client_df &
+ DFPID=$!
+ sleep 5
+ # in non-failout mode these would hang forever
+ #$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
+ #$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
#Reintegration
echo "Reintegrating CLIENTs/OST"
- reintegrate_clients
- start ost1
- client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/CLIENT_OST_2\`hostname\`_testfile"
+ reintegrate_clients || return 3
+ wait_for ost1
+ start_ost 1
+ wait $DFPID
+ client_df || return 1
+ client_touch testfile2 || return 2
#Sleep
echo "Wait 1 minutes"
#Create files
echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ client_touch testfile || return 1
#CLIENT Portion
echo "Failing CLIENTs"
#Check FS
echo "Test Lustre stability after CLIENTs failure"
client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 1
+ $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 2
#Sleep
echo "Wait 1 minutes"
#Create files
echo "Verify Lustre filesystem is up and running"
- client_df
- $PDSH $CLIENTS "/bin/touch $MOUNT/\`hostname\`_testfile"
+ $PDSH $LIVE_CLIENT df $MOUNT || return 3
+ client_touch testfile || return 4
#CLIENT Portion
echo "Failing CLIENTs"
#Check FS
echo "Test Lustre stability after CLIENTs failure"
client_df
- $PDSH $LIVE_CLIENT "ls -l $MOUNT"
- $PDSH $LIVE_CLIENT "rm -f $MOUNT/*_testfile"
+ $PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 5
+ $PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 6
#Reintegration
echo "Reintegrating CLIENTs/CLIENTs"
- reintegrate_clients
+ reintegrate_clients || return 7
client_df
#Sleep
test_10() {
#Run availability after all failures
- ./availability.sh 21600
+ DURATION=${DURATION:-$((6 * 60 * 60))} # 6 hours default
+ LOADTEST=${LOADTEST:-metadata-load.py}
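+ # availability.sh is assumed to pick LOADTEST up from the environment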
+ $PWD/availability.sh $CONFIG $DURATION $CLIENTS || return 1
}
run_test 10 "Running Availability for 6 hours..."
-equals_msg "Done, cleaning up"
-cleanup
+equals_msg `basename $0`: test complete, cleaning up
+check_and_cleanup_lustre
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true