-#!/bin/sh
+#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
+#
# Test multiple failures, AKA Test 17
set -e
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
#
-ALWAYS_EXCEPT="10 $INSANITY_EXCEPT"
+ALWAYS_EXCEPT="15 $INSANITY_EXCEPT"
if [ "$FAILURE_MODE" = "HARD" ]; then
- mixed_ost_devs && CONFIG_EXCEPTIONS="0 2 4 5 6 8" && \
- echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. " && \
- echo "Except the tests: $CONFIG_EXCEPTIONS" && \
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+ skip_env "$TESTSUITE: is not functional with FAILURE_MODE = HARD, " \
+ "please use recovery-double-scale, bz20407"
+ exit 0
fi
-#
[ "$SLOW" = "no" ] && EXCEPT_SLOW=""
SETUP=${SETUP:-""}
LIVE_CLIENT=${LIVE_CLIENT:-$SINGLECLIENT}
FAIL_CLIENTS=${FAIL_CLIENTS:-$RCLIENTS}
-assert_env mds_HOST MDS_MKFS_OPTS
-assert_env ost_HOST OST_MKFS_OPTS OSTCOUNT
+assert_env mds_HOST MDSCOUNT
+assert_env ost_HOST OSTCOUNT
assert_env LIVE_CLIENT FSNAME
-remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+require_dsh_mds || exit 0
+require_dsh_ost || exit 0
# FAIL_CLIENTS list should not contain the LIVE_CLIENT
FAIL_CLIENTS=$(echo " $FAIL_CLIENTS " | sed -re "s/\s+$LIVE_CLIENT\s+/ /g")
DIR=${DIR:-$MOUNT}
-TESTDIR=$DIR/d0.$(basename $0 .sh)
+TESTDIR=$DIR/d0.$TESTSUITE
#####
# fail clients round robin
}
fail_clients() {
- num=$1
+ num=$1
- log "Request clients to fail: ${num}. Num of clients to fail: ${FAIL_NUM}, already failed: $DOWN_NUM"
- if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
- num=$((FAIL_NUM - DOWN_NUM))
- fi
+ log "Request fail clients: $num, to fail: $FAIL_NUM, failed: $DOWN_NUM"
+ if [ -z "$num" ] || [ "$num" -gt $((FAIL_NUM - DOWN_NUM)) ]; then
+ num=$((FAIL_NUM - DOWN_NUM))
+ fi
if [ -z "$num" ] || [ "$num" -le 0 ]; then
log "No clients failed!"
echo "down clients: $DOWN_CLIENTS"
- for client in $DOWN_CLIENTS; do
- boot_node $client
- done
- DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
- client_rmdirs
+ for client in $DOWN_CLIENTS; do
+ boot_node $client
+ done
+ DOWN_NUM=`echo $DOWN_CLIENTS | wc -w`
+ client_rmdirs
}
reintegrate_clients() {
- for client in $DOWN_CLIENTS; do
- wait_for_host $client
- echo "Restarting $client"
- zconf_mount $client $MOUNT || return 1
- done
- DOWN_CLIENTS=""
- DOWN_NUM=0
+ for client in $DOWN_CLIENTS; do
+ wait_for_host $client
+ echo "Restarting $client"
+ zconf_mount $client $MOUNT || return 1
+ done
+
+ DOWN_CLIENTS=""
+ DOWN_NUM=0
}
start_ost() {
- start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
+ start ost$1 `ostdevname $1` $OST_MOUNT_OPTS
+}
+
+start_mdt() {
+ start mds$1 $(mdsdevname $1) $MDS_MOUNT_OPTS
}
trap exit INT
client_touch() {
- file=$1
- for c in $LIVE_CLIENT $FAIL_CLIENTS; do
- if echo $DOWN_CLIENTS | grep -q $c; then continue; fi
- $PDSH $c touch $TESTDIR/${c}_$file || return 1
- done
+ file=$1
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ echo $DOWN_CLIENTS | grep -q $c && continue
+ $PDSH $c touch $TESTDIR/${c}_$file || return 1
+ done
}
client_rm() {
- file=$1
- for c in $LIVE_CLIENT $FAIL_CLIENTS; do
- $PDSH $c rm $TESTDIR/${c}_$file
- done
+ file=$1
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ $PDSH $c rm $TESTDIR/${c}_$file
+ done
}
client_mkdirs() {
- for c in $LIVE_CLIENT $FAIL_CLIENTS; do
- echo "$c mkdir $TESTDIR/$c"
- $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
- done
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ echo "$c mkdir $TESTDIR/$c"
+ $PDSH $c "mkdir $TESTDIR/$c && ls -l $TESTDIR/$c"
+ done
}
client_rmdirs() {
- for c in $LIVE_CLIENT $FAIL_CLIENTS; do
- echo "rmdir $TESTDIR/$c"
- $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
- done
+ for c in $LIVE_CLIENT $FAIL_CLIENTS; do
+ echo "rmdir $TESTDIR/$c"
+ $PDSH $LIVE_CLIENT "rmdir $TESTDIR/$c"
+ done
}
clients_recover_osts() {
echo "Starting Test 17 at `date`"
test_0() {
- fail $SINGLEMDS
+ for i in $(seq $MDSCOUNT) ; do
+ fail mds$i
+ done
for i in $(seq $OSTCOUNT) ; do
fail ost$i
############### First Failure Mode ###############
test_1() {
-echo "Don't do a MDS - MDS Failure Case"
-echo "This makes no sense"
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ [ "$(facet_fstype mds2)" = "zfs" ] &&
+ skip "LU-2059: no local config for ZFS MDTs" && return
+
+ clients_up
+
+ shutdown_facet mds1
+ reboot_facet mds1
+
+ # prepare for MDS failover
+ change_active mds1
+ reboot_facet mds1
+
+ clients_up &
+ DFPID=$!
+ sleep 5
+
+ shutdown_facet mds2
+
+ echo "Reintegrating MDS2"
+ reboot_facet mds2
+ wait_for_facet mds2
+ start_mdt 2 || return 2
+
+ wait_for_facet mds1
+ start_mdt 1 || return $?
+
+ #Check FS
+ wait $DFPID
+ echo "Verify reintegration"
+ clients_up || return 1
}
run_test 1 "MDS/MDS failure"
###################################################
############### Second Failure Mode ###############
test_2() {
- echo "Verify Lustre filesystem is up and running"
- [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+ echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
- shutdown_facet $SINGLEMDS
- reboot_facet $SINGLEMDS
+ for i in $(seq $MDSCOUNT) ; do
+ shutdown_facet mds$i
+ reboot_facet mds$i
- # prepare for MDS failover
- change_active $SINGLEMDS
- reboot_facet $SINGLEMDS
+ # prepare for MDS failover
+ change_active mds$i
+ reboot_facet mds$i
+ done
- client_df &
+ clients_up &
DFPID=$!
sleep 5
echo "Reintegrating OST"
reboot_facet ost1
- wait_for ost1
+ wait_for_facet ost1
start_ost 1 || return 2
- wait_for $SINGLEMDS
- start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || return $?
+ for i in $(seq $MDSCOUNT) ; do
+ wait_for_facet mds$i
+ start_mdt $i || return $?
+ done
#Check FS
wait $DFPID
clients_recover_osts ost1
echo "Verify reintegration"
- client_df || return 1
+ clients_up || return 1
}
run_test 2 "Second Failure Mode: MDS/OST `date`"
###################################################
-
############### Third Failure Mode ###############
test_3() {
#Create files
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
-
+
#MDS Portion
- fail $SINGLEMDS
+ for i in $(seq $MDSCOUNT) ; do
+ fail mds$i
+ done
#Check FS
echo "Test Lustre stability after MDS failover"
- client_df
+ clients_up
#CLIENT Portion
echo "Failing 2 CLIENTS"
#Check FS
echo "Test Lustre stability after CLIENT failure"
- client_df
+ clients_up
#Reintegration
echo "Reintegrating CLIENTS"
reintegrate_clients || return 1
- client_df || return 3
+ clients_up || return 3
sleep 2 # give it a little time for fully recovered before next test
}
run_test 3 "Thirdb Failure Mode: MDS/CLIENT `date`"
############### Fourth Failure Mode ###############
test_4() {
- echo "Fourth Failure Mode: OST/MDS `date`"
+ echo "Fourth Failure Mode: OST/MDS `date`"
#OST Portion
shutdown_facet ost1
-
+
#Check FS
echo "Test Lustre stability after OST failure"
- client_df &
+ clients_up &
DFPIDA=$!
sleep 5
- #MDS Portion
- shutdown_facet $SINGLEMDS
- reboot_facet $SINGLEMDS
+ for i in $(seq $MDSCOUNT) ; do
+ shutdown_facet mds$i
+ reboot_facet mds$i
- # prepare for MDS failover
- change_active $SINGLEMDS
- reboot_facet $SINGLEMDS
+ # prepare for MDS failover
+ change_active mds$i
+ reboot_facet mds$i
+ done
- client_df &
+ clients_up &
DFPIDB=$!
sleep 5
#Reintegration
echo "Reintegrating OST"
reboot_facet ost1
- wait_for ost1
+ wait_for_facet ost1
start_ost 1
-
- wait_for $SINGLEMDS
- start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS
+
+ for i in $(seq $MDSCOUNT) ; do
+ wait_for_facet mds$i
+ start_mdt $i || return $?
+ done
#Check FS
-
+
wait $DFPIDA
wait $DFPIDB
clients_recover_osts ost1
echo "Test Lustre stability after MDS failover"
- client_df || return 1
+ clients_up || return 1
}
run_test 4 "Fourth Failure Mode: OST/MDS `date`"
###################################################
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
#OST Portion
shutdown_facet ost1
#Check FS
echo "Test Lustre stability after OST failure"
- client_df &
+ clients_up &
DFPIDA=$!
sleep 5
#Check FS
echo "Test Lustre stability after OST failure"
- client_df &
+ clients_up &
DFPIDB=$!
sleep 5
#Reintegration
echo "Reintegrating OSTs"
- wait_for ost1
+ wait_for_facet ost1
start_ost 1
- wait_for ost2
+ wait_for_facet ost2
start_ost 2
clients_recover_osts ost1
wait $DFPIDA
wait $DFPIDB
- client_df || return 2
+ clients_up || return 2
}
run_test 5 "Fifth Failure Mode: OST/OST `date`"
###################################################
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
client_touch testfile || return 2
#OST Portion
#Check FS
echo "Test Lustre stability after OST failure"
- client_df &
+ clients_up &
DFPIDA=$!
echo DFPIDA=$DFPIDA
sleep 5
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df &
+ clients_up &
DFPIDB=$!
echo DFPIDB=$DFPIDB
sleep 5
#Reintegration
echo "Reintegrating OST/CLIENTs"
- wait_for ost1
+ wait_for_facet ost1
start_ost 1
reintegrate_clients || return 1
sleep 5
- wait_remote_prog df $((TIMEOUT * 3 + 10))
+ wait_remote_prog "stat -f" $((TIMEOUT * 3 + 20))
wait $DFPIDA
wait $DFPIDB
echo "Verifying mount"
[ -z "$(mounted_lustre_filesystems)" ] && return 3
- client_df
+ clients_up
}
run_test 6 "Sixth Failure Mode: OST/CLIENT `date`"
###################################################
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
client_touch testfile || return 1
#CLIENT Portion
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df
+ clients_up
$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && return 2
- client_df
+ clients_up
client_rm testfile
#MDS Portion
- fail $SINGLEMDS
+ for i in $(seq $MDSCOUNT) ; do
+ fail mds$i
+ done
$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
#Reintegration
echo "Reintegrating CLIENTs"
reintegrate_clients || return 2
- client_df
+ clients_up
#Sleep
echo "wait 1 minutes"
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
client_touch testfile
#CLIENT Portion
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df
+ clients_up
$PDSH $LIVE_CLIENT "ls -l $TESTDIR"
$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile"
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
client_touch testfile
#Check FS
echo "Test Lustre stability after OST failure"
- client_df &
+ clients_up &
DFPID=$!
sleep 5
#non-failout hangs forever here
#Reintegration
echo "Reintegrating CLIENTs/OST"
reintegrate_clients || return 3
- wait_for ost1
+ wait_for_facet ost1
start_ost 1
wait $DFPID
- client_df || return 1
+ clients_up || return 1
client_touch testfile2 || return 2
#Sleep
echo "Verify Lustre filesystem is up and running"
[ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
- client_df
+ clients_up
client_touch testfile || return 1
#CLIENT Portion
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df
+ clients_up
$PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 1
$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 2
#Create files
echo "Verify Lustre filesystem is up and running"
- $PDSH $LIVE_CLIENT df $MOUNT || return 3
+ client_up $LIVE_CLIENT || return 3
client_touch testfile || return 4
#CLIENT Portion
#Check FS
echo "Test Lustre stability after CLIENTs failure"
- client_df
+ clients_up
$PDSH $LIVE_CLIENT "ls -l $TESTDIR" || return 5
$PDSH $LIVE_CLIENT "rm -f $TESTDIR/*_testfile" || return 6
#Reintegration
echo "Reintegrating CLIENTs/CLIENTs"
reintegrate_clients || return 7
- client_df
+ clients_up
#Sleep
echo "Wait 1 minutes"
run_test 9 "Ninth Failure Mode: CLIENT/CLIENT `date`"
###################################################
+############### Tenth Failure Mode ###############
test_10() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+
+ shutdown_facet mds1
+ reboot_facet mds1
+
+ # prepare for MDS failover
+ change_active mds1
+ reboot_facet mds1
+
+ clients_up &
+ DFPID=$!
+ sleep 5
+
+ shutdown_facet ost1
+
+ echo "Reintegrating OST"
+ reboot_facet ost1
+ wait_for_facet ost1
+ start_ost 1 || return 2
+
+ shutdown_facet mds2
+ reboot_facet mds2
+
+ # prepare for MDS failover
+ change_active mds2
+ reboot_facet mds2
+
+ wait_for_facet mds1
+ start_mdt 1 || return $?
+
+ wait_for_facet mds2
+ start_mdt 2 || return $?
+
+ #Check FS
+ wait $DFPID
+ clients_recover_osts ost1
+ echo "Verify reintegration"
+ clients_up || return 1
+}
+run_test 10 "Tenth Failure Mode: MDT0/OST/MDT1 `date`"
+###################################################
+
+############### Eleventh Failure Mode ###############
+test_11() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+ #MDS Portion
+ fail mds1
+ #Check FS
+
+ echo "Test Lustre stability after MDS failover"
+ clients_up
+
+ #CLIENT Portion
+ echo "Failing 2 CLIENTS"
+ fail_clients 2
+
+ #Check FS
+ echo "Test Lustre stability after CLIENT failure"
+ clients_up
+
+ #Reintegration
+ echo "Reintegrating CLIENTS"
+ reintegrate_clients || return 1
+
+ fail mds2
+
+ clients_up || return 3
+ sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 11 "Eleventh Failure Mode: MDS0/CLIENT/MDS1 `date`"
+###################################################
+
+test_12() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+ #MDS Portion
+ fail mds1,mds2
+ clients_up
+
+ #OSS Portion
+ fail ost1,ost2
+ clients_up
+
+ #CLIENT Portion
+ echo "Failing 2 CLIENTS"
+ fail_clients 2
+
+ #Check FS
+ echo "Test Lustre stability after CLIENT failure"
+ clients_up
+
+ #Reintegration
+ echo "Reintegrating CLIENTS"
+ reintegrate_clients || return 1
+
+ clients_up || return 3
+ sleep 2 # give it a little time for fully recovered before next test
+}
+run_test 12 "Twelve Failure Mode: MDS0,MDS1/OST0, OST1/CLIENTS `date`"
+###################################################
+
+test_13() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+ #MDS Portion
+ fail mds1,mds2
+ clients_up
+
+ #CLIENT Portion
+ echo "Failing 2 CLIENTS"
+ fail_clients 2
+
+ #Check FS
+ echo "Test Lustre stability after CLIENT failure"
+ clients_up
+
+ #Reintegration
+ echo "Reintegrating CLIENTS"
+ reintegrate_clients || return 1
+
+ clients_up || return 3
+ sleep 2 # give it a little time for fully recovered before next test
+
+ #OSS Portion
+ fail ost1,ost2
+ clients_up || return 4
+}
+run_test 13 "Thirteen Failure Mode: MDS0,MDS1/CLIENTS/OST0,OST1 `date`"
+###################################################
+
+test_14() {
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ echo "Verify Lustre filesystem is up and running"
+ [ -z "$(mounted_lustre_filesystems)" ] && error "Lustre is not running"
+
+ #OST Portion
+ fail ost1,ost2
+ clients_up
+
+ #CLIENT Portion
+ echo "Failing 2 CLIENTS"
+ fail_clients 2
+
+ #Check FS
+ echo "Test Lustre stability after CLIENT failure"
+ clients_up
+
+ #Reintegration
+ echo "Reintegrating CLIENTS"
+ reintegrate_clients || return 1
+
+ clients_up || return 3
+ sleep 2 # give it a little time for fully recovered before next test
+
+	#MDS Portion
+ fail mds1,mds2
+ clients_up || return 4
+}
+run_test 14 "Fourteen Failure Mode: OST0,OST1/CLIENTS/MDS0,MDS1 `date`"
+###################################################
+
+test_15() {
#Run availability after all failures
DURATION=${DURATION:-$((2 * 60 * 60))} # 6 hours default
LOADTEST=${LOADTEST:-metadata-load.py}
$PWD/availability.sh $CONFIG $DURATION $CLIENTS || return 1
}
-run_test 10 "Running Availability for 6 hours..."
+run_test 15 "Running Availability for 6 hours..."
-equals_msg `basename $0`: test complete, cleaning up
+complete $SECONDS
check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true
+exit_status