init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging

# Send all stderr (including the set -x trace below) to a per-node
# suite debug log under TESTLOG_PREFIX.
DEBUGLOG=$TESTLOG_PREFIX.suite_debug_log.$(hostname -s).log
exec 2>$DEBUGLOG
echo "--- env ---" >&2
env >&2
set -x

# A directory shared by all clients is required for the end-run
# synchronization file.
[ "$SHARED_DIRECTORY" ] || \
	{ FAIL_ON_ERROR=true skip_env "$0 Empty SHARED_DIRECTORY" && exit 0; }

check_shared_dir $SHARED_DIRECTORY ||
	error "$SHARED_DIRECTORY isn't a shared directory"

[ -n "$CLIENTS" ] || \
	{ FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients" && exit 0; }

# The local node is excluded from the load clients below, so at least
# three mounted clients are needed to keep two remote load generators.
[ $CLIENTCOUNT -ge 3 ] || \
	{ FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients, have $((CLIENTCOUNT - 1))" && exit 0; }

# NOTE(review): the old default (".../end_run_file}") had an unbalanced
# closing brace inside the path; the brace now closes the expansion.
END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
VMSTAT_PID_FILE=${VMSTAT_PID_FILE:-$TMP/vmstat.pid}

remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
remote_ost_nodsh && skip "remote OST with nodsh" && exit 0

check_and_setup_lustre
rm -rf $DIR/[df][0-9]*

# Estimated upper bound on recovery time, reported when an iteration
# exceeds SERVER_FAILOVER_PERIOD.
max_recov_time=$(max_recovery_time)
+
# The test node needs to be insulated from a lustre failure as much as
# possible, so not even loading the lustre modules is ideal.
# -- umount lustre
# -- remove hostname from clients list
zconf_umount $(hostname) $MOUNT
NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))

check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}

# Comma-separated facet lists (e.g. "mds1,mds2" / "ost1,ost2") built by
# the test framework instead of hand-rolled loops.
MDTS=$(get_facets MDS)
OSTS=$(get_facets OST)

ERRORS_OK=""    # No application failures should occur during this test.
FLAVOR=${FLAVOR:-"MDS"}

# Select which server facets are failed over, based on the test flavor.
if [ "$FLAVOR" == "MDS" ]; then
	SERVERS=$MDTS
else
	SERVERS=$OSTS
fi

# Short run for SLOW=no; otherwise a 24-hour soak with a longer
# failover period.
if [ "$SLOW" = "no" ]; then
	DURATION=${DURATION:-$((60 * 30))}
	SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))} # 5 minutes
else
	DURATION=${DURATION:-$((60 * 60 * 24))}
	SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
fi

rm -f $END_RUN_FILE
# server_numfailovers FACET
# Echo the number of times FACET has been failed over so far, as recorded
# in the per-facet counter variable <facet>_numfailovers (0 if unset).
server_numfailovers () {
	local facet=$1
	local var=${facet}_numfailovers
	local val=0

	[[ ${!var} ]] && val=${!var}
	echo $val
}

# servers_numfailovers
# Print one "<facet>: N times" summary line for every MDT and OST facet
# listed in the comma-separated MDTS and OSTS globals.
servers_numfailovers () {
	local facet

	for facet in ${MDTS//,/ } ${OSTS//,/ }; do
		echo "$facet: $(server_numfailovers $facet) times"
	done
}
summary_and_cleanup () {
-
local rc=$?
local var
trap 0
local END_RUN_NODE=
read END_RUN_NODE < $END_RUN_FILE
- # a client load will end (i.e. fail) if it finds
- # the end run file. that does not mean that that client load
- # actually failed though. the first node in the END_RUN_NODE is
- # the one we are really interested in.
+ # A client load will stop if it found the END_RUN_FILE file.
+ # That does not mean the client load actually failed though.
+ # The first node in END_RUN_FILE is the one we are interested in.
if [ -n "$END_RUN_NODE" ]; then
- var=${END_RUN_NODE}_load
- echo "Client load failed on node $END_RUN_NODE"
+ var=$(node_var_name $END_RUN_NODE)_load
+ echo "Client load failed on node $END_RUN_NODE"
echo
- echo "client $END_RUN_NODE load stdout and debug files :
- ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}
- ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}.debug"
+ echo "Client $END_RUN_NODE load stdout and debug files:
+ $TESTLOG_PREFIX.run_${!var}_stdout.$END_RUN_NODE.log
+ $TESTLOG_PREFIX.run_${!var}_debug.$END_RUN_NODE.log"
fi
rc=1
fi
-
+
echo $(date +'%F %H:%M:%S') Terminating clients loads ...
echo "$0" >> $END_RUN_FILE
local result=PASS
[ $rc -eq 0 ] || result=FAIL
- log "Duraion: $DURATION
+ log "Duration: $DURATION
Server failover period: $SERVER_FAILOVER_PERIOD seconds
Exited after: $ELAPSED seconds
Number of failovers before exit:
-$(server_numfailovers)
+$(servers_numfailovers)
Status: $result: rc=$rc"
# stop the vmstats on the OSTs
if [ "$VMSTAT" ]; then
- do_nodes $(comma_list $(osts_nodes)) "test -f /tmp/vmstat.pid && \
- { kill -s TERM \$(cat /tmp/vmstat.pid); rm -f /tmp/vmstat.pid; \
- gzip -f9 $vmstatLOG-\$(hostname); }"
+ do_nodes $(comma_list $(osts_nodes)) "test -f $VMSTAT_PID_FILE &&
+ { kill -s TERM \\\$(cat $VMSTAT_PID_FILE);
+ rm -f $VMSTAT_PID_FILE || true; }"
fi
# make sure the client loads die
- do_nodes $NODES_TO_USE "set -x; test -f $LOAD_PID_FILE && \
- { kill -s TERM \$(cat $LOAD_PID_FILE) || true; }"
+ do_nodes $NODES_TO_USE "set -x; test -f $LOAD_PID_FILE &&
+ { kill -s TERM \\\$(cat $LOAD_PID_FILE);
+ rm -f $LOAD_PID_FILE || true; }"
# and free up the pdshes that started them, if any are still around
if [ -n "$CLIENT_LOAD_PIDS" ]; then
sleep 5
kill -9 $CLIENT_LOAD_PIDS || true
fi
+
+ if [ $rc -ne 0 ]; then
+	# We are interested only in the failed clients and servers
+ local failedclients=$(cat $END_RUN_FILE | grep -v $0)
+ # FIXME: need ostfailover-s nodes also for FLAVOR=OST
+ local product=$(gather_logs $(comma_list $(osts_nodes) \
+ $(mdts_nodes) $mdsfailover_HOST $failedclients) 1)
+ echo $product
+ fi
+
[ $rc -eq 0 ] && zconf_mount $(hostname) $MOUNT
exit $rc
}
#
-# MAIN
+# MAIN
#
log "-----============= $0 starting =============-----"
trap summary_and_cleanup EXIT INT
-if [ "$SLOW" = "no" ]; then
- DURATION=${DURATION:-$((60 * 30))}
-else
- DURATION=${DURATION:-$((60 * 60 * 24))}
-fi
-
ELAPSED=0
-NUM_FAILOVERS=0
# vmstat the osts
if [ "$VMSTAT" ]; then
- do_nodes $(comma_list $(osts_nodes)) "vmstat 1 > $vmstatLOG-\$(hostname) 2>/dev/null </dev/null & echo \$! > /tmp/vmstat.pid"
+ do_nodes $(comma_list $(osts_nodes)) \
+ "vmstat 1 > $TESTLOG_PREFIX.vmstat.\\\$(hostname -s).log \
+ 2>/dev/null </dev/null & echo \\\$! > $VMSTAT_PID_FILE"
fi
# Start client loads.
start_client_loads $NODES_TO_USE
echo clients load pids:
-if ! do_nodes $NODES_TO_USE "set -x; echo \$(hostname): && cat $LOAD_PID_FILE"; then
- if [ -e $DEBUGLOG ]; then
- exec 2<&-
- cat $DEBUGLOG
- exit 3
- fi
+if ! do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE"; then
+ exit 3
fi
-START_TS=$(date +%s)
-CURRENT_TS=$START_TS
-
-if [ "$SLOW" = "no" ]; then
- SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))}
-else
- SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
-fi
-
MINSLEEP=${MINSLEEP:-120}
REQFAIL_PERCENT=${REQFAIL_PERCENT:-3} # bug17839 comment 62
REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
reqfail=0
sleep=0
+
+START_TS=$(date +%s)
+CURRENT_TS=$START_TS
+
while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
- # In order to perform the
+ # In order to perform the
# expected number of failovers, we need to account the following :
# 1) the time that has elapsed during the client load checking
# 2) time takes for failover
it_time_start=$(date +%s)
-
+
SERVERFACET=$(get_random_entry $SERVERS)
- var=${SERVERFACET}_nums
+ var=${SERVERFACET}_numfailovers
- # Check that our client loads are still running. If any have died,
- # that means they have died outside of recovery, which is unacceptable.
+ # Check that our client loads are still running. If any have died,
+ # that means they have died outside of recovery, which is unacceptable.
log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
- ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
+ ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
if ! check_client_loads $NODES_TO_USE; then
exit 4
fi
- log "Starting failover on $SERVERNODE"
+ log "Wait $SERVERFACET recovery complete before doing next failover ...."
+
+ if ! wait_recovery_complete $SERVERFACET ; then
+ echo "$SERVERFACET recovery is not completed!"
+ exit 7
+ fi
+
+ log "Checking clients are in FULL state before doing next failover"
+ if ! wait_clients_import_state $NODES_TO_USE $SERVERFACET FULL; then
+ echo "Clients import not FULL, please consider to increase SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD !"
+
+ fi
+ log "Starting failover on $SERVERFACET"
facet_failover "$SERVERFACET" || exit 1
fi
# Increment the number of failovers
- NUM_FAILOVERS=$((NUM_FAILOVERS+1))
val=$((${!var} + 1))
eval $var=$val
-
+
CURRENT_TS=$(date +%s)
ELAPSED=$((CURRENT_TS - START_TS))
-
+
sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
	# keep count of the number of iterations in which sleep < MINSLEEP
if [ $sleep -lt $MINSLEEP ]; then
reqfail=$((reqfail +1))
log "WARNING: failover and two check_client_loads time exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
-Failed to meet interval $reqfail times ( REQFAIL=$REQFAIL ); have sleep=$sleep"
- [ $reqfail -gt $REQFAIL ] && exit 6
- fi
+Failed to load the filesystem with I/O for a minimum period of $MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
+This iteration, the load was only applied for sleep=$sleep seconds.
+Estimated max recovery time : $max_recov_time
+Probably the hardware is taking excessively long to boot.
+Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), bug 20918"
+ [ $reqfail -gt $REQFAIL ] && exit 6
+ fi
log "$SERVERFACET has failed over ${!var} times, and counting..."
- if [ $sleep -gt 0 ]; then
+
+ if [ $((ELAPSED + sleep)) -ge $DURATION ]; then
+ break
+ fi
+
+ if [ $sleep -gt 0 ]; then
echo "sleeping $sleep seconds ... "
sleep $sleep
fi