X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-random-scale.sh;h=b977b9c3b349c1d756ad3c5b23162aa549be5762;hb=b594948509;hp=061a2b9b83bd2e27019d4dfbfaf66dc31559c344;hpb=bebf2b2ca89bbba9af3da6fbae2e97f02409bed0;p=fs%2Flustre-release.git

diff --git a/lustre/tests/recovery-random-scale.sh b/lustre/tests/recovery-random-scale.sh
index 061a2b9..b977b9c 100644
--- a/lustre/tests/recovery-random-scale.sh
+++ b/lustre/tests/recovery-random-scale.sh
@@ -18,9 +18,13 @@ CLEANUP=${CLEANUP:-""}
 init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
 
-TESTSUITELOG=${TESTSUITELOG:-$TMP/recovery-random-scale}
+TESTSUITELOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh)}
 DEBUGLOG=$TESTSUITELOG.debug
+
+cleanup_logs
+
 exec 2>$DEBUGLOG
 echo "--- env ---" >&2
 env >&2
@@ -28,22 +32,29 @@ echo "--- env ---" >&2
 set -x
 [ "$SHARED_DIRECTORY" ] || \
-    { skip "$0: Empty SHARED_DIRECTORY" && exit 0; }
+    { FAIL_ON_ERROR=true skip_env "$0 Empty SHARED_DIRECTORY" && exit 0; }
+
+[ -n "$CLIENTS" ] || \
+    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients" && exit 0; }
 
-[ -n "$CLIENTS" ] || { skip "$0 Need two or more remote clients" && exit 0; }
 [ $CLIENTCOUNT -ge 3 ] || \
-    { skip "$0 Need two or more clients, have $CLIENTCOUNT" && exit 0; }
+    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients, have $((CLIENTCOUNT - 1))" && exit 0; }
 
 END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
 LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
 
 remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
 
+[[ $FAILURE_MODE = SOFT ]] && \
+    log "WARNING: $0 is not functional with FAILURE_MODE = SOFT, bz22797"
+
 build_test_filter
 
 check_and_setup_lustre
 rm -rf $DIR/[df][0-9]*
 
+max_recov_time=$(max_recovery_time)
+
 # the test node needs to be insulated from a lustre failure as much as possible,
 # so not even loading the lustre modules is ideal.
 # -- umount lustre
@@ -112,8 +123,8 @@ summary_and_cleanup () {
     # actually failed though. the first node in the END_RUN_NODE is
     # the one we are really interested in.
     if [ -n "$END_RUN_NODE" ]; then
-        var=${END_RUN_NODE}_load
-        echo "Client load failed on node $END_RUN_NODE"
+        var=$(node_var_name $END_RUN_NODE)_load
+        echo "Client load failed on node $END_RUN_NODE"
         echo
         echo "client $END_RUN_NODE load stdout and debug files :
               ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}
@@ -128,7 +139,7 @@ summary_and_cleanup () {
     local result=PASS
     [ $rc -eq 0 ] || result=FAIL
 
-    log "Duraion: $DURATION
+    log "Duration: $DURATION
 Server failover period: $SERVER_FAILOVER_PERIOD seconds
 Exited after: $ELAPSED seconds
 Number of failovers before exit:
@@ -155,13 +166,16 @@ Status: $result: rc=$rc"
 
     if [ $rc -ne 0 ]; then
         print_logs $NODES_TO_USE
+        # we are interested in only on failed clients and servers
+        local failedclients=$(cat $END_RUN_FILE | grep -v $0)
+        # FIXME: need ostfailover-s nodes also for FLAVOR=OST
+        local product=$(gather_logs $(comma_list $(osts_nodes) \
+                         $(mdts_nodes) $mdsfailover_HOST $failedclients))
+        echo logs files $product
     fi
 
-    if [ $rc -eq 0 ]; then
-        zconf_mount $(hostname) $MOUNT
-    else
-        error "exited with rc=$rc"
-    fi
+    [ $rc -eq 0 ] && zconf_mount $(hostname) $MOUNT
+
     exit $rc
 }
@@ -183,12 +197,8 @@ fi
 start_client_loads $NODES_TO_USE
 
 echo clients load pids:
-if ! do_nodes $NODES_TO_USE "set -x; echo \$(hostname): && cat $LOAD_PID_FILE"; then
-    if [ -e $DEBUGLOG ]; then
-        exec 2<&-
-        cat $DEBUGLOG
+if ! do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE"; then
         exit 3
-    fi
 fi
 
 START_TS=$(date +%s)
@@ -212,7 +222,7 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
     it_time_start=$(date +%s)
 
     FAIL_CLIENT=$(get_random_entry $NODES_TO_USE)
-    client_var=$(client_var_name $FAIL_CLIENT)_nums
+    client_var=$(node_var_name $FAIL_CLIENT)_nums
 
     # store the list of failed clients
     # lists are comma separated
@@ -237,11 +247,11 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
     log "Starting failover on $SERVERFACET"
 
     facet_failover "$SERVERFACET" || exit 1
-    if ! wait_recovery_complete $SERVERFACET $((TIMEOUT * 10)); then
+    if ! wait_recovery_complete $SERVERFACET ; then
        echo "$SERVERFACET recovery is not completed!"
        exit 7
     fi
-    
+
     boot_node $FAIL_CLIENT
     echo "Reintegrating $FAIL_CLIENT"
     zconf_mount $FAIL_CLIENT $MOUNT || exit $?
@@ -260,10 +270,10 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
     # not for all clients.
     if [ -e $END_RUN_FILE ]; then
         read END_RUN_NODE < $END_RUN_FILE
-        [[ $END_RUN_NODE = $FAIL_CLIENT ]] && 
+        [[ $END_RUN_NODE = $FAIL_CLIENT ]] &&
            rm -f $END_RUN_FILE || exit 13
     fi
-    
+
     restart_client_loads $FAIL_CLIENT $ERRORS_OK || exit $?
 
     # Check that not failed clients loads are still running.
@@ -277,7 +287,6 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
 
     CURRENT_TS=$(date +%s)
     ELAPSED=$((CURRENT_TS - START_TS))
-
     sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
 
     # keep count the number of itterations when
@@ -285,16 +294,23 @@
     # the value ( SERVER_FAILOVER_PERIOD - MINSLEEP )
     if [ $sleep -lt $MINSLEEP ]; then
         reqfail=$((reqfail +1))
-        log "WARNING: failover, client reintegration and check_client_loads time
-exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
-Failed to meet interval $reqfail times ( REQFAIL=$REQFAIL ); have sleep=$sleep"
-        [ $reqfail -gt $REQFAIL ] && exit 6
-    fi
+        log "WARNING: failover, client reintegration and check_client_loads time exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
+Failed to load the filesystem with I/O for a minimum period of $MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
+This iteration, the load was only applied for sleep=$sleep seconds.
+Estimated max recovery time : $max_recov_time
+Probably the hardware is taking excessively long to boot.
+Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), bug 20918"
+        [ $reqfail -gt $REQFAIL ] && exit 6
+    fi
 
     log " Number of failovers: $(numfailovers) and counting..."
 
-    if [ $sleep -gt 0 ]; then
+    if [ $((ELAPSED + sleep)) -ge $DURATION ]; then
+        break
+    fi
+
+    if [ $sleep -gt 0 ]; then
        echo "sleeping $sleep seconds ... "
        sleep $sleep
     fi
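
A minimal sketch of the per-iteration time budget that the last hunk adjusts, using the script's own variable names (SERVER_FAILOVER_PERIOD, MINSLEEP, REQFAIL, DURATION). The default values and the empty loop body below are illustrative only and are not part of the patch; the test framework normally provides the real values and the failover steps.

    #!/bin/bash
    # Sketch only: mirrors the budget accounting in the main loop above.
    # Illustrative defaults; recovery-random-scale.sh gets these from its config.
    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-1200}
    MINSLEEP=${MINSLEEP:-120}
    REQFAIL=${REQFAIL:-5}
    DURATION=${DURATION:-3600}
    START_TS=$(date +%s)
    ELAPSED=0
    reqfail=0
    while [ $ELAPSED -lt $DURATION ]; do
        it_time_start=$(date +%s)
        # ... failover, client reintegration and check_client_loads go here ...
        CURRENT_TS=$(date +%s)
        ELAPSED=$((CURRENT_TS - START_TS))
        sleep=$((SERVER_FAILOVER_PERIOD - (CURRENT_TS - it_time_start)))
        if [ $sleep -lt $MINSLEEP ]; then
            # The iteration overran its budget; give up after REQFAIL overruns.
            reqfail=$((reqfail + 1))
            [ $reqfail -gt $REQFAIL ] && exit 6
        fi
        # Stop instead of sleeping past the overall test DURATION.
        [ $((ELAPSED + sleep)) -ge $DURATION ] && break
        [ $sleep -gt 0 ] && sleep $sleep
    done

The early break added by the patch keeps the final sleep from pushing the run past DURATION, while the REQFAIL counter still bounds how many iterations may overrun before the test exits with status 6.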