X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-mds-scale.sh;h=298debdbbed72302ffefdae20de68e37d21596eb;hb=741229ebf7312fa72631f4b265d22b0b0904929d;hp=4d6bb7c4c100a87edb816a8e8b69c042066c77a4;hpb=693663b3b9e7f823931fd79510e1292e63e4fe06;p=fs%2Flustre-release.git

diff --git a/lustre/tests/recovery-mds-scale.sh b/lustre/tests/recovery-mds-scale.sh
index 4d6bb7c..298debd 100644
--- a/lustre/tests/recovery-mds-scale.sh
+++ b/lustre/tests/recovery-mds-scale.sh
@@ -14,9 +14,13 @@ CLEANUP=${CLEANUP:-""}
 init_test_env $@
 
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
 
-TESTSUITELOG=${TESTSUITELOG:-$TMP/recovery-mds-scale}
+TESTSUITELOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh)}
 DEBUGLOG=$TESTSUITELOG.debug
+
+cleanup_logs
+
 exec 2>$DEBUGLOG
 echo "--- env ---" >&2
 env >&2
@@ -24,13 +28,15 @@ echo "--- env ---" >&2
 set -x
 
 [ "$SHARED_DIRECTORY" ] || \
-    { skip "$0: Empty SHARED_DIRECTORY" && exit 0; }
+    { FAIL_ON_ERROR=true skip_env "$0 Empty SHARED_DIRECTORY" && exit 0; }
+
+[ -n "$CLIENTS" ] || \
+    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients" && exit 0; }
 
-[ -n "$CLIENTS" ] || { skip "$0 Need two or more remote clients" && exit 0; }
 [ $CLIENTCOUNT -ge 3 ] || \
-    { skip "$0 Need two or more clients, have $CLIENTCOUNT" && exit 0; }
+    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients, have $((CLIENTCOUNT - 1))" && exit 0; }
 
-END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY}/end_run_file}
+END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
 LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
 
 remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
@@ -41,27 +47,20 @@ build_test_filter
 check_and_setup_lustre
 rm -rf $DIR/[df][0-9]*
 
+max_recov_time=$(max_recovery_time)
+
 # the test node needs to be insulated from a lustre failure as much as possible,
 # so not even loading the lustre modules is ideal.
 # -- umount lustre
 # -- remove hostname from clients list
 zconf_umount $(hostname) $MOUNT
 NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
-NODES_TO_USE=$(exclude_item_from_list $NODES_TO_USE $(hostname))
+NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))
 
 check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}
 
-MDTS=""
-for ((i=1; i<=$MDSCOUNT; i++)) do
-    MDTS="$MDTS mds$i"
-done
-MDTS=$(comma_list $MDTS)
-
-OSTS=""
-for ((i=1; i<=$OSTCOUNT; i++)) do
-    OSTS="$OSTS ost$i"
-done
-OSTS=$(comma_list $OSTS)
+MDTS=$(get_facets MDS)
+OSTS=$(get_facets OST)
 
 ERRORS_OK=""    # No application failures should occur during this test.
 FLAVOR=${FLAVOR:-"MDS"}
@@ -85,15 +84,20 @@ rm -f $END_RUN_FILE
 vmstatLOG=${TESTSUITELOG}_$(basename $0 .sh).vmstat
 
 server_numfailovers () {
+    local facet=$1
+    local var=${facet}_numfailovers
+    local val=0
+
+    [[ ${!var} ]] && val=${!var}
+    echo $val
+}
+
+servers_numfailovers () {
     local facet
     local var
 
-    for facet in $MDTS ${OSTS//,/ }; do
-        var=${facet}_nums
-        val=${!var}
-        if [ "$val" ] ; then
-            echo "$facet failed over $val times"
-        fi
+    for facet in ${MDTS//,/ } ${OSTS//,/ }; do
+        echo "$facet: $(server_numfailovers $facet) times"
     done
 }
 
@@ -115,7 +119,7 @@ summary_and_cleanup () {
     # actually failed though. the first node in the END_RUN_NODE is
     # the one we are really interested in.
     if [ -n "$END_RUN_NODE" ]; then
-        var=${END_RUN_NODE}_load
+        var=$(client_var_name $END_RUN_NODE)_load
         echo "Client load failed on node $END_RUN_NODE"
         echo
         echo "client $END_RUN_NODE load stdout and debug files :
@@ -124,17 +128,17 @@ summary_and_cleanup () {
         fi
         rc=1
     fi
-
+
     echo $(date +'%F %H:%M:%S') Terminating clients loads ...
     echo "$0" >> $END_RUN_FILE
     local result=PASS
     [ $rc -eq 0 ] || result=FAIL
 
-    log "Duraion: $DURATION
+    log "Duration: $DURATION
 Server failover period: $SERVER_FAILOVER_PERIOD seconds
 Exited after: $ELAPSED seconds
 Number of failovers before exit:
-$(server_numfailovers)
+$(servers_numfailovers)
 Status: $result: rc=$rc"
 
     # stop the vmstats on the OSTs
@@ -154,20 +158,28 @@ Status: $result: rc=$rc"
         sleep 5
        kill -9 $CLIENT_LOAD_PIDS || true
     fi
+    if [ $rc -ne 0 ]; then
+        # we are interested in only on failed clients and servers
+        local failedclients=$(cat $END_RUN_FILE | grep -v $0)
+        # FIXME: need ostfailover-s nodes also for FLAVOR=OST
+        local product=$(gather_logs $(comma_list $(osts_nodes) \
+                        $(mdts_nodes) $mdsfailover_HOST $failedclients))
+        echo logs files $product
+    fi
+
     [ $rc -eq 0 ] && zconf_mount $(hostname) $MOUNT
     exit $rc
 }
 
 #
 # MAIN
 #
 log "-----============= $0 starting =============-----"
 
 trap summary_and_cleanup EXIT INT
 
 ELAPSED=0
-NUM_FAILOVERS=0
 
 # vmstat the osts
 if [ "$VMSTAT" ]; then
@@ -178,44 +190,53 @@ fi
 
 start_client_loads $NODES_TO_USE
 echo clients load pids:
-if ! do_nodes $NODES_TO_USE "set -x; echo \$(hostname): && cat $LOAD_PID_FILE"; then
-    if [ -e $DEBUGLOG ]; then
-        exec 2<&-
-        cat $DEBUGLOG
+if ! do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE"; then
         exit 3
-    fi
 fi
 
-START_TS=$(date +%s)
-CURRENT_TS=$START_TS
-
 MINSLEEP=${MINSLEEP:-120}
 REQFAIL_PERCENT=${REQFAIL_PERCENT:-3}    # bug17839 comment 62
 REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
 reqfail=0
 sleep=0
+
+START_TS=$(date +%s)
+CURRENT_TS=$START_TS
+
 while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
-
     # In order to perform the
     # expected number of failovers, we need to account the following :
     # 1) the time that has elapsed during the client load checking
     # 2) time takes for failover
 
     it_time_start=$(date +%s)
-
     SERVERFACET=$(get_random_entry $SERVERS)
-    var=${SERVERFACET}_nums
+    var=${SERVERFACET}_numfailovers
 
-    # Check that our client loads are still running. If any have died,
-    # that means they have died outside of recovery, which is unacceptable.
+    # Check that our client loads are still running. If any have died,
+    # that means they have died outside of recovery, which is unacceptable.
 
     log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
-    ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
+    ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
 
     if ! check_client_loads $NODES_TO_USE; then
         exit 4
     fi
 
+    log "Wait $SERVERFACET recovery complete before doing next failover ...."
+
+    if ! wait_recovery_complete $SERVERFACET ; then
+        echo "$SERVERFACET recovery is not completed!"
+        exit 7
+    fi
+
+    log "Checking clients are in FULL state before doing next failover"
+    if ! wait_clients_import_state $NODES_TO_USE $SERVERFACET FULL; then
+        echo "Clients import not FULL, please consider to increase SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD !"
+
+    fi
     log "Starting failover on $SERVERFACET"
     facet_failover "$SERVERFACET" || exit 1
@@ -230,13 +251,12 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
     fi
 
     # Increment the number of failovers
-    NUM_FAILOVERS=$((NUM_FAILOVERS+1))
     val=$((${!var} + 1))
     eval $var=$val
-
+
     CURRENT_TS=$(date +%s)
     ELAPSED=$((CURRENT_TS - START_TS))
-
+
     sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
 
     # keep count the number of itterations when
@@ -245,12 +265,21 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
     if [ $sleep -lt $MINSLEEP ]; then
         reqfail=$((reqfail +1))
        log "WARNING: failover and two check_client_loads time exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
-Failed to meet interval $reqfail times ( REQFAIL=$REQFAIL ); have sleep=$sleep"
-        [ $reqfail -gt $REQFAIL ] && exit 6
-    fi
+Failed to load the filesystem with I/O for a minimum period of $MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
+This iteration, the load was only applied for sleep=$sleep seconds.
+Estimated max recovery time : $max_recov_time
+Probably the hardware is taking excessively long to boot.
+Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), bug 20918"
+        [ $reqfail -gt $REQFAIL ] && exit 6
+    fi
 
     log "$SERVERFACET has failed over ${!var} times, and counting..."
-    if [ $sleep -gt 0 ]; then
+
+    if [ $((ELAPSED + sleep)) -gt $DURATION ]; then
+        break
+    fi
+
+    if [ $sleep -gt 0 ]; then
        echo "sleeping $sleep seconds ... "
        sleep $sleep
    fi
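Note on the counter bookkeeping introduced above: the patch drops the single global NUM_FAILOVERS counter and instead keeps one counter per facet in a dynamically named shell variable (e.g. mds1_numfailovers), read back with bash indirect expansion (${!var}) and updated with eval. A minimal standalone sketch of that pattern follows; the facet names and helper names are purely illustrative and do not depend on the Lustre test framework.

#!/bin/bash
# Illustrative sketch only -- mimics the ${facet}_numfailovers bookkeeping
# used by server_numfailovers() and the failover loop in the patch above.

get_numfailovers () {
    local facet=$1
    local var=${facet}_numfailovers
    local val=0

    # ${!var} is indirect expansion: it reads the variable whose name is
    # stored in $var (e.g. mds1_numfailovers), which may still be unset.
    [[ ${!var} ]] && val=${!var}
    echo $val
}

bump_numfailovers () {
    local facet=$1
    local var=${facet}_numfailovers
    local val=$(( $(get_numfailovers $facet) + 1 ))

    # eval assigns to the dynamically named counter, e.g. mds1_numfailovers=2
    eval $var=$val
}

# Example: simulate two failovers of mds1 and one of ost3.
bump_numfailovers mds1
bump_numfailovers mds1
bump_numfailovers ost3

for facet in mds1 ost3; do
    echo "$facet: $(get_numfailovers $facet) times"
done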