X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-mds-scale.sh;h=134f2857a336543bdade59099f47897095380925;hp=3ebe05b274dcc1f6c2bdd312b861349f5a656d8f;hb=5290c48735290a0920d804daa7f428bb0df655c2;hpb=b594948509f42859565d3ac141621b0f35d806d2

diff --git a/lustre/tests/recovery-mds-scale.sh b/lustre/tests/recovery-mds-scale.sh
index 3ebe05b..134f285 100644
--- a/lustre/tests/recovery-mds-scale.sh
+++ b/lustre/tests/recovery-mds-scale.sh
@@ -1,87 +1,55 @@
 #!/bin/bash
-
+#
 # Was Test 11 in cmd3.
 # For duration of 24 hours repeatedly failover a random MDS at
 # 10 minute intervals and verify that no application errors occur.
 # Test runs one of CLIENT_LOAD progs on remote clients.
+set -e

-LUSTRE=${LUSTRE:-`dirname $0`/..}
-SETUP=${SETUP:-""}
-CLEANUP=${CLEANUP:-""}
-. $LUSTRE/tests/test-framework.sh
+ONLY=${ONLY:-"$*"}

+LUSTRE=${LUSTRE:-$(dirname $0)/..}
+. $LUSTRE/tests/test-framework.sh
 init_test_env $@
-
-. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
 init_logging

-TESTSUITELOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh)}
-DEBUGLOG=$TESTSUITELOG.debug
-
-cleanup_logs
-
-exec 2>$DEBUGLOG
-echo "--- env ---" >&2
-env >&2
-echo "--- env ---" >&2
-set -x
-
-[ "$SHARED_DIRECTORY" ] || \
-    { FAIL_ON_ERROR=true skip_env "$0 Empty SHARED_DIRECTORY" && exit 0; }
-
-[ -n "$CLIENTS" ] || \
-    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients" && exit 0; }
-
-[ $CLIENTCOUNT -ge 3 ] || \
-    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients, have $((CLIENTCOUNT - 1))" && exit 0; }
-
-END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
-LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
-
-remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
+# bug number for skipped test:
+ALWAYS_EXCEPT="$RECOVERY_MDS_SCALE_EXCEPT"
+# UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!

 build_test_filter

-check_and_setup_lustre
-rm -rf $DIR/[df][0-9]*
+remote_mds_nodsh && skip_env "remote MDS with nodsh" && exit 0
+remote_ost_nodsh && skip_env "remote OST with nodsh" && exit 0

-max_recov_time=$(max_recovery_time)
+[ -z "$CLIENTS" -o $CLIENTCOUNT -lt 3 ] &&
+    skip_env "need three or more clients" && exit 0

-# the test node needs to be insulated from a lustre failure as much as possible,
-# so not even loading the lustre modules is ideal.
-# -- umount lustre
-# -- remove hostname from clients list
-zconf_umount $(hostname) $MOUNT
-NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
-NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))
-
-check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}
-
-MDTS=$(get_facets MDS)
-OSTS=$(get_facets OST)
+if [ -z "$SHARED_DIRECTORY" ] || ! check_shared_dir $SHARED_DIRECTORY; then
+    skip_env "SHARED_DIRECTORY should be specified with a shared directory \
+which is accessable on all of the nodes"
+    exit 0
+fi

 ERRORS_OK=""    # No application failures should occur during this test.
-FLAVOR=${FLAVOR:-"MDS"}
-if [ "$FLAVOR" == "MDS" ]; then
-    SERVERS=$MDTS
-else
-    SERVERS=$OSTS
-fi
-
 if [ "$SLOW" = "no" ]; then
     DURATION=${DURATION:-$((60 * 30))}
-    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))}
 else
     DURATION=${DURATION:-$((60 * 60 * 24))}
-    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
 fi
+SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes

-rm -f $END_RUN_FILE
+MINSLEEP=${MINSLEEP:-120}
+REQFAIL_PERCENT=${REQFAIL_PERCENT:-3}    # bug17839 comment 62
+# round up the result of integer division: C=(A + (B - 1)) / B
+REQFAIL=${REQFAIL:-$(((DURATION * REQFAIL_PERCENT + (SERVER_FAILOVER_PERIOD *
+    100 - 1 )) / SERVER_FAILOVER_PERIOD / 100))}

-vmstatLOG=${TESTSUITELOG}_$(basename $0 .sh).vmstat
+END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
+LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
+VMSTAT_PID_FILE=${VMSTAT_PID_FILE:-$TMP/vmstat.pid}

 server_numfailovers () {
     local facet=$1
@@ -102,30 +70,12 @@ servers_numfailovers () {
 }

 summary_and_cleanup () {
-    local rc=$?
-    local var
     trap 0

     # Having not empty END_RUN_FILE means the failed loads only
     if [ -s $END_RUN_FILE ]; then
-        echo "Found the END_RUN_FILE file: $END_RUN_FILE"
-        cat $END_RUN_FILE
-        local END_RUN_NODE=
-        read END_RUN_NODE < $END_RUN_FILE
-
-        # a client load will end (i.e. fail) if it finds
-        # the end run file. that does not mean that that client load
-        # actually failed though. the first node in the END_RUN_NODE is
-        # the one we are really interested in.
-        if [ -n "$END_RUN_NODE" ]; then
-            var=$(node_var_name $END_RUN_NODE)_load
-            echo "Client load failed on node $END_RUN_NODE"
-            echo
-            echo "client $END_RUN_NODE load stdout and debug files :
-                  ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}
-                  ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}.debug"
-        fi
+        print_end_run_file $END_RUN_FILE
         rc=1
     fi
@@ -134,155 +84,166 @@ summary_and_cleanup () {

     local result=PASS
     [ $rc -eq 0 ] || result=FAIL

-    log "Duration: $DURATION
+    log "Duration: $DURATION
         Server failover period: $SERVER_FAILOVER_PERIOD seconds
         Exited after: $ELAPSED seconds
         Number of failovers before exit: $(servers_numfailovers)
         Status: $result: rc=$rc"

-    # stop the vmstats on the OSTs
-    if [ "$VMSTAT" ]; then
-        do_nodes $(comma_list $(osts_nodes)) "test -f /tmp/vmstat.pid && \
-            { kill -s TERM \$(cat /tmp/vmstat.pid); rm -f /tmp/vmstat.pid; \
-            gzip -f9 $vmstatLOG-\$(hostname); }"
-    fi
+    # stop vmstat on OSS nodes
+    [ "$VMSTAT" ] && stop_process $(comma_list $(osts_nodes)) $VMSTAT_PID_FILE

-    # make sure the client loads die
-    do_nodes $NODES_TO_USE "set -x; test -f $LOAD_PID_FILE && \
-        { kill -s TERM \$(cat $LOAD_PID_FILE) || true; }"
+    # stop the client loads
+    stop_client_loads $NODES_TO_USE $LOAD_PID_FILE

-    # and free up the pdshes that started them, if any are still around
-    if [ -n "$CLIENT_LOAD_PIDS" ]; then
-        kill $CLIENT_LOAD_PIDS || true
-        sleep 5
-        kill -9 $CLIENT_LOAD_PIDS || true
-    fi
-    if [ $rc -ne 0 ]; then
-        # we are interested in only on failed clients and servers
-        local failedclients=$(cat $END_RUN_FILE | grep -v $0)
-        # FIXME: need ostfailover-s nodes also for FLAVOR=OST
-        local product=$(gather_logs $(comma_list $(osts_nodes) \
-            $(mdts_nodes) $mdsfailover_HOST $failedclients))
-        echo logs files $product
-    fi
-
-    [ $rc -eq 0 ] && zconf_mount $(hostname) $MOUNT
+    if [ $rc -ne 0 ]; then
+        # we are interested in only on failed clients and servers
+        local failedclients=$(cat $END_RUN_FILE | grep -v $0)
+        gather_logs $(comma_list $(all_server_nodes) $failedclients)
+    fi

     exit $rc
 }

-#
-# MAIN
-#
-log "-----============= $0 starting =============-----"
+failover_target() {
+    local flavor=${1:-"MDS"}
+    local servers
+    local serverfacet
+    local var

-trap summary_and_cleanup EXIT INT
+    [ "$flavor" = "MDS" ] && servers=$MDTS || servers=$OSTS

-ELAPSED=0
+    trap summary_and_cleanup EXIT INT

-# vmstat the osts
-if [ "$VMSTAT" ]; then
-    do_nodes $(comma_list $(osts_nodes)) "vmstat 1 > $vmstatLOG-\$(hostname) 2>/dev/null </dev/null & echo \$! > /tmp/vmstat.pid"
-fi
+    # start vmstat on OSS nodes
+    [ "$VMSTAT" ] && start_vmstat $(comma_list $(osts_nodes)) $VMSTAT_PID_FILE

-# Start client loads.
-start_client_loads $NODES_TO_USE
+    # start client loads
+    rm -f $END_RUN_FILE
+    start_client_loads $NODES_TO_USE

-echo clients load pids:
-if ! do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE"; then
-    exit 3
-fi
+    echo client loads pids:
+    do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE" || exit 3

-MINSLEEP=${MINSLEEP:-120}
-REQFAIL_PERCENT=${REQFAIL_PERCENT:-3}    # bug17839 comment 62
-REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
-reqfail=0
-sleep=0
+    ELAPSED=0
+    local sleep=0
+    local reqfail=0
+    local it_time_start
+    local start_ts=$(date +%s)
+    local current_ts=$start_ts

-START_TS=$(date +%s)
-CURRENT_TS=$START_TS
+    while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
+        # In order to perform the
+        # expected number of failovers, we need to account the following:
+        # 1) the time that has elapsed during the client load checking
+        # 2) time takes for failover
+        it_time_start=$(date +%s)

-while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
+        serverfacet=$(get_random_entry $servers)
+        var=${serverfacet}_numfailovers

-    # In order to perform the
-    # expected number of failovers, we need to account the following :
-    # 1) the time that has elapsed during the client load checking
-    # 2) time takes for failover
+        # Check that our client loads are still running. If any have died,
+        # that means they have died outside of recovery, which is unacceptable.
+        log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
+             ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
+        check_client_loads $NODES_TO_USE || exit 4

-    it_time_start=$(date +%s)
+        log "Wait $serverfacet recovery complete before doing next failover..."
+        if ! wait_recovery_complete $serverfacet; then
+            echo "$serverfacet recovery is not completed!"
+            exit 7
+        fi

-    SERVERFACET=$(get_random_entry $SERVERS)
-    var=${SERVERFACET}_numfailovers
+        log "Checking clients are in FULL state before doing next failover..."
+        if ! wait_clients_import_state $NODES_TO_USE $serverfacet FULL; then
+            echo "Clients import not FULL, please consider to increase \
+SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD!"
+        fi

-    # Check that our client loads are still running. If any have died,
-    # that means they have died outside of recovery, which is unacceptable.
+        log "Starting failover on $serverfacet"
+        facet_failover "$serverfacet" || exit 1

-    log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
-    ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
+        # Check that our client loads are still running during failover.
+        # No application failures should occur.
+        log "==== Checking the clients loads AFTER failover -- failure NOT OK"
+        if ! check_client_loads $NODES_TO_USE; then
+            log "Client load failed during failover. Exiting..."
+            exit 5
+        fi

-    if ! check_client_loads $NODES_TO_USE; then
-        exit 4
-    fi
+        # Increment the number of failovers.
+        val=$((${!var} + 1))
+        eval $var=$val

-    log "Wait $SERVERFACET recovery complete before doing next failover ...."
+        current_ts=$(date +%s)
+        ELAPSED=$((current_ts - start_ts))

-    if ! wait_recovery_complete $SERVERFACET ; then
-        echo "$SERVERFACET recovery is not completed!"
-        exit 7
-    fi
+        sleep=$((SERVER_FAILOVER_PERIOD - (current_ts - it_time_start)))

-    log "Checking clients are in FULL state before doing next failover"
-    if ! wait_clients_import_state $NODES_TO_USE $SERVERFACET FULL; then
-        echo "Clients import not FULL, please consider to increase SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD !"
+        # Keep counting the number of iterations when
+        # time spent to failover and two client loads check exceeded
+        # the value ( SERVER_FAILOVER_PERIOD - MINSLEEP ).
+        if [ $sleep -lt $MINSLEEP ]; then
+            reqfail=$((reqfail + 1))
+            log "WARNING: failover and two check_client_loads time exceeded \
+SERVER_FAILOVER_PERIOD - MINSLEEP!
+Failed to load the filesystem with I/O for a minimum period of \
+$MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
+This iteration, the load was only applied for sleep=$sleep seconds.
+Estimated max recovery time: $MAX_RECOV_TIME
+Probably the hardware is taking excessively long time to boot.
+Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), \
+bug 20918"
+            [ $reqfail -gt $REQFAIL ] && exit 6
+        fi

-    fi
-    log "Starting failover on $SERVERFACET"
+        log "$serverfacet has failed over ${!var} times, and counting..."

-    facet_failover "$SERVERFACET" || exit 1
+        [ $((ELAPSED + sleep)) -ge $DURATION ] && break

-    # Check that our client loads are still running during failover.
-    # No application failures should occur.
+        if [ $sleep -gt 0 ]; then
+            echo "sleeping $sleep seconds... "
+            sleep $sleep
+        fi
+    done
+    exit 0
+}

-    log "==== Checking the clients loads AFTER failover -- failure NOT OK"
-    if ! check_client_loads $NODES_TO_USE; then
-        log "Client load failed during failover. Exiting"
-        exit 5
-    fi
+################################## Main Flow ###################################
+check_and_setup_lustre
+rm -rf $DIR/[Rdfs][0-9]*

-    # Increment the number of failovers
-    val=$((${!var} + 1))
-    eval $var=$val
+MAX_RECOV_TIME=$(max_recovery_time)

-    CURRENT_TS=$(date +%s)
-    ELAPSED=$((CURRENT_TS - START_TS))
+# The test node needs to be insulated from a lustre failure as much as possible,
+# so not even loading the lustre modules is ideal.
+# -- umount lustre
+# -- remove hostname from clients list
+zconf_umount $HOSTNAME $MOUNT
+NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
+NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $HOSTNAME)

-    sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
+check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}

-    # keep count the number of itterations when
-    # time spend to failover and two client loads check exceeded
-    # the value ( SERVER_FAILOVER_PERIOD - MINSLEEP )
-    if [ $sleep -lt $MINSLEEP ]; then
-        reqfail=$((reqfail +1))
-        log "WARNING: failover and two check_client_loads time exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
-Failed to load the filesystem with I/O for a minimum period of $MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
-This iteration, the load was only applied for sleep=$sleep seconds.
-Estimated max recovery time : $max_recov_time
-Probably the hardware is taking excessively long to boot.
-Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), bug 20918"
-        [ $reqfail -gt $REQFAIL ] && exit 6
-    fi
+MDTS=$(get_facets MDS)
+OSTS=$(get_facets OST)

-    log "$SERVERFACET has failed over ${!var} times, and counting..."
+test_failover_mds() {
+    # failover a random MDS
+    failover_target MDS
+}
+run_test failover_mds "failover MDS"

-    if [ $((ELAPSED + sleep)) -ge $DURATION ]; then
-        break
-    fi
+test_failover_ost() {
+    # failover a random OST
+    failover_target OST
+}
+run_test failover_ost "failover OST"

-    if [ $sleep -gt 0 ]; then
-        echo "sleeping $sleep seconds ... "
-        sleep $sleep
-    fi
-done
+zconf_mount $HOSTNAME $MOUNT || error "mount $MOUNT on $HOSTNAME failed"
+client_up || error "start client on $HOSTNAME failed"

-exit 0
+complete $SECONDS
+check_and_cleanup_lustre
+exit_status
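
Note on the REQFAIL default introduced by this patch: it is a rounded-up integer division, per the in-line comment "round up the result of integer division: C=(A + (B - 1)) / B". As an illustrative sketch only, using the patch's own default values (DURATION=$((60 * 60 * 24)) when SLOW is not "no", SERVER_FAILOVER_PERIOD=$((60 * 10)), REQFAIL_PERCENT=3) and the same variable names the script defines, the arithmetic works out as follows:

#!/bin/bash
# Worked example of the REQFAIL computation above, with assumed default values.
DURATION=$((60 * 60 * 24))            # 86400 seconds (24-hour default)
SERVER_FAILOVER_PERIOD=$((60 * 10))   # 600 seconds (10-minute default)
REQFAIL_PERCENT=3                     # allowed percentage of short iterations (bug17839 comment 62)

# Same rounded-up integer division as in the patch: C = (A + (B - 1)) / B
REQFAIL=$(((DURATION * REQFAIL_PERCENT + (SERVER_FAILOVER_PERIOD *
        100 - 1)) / SERVER_FAILOVER_PERIOD / 100))

# 86400 * 3 = 259200; 259200 + 59999 = 319199; 319199 / 600 = 531; 531 / 100 = 5
echo "REQFAIL=$REQFAIL"               # prints: REQFAIL=5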