Whamcloud - gitweb
b=18696
author: grev <grev>
Wed, 11 Mar 2009 16:18:18 +0000 (16:18 +0000)
committer: grev <grev>
Wed, 11 Mar 2009 16:18:18 +0000 (16:18 +0000)
i=Tappro
new RECOVERY_RANDOM_SCALE test;
zconf_umount_clients fn remote command fix;

lustre/tests/acceptance-small.sh
lustre/tests/recovery-double-scale.sh
lustre/tests/recovery-random-scale.sh [new file with mode: 0644]
lustre/tests/test-framework.sh

index 166ba49..ceca69f 100755 (executable)
@@ -23,7 +23,7 @@ fi
 [ "$DEBUG_OFF" ] || DEBUG_OFF="eval lctl set_param debug=\"$DEBUG_LVL\""
 [ "$DEBUG_ON" ] || DEBUG_ON="eval lctl set_param debug=0x33f0484"
 
 [ "$DEBUG_OFF" ] || DEBUG_OFF="eval lctl set_param debug=\"$DEBUG_LVL\""
 [ "$DEBUG_ON" ] || DEBUG_ON="eval lctl set_param debug=0x33f0484"
 
-export TESTSUITE_LIST="RUNTESTS SANITY DBENCH BONNIE IOZONE FSX SANITYN LFSCK LIBLUSTRE RACER REPLAY_SINGLE CONF_SANITY RECOVERY_SMALL REPLAY_OST_SINGLE REPLAY_DUAL INSANITY SANITY_QUOTA SANITY_SEC SANITY_GSS PERFORMANCE_SANITY RECOVERY_MDS_SCALE RECOVERY_DOUBLE_SCALE"
+export TESTSUITE_LIST="RUNTESTS SANITY DBENCH BONNIE IOZONE FSX SANITYN LFSCK LIBLUSTRE RACER REPLAY_SINGLE CONF_SANITY RECOVERY_SMALL REPLAY_OST_SINGLE REPLAY_DUAL INSANITY SANITY_QUOTA SANITY_SEC SANITY_GSS PERFORMANCE_SANITY RECOVERY_MDS_SCALE RECOVERY_DOUBLE_SCALE RECOVERY_RANDOM_SCALE"
 
 if [ "$ACC_SM_ONLY" ]; then
     for O in $TESTSUITE_LIST; do
 
 if [ "$ACC_SM_ONLY" ]; then
     for O in $TESTSUITE_LIST; do
@@ -36,6 +36,7 @@ if [ "$ACC_SM_ONLY" ]; then
     done
 fi
 LFSCK="no" # bug 13698
     done
 fi
 LFSCK="no" # bug 13698
+RECOVERY_RANDOM_SCALE="no" # bug 16353
 
 LIBLUSTRETESTS=${LIBLUSTRETESTS:-../liblustre/tests}
 
 
 LIBLUSTRETESTS=${LIBLUSTRETESTS:-../liblustre/tests}
 
@@ -448,6 +449,13 @@ if [ "$RECOVERY_DOUBLE_SCALE" != "no" ]; then
         RECOVERY_DOUBLE_SCALE="done"
 fi
 
         RECOVERY_DOUBLE_SCALE="done"
 fi
 
+[ "$RECOVERY_RANDOM_SCALE" != "no" ] && skip_remmds recovery-double-scale && RECOVERY_RANDOM_SCALE=no && MSKIPPED=1
+if [ "$RECOVERY_RANDOM_SCALE" != "no" ]; then
+        title recovery-random-scale
+        bash recovery-random-scale.sh
+        RECOVERY_RANDOM_SCALE="done"
+fi
+
 RC=$?
 title FINISHED
 echo "Finished at `date` in $((`date +%s` - $STARTTIME))s"
 RC=$?
 title FINISHED
 echo "Finished at `date` in $((`date +%s` - $STARTTIME))s"
index d98dc65..e77f7fb 100644 (file)
@@ -72,8 +72,10 @@ reboot_recover_node () {
        clients) for c in ${item//,/ }; do
                       shutdown_client $c
                       boot_node $c
        clients) for c in ${item//,/ }; do
                       shutdown_client $c
                       boot_node $c
+                      echo "Reintegrating $c"
+                      zconf_mount $c $MOUNT || return $?
                  done
                  done
-                 start_client_loads $list || return $?
+                 start_client_loads $item || return $?
                  ;;
        * )      error "reboot_recover_node: nodetype=$nodetype. Must be one of 'MDS', 'OST', or 'clients'."
                 exit 1;;
                  ;;
        * )      error "reboot_recover_node: nodetype=$nodetype. Must be one of 'MDS', 'OST', or 'clients'."
                 exit 1;;
diff --git a/lustre/tests/recovery-random-scale.sh b/lustre/tests/recovery-random-scale.sh
new file mode 100644 (file)
index 0000000..d624b0c
--- /dev/null
@@ -0,0 +1,303 @@
+#!/bin/bash
+
+# client failure does not affect other clients
+
+# Start load on clients (each client works on its own directory).
+# At defined (5-10 minutes) interval fail one random client and then fail mds.
+# Reintegrate failed client after recovery completed,
+# application errors are allowed for that client but not on other clients.
+# 10 minute intervals and verify that no application errors occur.
+
+# Test runs one of CLIENT_LOAD progs on remote clients.
+
+LUSTRE=${LUSTRE:-`dirname $0`/..}
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}
+. $LUSTRE/tests/test-framework.sh
+
+init_test_env $@
+
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+
+TESTSUITELOG=${TESTSUITELOG:-$TMP/recovery-random-scale}
+DEBUGLOG=$TESTSUITELOG.debug
+exec 2>$DEBUGLOG
+echo "--- env ---" >&2
+env >&2
+echo "--- env ---" >&2
+set -x
+
+[ "$SHARED_DIRECTORY" ] || \
+    { skip "$0: Empty SHARED_DIRECTORY" && exit 0; }
+
+[ -n "$CLIENTS" ] || { skip "$0 Need two or more remote clients" && exit 0; }
+[ $CLIENTCOUNT -ge 3 ] || \
+    { skip "$0 Need two or more clients, have $CLIENTCOUNT" && exit 0; }
+
+END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
+LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
+
+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
+
+build_test_filter
+
+check_and_setup_lustre
+rm -rf $DIR/[df][0-9]*
+
+# the test node needs to be insulated from a lustre failure as much as possible,
+# so not even loading the lustre modules is ideal.
+# -- umount lustre
+# -- remove hostname from clients list
+zconf_umount $(hostname) $MOUNT
+NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
+NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))
+
+check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}
+
+MDTS=$(get_facets MDS)
+
+if [ "$SLOW" = "no" ]; then
+    DURATION=${DURATION:-$((60 * 30))}
+    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))}
+else
+    DURATION=${DURATION:-$((60 * 60 * 24))}
+    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
+fi
+
+rm -f $END_RUN_FILE
+
+vmstatLOG=${TESTSUITELOG}_$(basename $0 .sh).vmstat
+
+numfailovers () {
+    local facet
+    local var
+
+    for facet in $MDTS ${failed_clients//,/ }; do
+        var=${facet}_nums
+        val=${!var}
+        if [ "$val" ] ; then
+            echo "$facet failed  over  $val times"
+        fi
+    done
+}
+
+# list is comma separated
+print_logs () {
+    local list=$1
+
+    do_nodes $list "node=\\\$(hostname)
+var=\\\${node}_load
+log=${TESTSUITELOG}_run_${!var}.sh-\\\$node.debug
+if [ -e \\\$log ] ; then
+echo Node \\\$node debug log:
+cat \\\$log
+fi"
+}
+
+summary_and_cleanup () {
+
+    local rc=$?
+    local var
+    trap 0
+
+    # Having not empty END_RUN_FILE means the failed loads only
+    if [ -s $END_RUN_FILE ]; then
+        echo "Found the END_RUN_FILE file: $END_RUN_FILE"
+        cat $END_RUN_FILE
+        local END_RUN_NODE=
+        read END_RUN_NODE < $END_RUN_FILE
+
+    # a client load will end (i.e. fail) if it finds
+    # the end run file.  that does not mean that that client load
+    # actually failed though.  the first node in the END_RUN_NODE is
+    # the one we are really interested in.
+        if [ -n "$END_RUN_NODE" ]; then
+            var=${END_RUN_NODE}_load
+            echo "Client load failed on node $END_RUN_NODE" 
+            echo
+            echo "client $END_RUN_NODE load stdout and debug files :
+              ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}
+              ${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}.debug"
+        fi
+        rc=1
+    fi
+
+
+    echo $(date +'%F %H:%M:%S') Terminating clients loads ...
+    echo "$0" >> $END_RUN_FILE
+    local result=PASS
+    [ $rc -eq 0 ] || result=FAIL
+
+    log "Duration:               $DURATION
+Server failover period: $SERVER_FAILOVER_PERIOD seconds
+Exited after:           $ELAPSED seconds
+Number of failovers before exit:
+$(numfailovers)
+Status: $result: rc=$rc"
+
+    # stop the vmstats on the OSTs
+    if [ "$VMSTAT" ]; then
+        do_nodes $(comma_list $(osts_nodes)) "test -f /tmp/vmstat.pid && \
+            { kill -s TERM \$(cat /tmp/vmstat.pid); rm -f /tmp/vmstat.pid; \
+            gzip -f9 $vmstatLOG-\$(hostname); }"
+    fi
+
+    # make sure the client loads die
+    do_nodes $NODES_TO_USE "set -x; test -f $LOAD_PID_FILE && \
+        { kill -s TERM \$(cat $LOAD_PID_FILE) || true; }"
+
+    # and free up the pdshes that started them, if any are still around
+    if [ -n "$CLIENT_LOAD_PIDS" ]; then
+        kill $CLIENT_LOAD_PIDS || true
+        sleep 5
+        kill -9 $CLIENT_LOAD_PIDS || true
+    fi
+
+    if [ $rc -ne 0 ]; then
+        print_logs $NODES_TO_USE
+    fi
+
+    if [ $rc -eq 0 ]; then
+        zconf_mount $(hostname) $MOUNT
+    else
+        error "exited with rc=$rc"
+    fi
+    exit $rc
+}
+
+#
+# MAIN 
+#
+log "-----============= $0 starting =============-----"
+
+trap summary_and_cleanup EXIT # INT
+
+ELAPSED=0
+
+# vmstat the osts
+if [ "$VMSTAT" ]; then
+    do_nodes $(comma_list $(osts_nodes)) "vmstat 1 > $vmstatLOG-\$(hostname) 2>/dev/null </dev/null & echo \$! > /tmp/vmstat.pid"
+fi
+
+# Start client loads.
+start_client_loads $NODES_TO_USE
+
+echo clients load pids:
+if ! do_nodes $NODES_TO_USE "set -x; echo \$(hostname): && cat $LOAD_PID_FILE"; then
+    if [ -e $DEBUGLOG ]; then
+        exec 2<&-
+        cat $DEBUGLOG
+        exit 3
+    fi
+fi
+
+START_TS=$(date +%s)
+CURRENT_TS=$START_TS
+
+MINSLEEP=${MINSLEEP:-120}
+REQFAIL_PERCENT=${REQFAIL_PERCENT:-3}  # bug17839 comment 62
+REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
+reqfail=0
+sleep=0
+
+# This is used for FAIL_CLIENT only
+ERRORS_OK="yes"
+while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
+
+    # In order to perform the 
+    # expected number of failovers, we need to account the following :
+    # 1) the time that has elapsed during the client load checking
+    # 2) time takes for failover
+
+    it_time_start=$(date +%s)
+    
+    FAIL_CLIENT=$(get_random_entry $NODES_TO_USE)
+    client_var=${FAIL_CLIENT}_nums
+
+    # store the list of failed clients
+    # lists are comma separated
+    failed_clients=$(expand_list $failed_clients $FAIL_CLIENT)
+
+    SERVERFACET=$(get_random_entry $MDTS)
+    var=${SERVERFACET}_nums
+
+    # Check that our client loads are still running. If any have died, 
+    # that means they have died outside of recovery, which is unacceptable.    
+
+    log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
+    ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD" 
+
+    if ! check_client_loads $NODES_TO_USE; then
+        exit 4
+    fi
+
+    log "FAIL CLIENT $FAIL_CLIENT ... "
+    shutdown_client $FAIL_CLIENT
+
+    log "Starting failover on $SERVERFACET"
+
+    facet_failover "$SERVERFACET" || exit 1
+    if ! wait_recovery_complete $SERVERFACET $((TIMEOUT * 10)); then 
+        echo "$SERVERFACET recovery is not completed!"
+        exit 7
+    fi
+    boot_node $FAIL_CLIENT
+    echo "Reintegrating $FAIL_CLIENT"
+    zconf_mount $FAIL_CLIENT $MOUNT || exit $?
+
+    # Increment the number of failovers
+    val=$((${!var} + 1))
+    eval $var=$val
+    val=$((${!client_var} + 1))
+    eval $client_var=$val
+
+    # load script on failed clients could create END_RUN_FILE
+    # We should remove it and ignore the failure if this
+    # file contains the failed client only.
+    # We can not use ERRORS_OK when start all loads at the start of this script
+    # because the application errors allowed for random failed client only, but
+    # not for all clients.
+    if [ -e $END_RUN_FILE ]; then
+        read END_RUN_NODE < $END_RUN_FILE
+        [[ $END_RUN_NODE = $FAIL_CLIENT ]] && 
+            rm -f $END_RUN_FILE || exit 13
+    fi
+   
+    restart_client_loads $FAIL_CLIENT $ERRORS_OK || exit $?
+
+    # Check that not failed clients loads are still running.
+    # No application failures should occur on clients that was not failed.
+
+    log "==== Checking the clients loads AFTER failed client reintegrated -- failure NOT OK"
+    if ! ERRORS_OK= check_client_loads $(exclude_items_from_list $NODES_TO_USE $FAIL_CLIENT); then
+        log "Client load failed. Exiting"
+        exit 5
+    fi
+
+    CURRENT_TS=$(date +%s)
+    ELAPSED=$((CURRENT_TS - START_TS))
+    sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
+
+    # keep count of the number of iterations when
+    # the time spent on failover and the two client-load checks exceeded 
+    # the value ( SERVER_FAILOVER_PERIOD - MINSLEEP )
+    if [ $sleep -lt $MINSLEEP ]; then
+        reqfail=$((reqfail +1))
+        log "WARNING: failover, client reintegration and check_client_loads time
+exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
+Failed to meet interval $reqfail times ( REQFAIL=$REQFAIL ); have sleep=$sleep"
+        [ $reqfail -gt $REQFAIL ] && exit 6 
+    fi  
+
+    log " Number of failovers:
+$(numfailovers)                and counting..."
+
+    if [ $sleep -gt 0 ]; then 
+        echo "sleeping $sleep seconds ... "
+        sleep $sleep
+    fi
+done
+
+exit 0
index e081f8d..f72a59f 100644 (file)
@@ -610,7 +610,7 @@ echo Stopping client \\\$(hostname) client $mnt opts:$force
 lsof -t $mnt || need_kill=no
 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
     pids=\\\$(lsof -t $mnt | sort -u);
 lsof -t $mnt || need_kill=no
 if [ "x$force" != "x" -a "x\\\$need_kill" != "xno" ]; then
     pids=\\\$(lsof -t $mnt | sort -u);
-    if [ -n \\\$pids ]; then
+    if [ -n \\\"\\\$pids\\\" ]; then
              kill -9 \\\$pids
     fi
 fi
              kill -9 \\\$pids
     fi
 fi
@@ -767,7 +767,7 @@ restart_client_loads () {
     for client in $clients; do
         check_client_load $client
         rc=${PIPESTATUS[0]}
     for client in $clients; do
         check_client_load $client
         rc=${PIPESTATUS[0]}
-        if [ "$rc" != 0 -a "$expectedfail"]; then
+        if [ "$rc" != 0 -a "$expectedfail" ]; then
             start_client_load $client
             echo "Restarted client load: on $client. Checking ..."
             check_client_load $client 
             start_client_load $client
             echo "Restarted client load: on $client. Checking ..."
             check_client_load $client 
@@ -1631,6 +1631,16 @@ exclude_items_from_list () {
     echo $(comma_list $list) 
 }
 
     echo $(comma_list $list) 
 }
 
+# list, expand  are the comma separated lists
+expand_list () {
+    local list=${1//,/ }
+    local expand=${2//,/ }
+    local expanded=
+
+    expanded=$(for i in $list $expand; do echo $i; done | sort -u)
+    echo $(comma_list $expanded)
+}
+
 absolute_path() {
     (cd `dirname $1`; echo $PWD/`basename $1`)
 }
 absolute_path() {
     (cd `dirname $1`; echo $PWD/`basename $1`)
 }
@@ -2236,7 +2246,7 @@ get_random_entry () {
 
     local nodes=($rnodes)
     local num=${#nodes[@]} 
 
     local nodes=($rnodes)
     local num=${#nodes[@]} 
-    local i=$((RANDOM * num  / 65536))
+    local i=$((RANDOM * num * 2 / 65536))
 
     echo ${nodes[i]}
 }
 
     echo ${nodes[i]}
 }