X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Frecovery-mds-scale.sh;h=1c7cc84b51aea9c30355d757155b6402248ca158;hb=3c42a7fca2a101298be18733564e92549b35ef95;hp=7440ed5b75161c692aa0f81f67ecda179fcb02b1;hpb=332d4326393357665279caf9d31d0da19a4834ab;p=fs%2Flustre-release.git

diff --git a/lustre/tests/recovery-mds-scale.sh b/lustre/tests/recovery-mds-scale.sh
index 7440ed5..1c7cc84 100644
--- a/lustre/tests/recovery-mds-scale.sh
+++ b/lustre/tests/recovery-mds-scale.sh
@@ -30,7 +30,7 @@ set -x
 [ $CLIENTCOUNT -ge 3 ] || \
     { skip "$0 Need two or more clients, have $CLIENTCOUNT" && exit 0; }
 
-END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY}/end_run_file}
+END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
 LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
 
 remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
@@ -47,25 +47,30 @@ rm -rf $DIR/[df][0-9]*
 # -- remove hostname from clients list
 zconf_umount $(hostname) $MOUNT
 NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
-NODES_TO_USE=$(exclude_item_from_list $NODES_TO_USE $(hostname))
+NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))
 
 check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}
 
-MDTS=""
-for ((i=1; i<=$MDSCOUNT; i++)) do
-    MDTS="$MDTS mds$i"
-done
-MDTS=$(comma_list $MDTS)
-
-OSTS=""
-for ((i=1; i<=$OSTCOUNT; i++)) do
-    OSTS="$OSTS ost$i"
-done
-OSTS=$(comma_list $OSTS)
+MDTS=$(get_facets MDS)
+OSTS=$(get_facets OST)
 
 ERRORS_OK=""    # No application failures should occur during this test.
 FLAVOR=${FLAVOR:-"MDS"}
 
+if [ "$FLAVOR" == "MDS" ]; then
+    SERVERS=$MDTS
+else
+    SERVERS=$OSTS
+fi
+
+if [ "$SLOW" = "no" ]; then
+    DURATION=${DURATION:-$((60 * 30))}
+    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))}
+else
+    DURATION=${DURATION:-$((60 * 60 * 24))}
+    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
+fi
+
 rm -f $END_RUN_FILE
 
 vmstatLOG=${TESTSUITELOG}_$(basename $0 .sh).vmstat
@@ -101,7 +106,7 @@ summary_and_cleanup () {
     # actually failed though. the first node in the END_RUN_NODE is
     # the one we are really interested in.
     if [ -n "$END_RUN_NODE" ]; then
-        var=${END_RUN_NODE}_load
+        var=$(client_var_name $END_RUN_NODE)_load
         echo "Client load failed on node $END_RUN_NODE"
         echo
         echo "client $END_RUN_NODE load stdout and debug files :
@@ -152,7 +157,6 @@ log "-----============= $0 starting =============-----"
 
 trap summary_and_cleanup EXIT INT
 
-DURATION=${DURATION:-$((60*60*24))}
 ELAPSED=0
 NUM_FAILOVERS=0
 
@@ -176,16 +180,6 @@ fi
 START_TS=$(date +%s)
 CURRENT_TS=$START_TS
 
-if [ "$FLAVOR" == "MDS" ]; then
-    SERVER_FAILOVER_PERIOD=$MDS_FAILOVER_PERIOD
-    SERVERS=$MDTS
-else
-    SERVER_FAILOVER_PERIOD=$OSS_FAILOVER_PERIOD
-    SERVERS=$OSTS
-fi
-
-SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
-
 MINSLEEP=${MINSLEEP:-120}
 REQFAIL_PERCENT=${REQFAIL_PERCENT:-3}   # bug17839 comment 62
 REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
@@ -213,7 +207,20 @@ while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
         exit 4
     fi
 
-    log "Starting failover on $SERVERNODE"
+    log "Wait $SERVERFACET recovery complete before doing next failover ...."
+    if [[ $NUM_FAILOVERS != 0 ]]; then
+        if ! wait_recovery_complete $SERVERFACET ; then
+            echo "$SERVERFACET recovery is not completed!"
+            exit 7
+        fi
+    fi
+
+    log "Checking clients are in FULL state before doing next failover"
+    if ! wait_clients_import_state $NODES_TO_USE $SERVERFACET FULL; then
+        echo "Clients import not FULL, please consider to increase SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD !"
+
+    fi
+    log "Starting failover on $SERVERFACET"
 
     facet_failover "$SERVERFACET" || exit 1