declare ha_tmp_dir=/tmp/$(basename $0)-$$
declare ha_stop_file=$ha_tmp_dir/stop
declare ha_fail_file=$ha_tmp_dir/fail
+# captured "pm -x -q" output, inspected by ha_powermanage()
+declare ha_pm_states=$ha_tmp_dir/ha_pm_states
declare ha_status_file_prefix=$ha_tmp_dir/status
declare -a ha_status_files
declare ha_machine_file=$ha_tmp_dir/machine_file
declare ha_power_down_cmd=${POWER_DOWN:-"pm -0"}
declare ha_power_up_cmd=${POWER_UP:-"pm -1"}
declare ha_power_delay=${POWER_DELAY:-60}
+# delay (or CRM-poll deadline) observed before powering a victim back up
+declare ha_node_up_delay=${NODE_UP_DELAY:-10}
+# host from which PowerMan (pm) queries are issued
+declare ha_pm_host=${PM_HOST:-$(hostname)}
declare ha_failback_delay=${DELAY:-5}
declare ha_failback_cmd=${FAILBACK:-""}
declare ha_stripe_params=${STRIPEPARAMS:-"-c 0"}
+# striping and MDT placement for the top-level test directories
+declare ha_test_dir_stripe_count=${TDSTRIPECOUNT:-"1"}
+declare ha_test_dir_mdt_index=${TDMDTINDEX:-"0"}
+declare ha_test_dir_mdt_index_random=${TDMDTINDEXRAND:-false}
declare ha_dir_stripe_count=${DSTRIPECOUNT:-"1"}
declare ha_mdt_index=${MDTINDEX:-"0"}
declare ha_mdt_index_random=${MDTINDEXRAND:-false}
declare -a ha_clients
declare -a ha_servers
declare -a ha_victims
+# failover partners of ha_victims, index-aligned (-f option)
+declare -a ha_victims_pair
declare ha_test_dir=/mnt/lustre/$(basename $0)-$$
+# ha_test_dirs (env) may list several mount points; defaults to the
+# single legacy $ha_test_dir
+declare -a ha_testdirs=(${ha_test_dirs="$ha_test_dir"})
+
+# make every test dir unique per run by appending <script>-<pid>
+for ((i=0; i<${#ha_testdirs[@]}; i++)); do
+ echo I=$i ${ha_testdirs[i]}
+ ha_testdirs[i]="${ha_testdirs[i]}/$(basename $0)-$$"
+ echo i=$i ${ha_testdirs[i]}
+done
+
+# CLEANUP=false preserves the test directories after a passing run
+declare ha_cleanup=${CLEANUP:-true}
declare ha_start_time=$(date +%s)
declare ha_expected_duration=$((60 * 60 * 24))
declare ha_max_failover_period=10
declare ha_nr_loops=0
declare ha_stop_signals="SIGINT SIGTERM SIGHUP"
-declare ha_load_timeout=$((60 * 10))
+# workload-status wait, now tunable via LOAD_TIMEOUT
+declare ha_load_timeout=${LOAD_TIMEOUT:-$((60 * 10))}
declare ha_workloads_only=false
declare ha_workloads_dry_run=false
declare ha_simultaneous=false
declare ha_mpi_loads=${ha_mpi_loads="ior simul mdtest"}
declare -a ha_mpi_load_tags=($ha_mpi_loads)
declare -a ha_mpiusers=(${ha_mpi_users="mpiuser"})
+# parallel tables derived from ha_mpi_users "user[:mpirun-options]" entries
+declare -a ha_users
+declare -A ha_mpiopts
+
+for ((i=0; i<${#ha_mpiusers[@]}; i++)); do
+ u=${ha_mpiusers[i]%%:*}
+ o=""
+ # user gets empty option if ha_mpi_users does not specify it explicitly
+ [[ ${ha_mpiusers[i]} =~ : ]] && o=${ha_mpiusers[i]##*:}
+ ha_users[i]=$u
+ ha_mpiopts[$u]+=" $o"
+done
+# NOTE(review): associative-array key order is unspecified, so this
+# dedups repeated users but may reorder them -- confirm harmless.
+ha_users=(${!ha_mpiopts[@]})
declare ha_ior_params=${IORP:-'" -b $ior_blockSize -t 2m -w -W -T 1"'}
declare ha_simul_params=${SIMULP:-'" -n 10"'}
declare ha_mpirun_options=${MPIRUN_OPTIONS:-""}
declare ha_clients_stripe=${CLIENTSSTRIPE:-'"$STRIPEPARAMS"'}
declare ha_nclientsset=${NCLIENTSSET:-1}
+# when N > 0, every instance whose index n has n % N == 0 is expected
+# to fail (mustpass=0)
+declare ha_ninstmustfail=${NINSTMUSTFAIL:-0}
declare ha_racer_params=${RACERP:-"MDSCOUNT=1"}
+# Print command-line usage (now including the -f failover-pair option).
ha_usage()
{
- ha_info "Usage: $0 -c HOST[,...] -s HOST[,...]" \
- "-v HOST[,...] [-d DIRECTORY] [-u SECONDS]"
+ ha_info "Usage: $0 -c HOST[,...] -s HOST[,...]" \
+ "-v HOST[,...] -f HOST[,...] [-d DIRECTORY] [-u SECONDS]"
}
ha_process_arguments()
{
local opt
- while getopts hc:s:v:d:p:u:wrm opt; do
+ while getopts hc:s:v:d:p:u:wrmf: opt; do
case $opt in
h)
ha_usage
m)
ha_simultaneous=true
;;
+ f)
+ ha_victims_pair=(${OPTARG//,/ })
+ ;;
\?)
ha_usage
exit 1
# -S is to be used here to track the
# remote command return values
#
- pdsh -S -w $nodes PATH=/usr/local/sbin:/usr/local/bin:/sbin:\
-/bin:/usr/sbin:/usr/bin "$@" ||
+ pdsh -S -w $nodes "PATH=/usr/local/sbin:/usr/local/bin:/sbin:\
+/bin:/usr/sbin:/usr/bin; $@" ||
rc=$?
return $rc
}
touch "$ha_stop_file"
trap 0
if [ -e "$ha_fail_file" ]; then
- ha_info "Test directory $ha_test_dir not removed"
+ ha_info "Test directories ${ha_testdirs[@]} not removed"
ha_info "Temporary directory $ha_tmp_dir not removed"
else
- ha_on ${ha_clients[0]} rm -rf "$ha_test_dir"
+ $ha_cleanup &&
+ ha_on ${ha_clients[0]} rm -rf ${ha_testdirs[@]} ||
+ ha_info "Test directories ${ha_testdirs[@]} not removed"
ha_info "Please find the results in the directory $ha_tmp_dir"
fi
}
local machines=$5
local stripeparams=$6
local mpiuser=$7
+ local mustpass=$8
+ local mpirunoptions=$9
+ local test_dir=${10}
local tag=${ha_mpi_load_tags[$load]}
local cmd=${ha_mpi_load_cmds[$tag]}
- local dir=$ha_test_dir/$client-$tag
+ local dir=$test_dir/$client-$tag
local log=$ha_tmp_dir/$client-$tag
local rc=0
local nr_loops=0
cmd=${cmd//"{}"/$dir}
cmd=${cmd//"{params}"/$parameter}
+ [[ -n "$ha_postcmd" ]] && ha_postcmd=${ha_postcmd//"{}"/$dir}
+ [[ -n "$ha_precmd" ]] && ha_precmd=${ha_precmd//"{}"/$dir}
ha_info "Starting $tag"
machines="-machinefile $machines"
while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
+ ha_info "$client Starts: $mpiuser: $cmd" 2>&1 | tee -a $log
{
local mdt_index
if $ha_mdt_index_random && [ $ha_mdt_index -ne 0 ]; then
else
mdt_index=$ha_mdt_index
fi
+ [[ -n "$ha_precmd" ]] && ha_info "$ha_precmd" &&
+ ha_on $client "$ha_precmd" >>"$log" 2>&1
ha_on $client $LFS mkdir -i$mdt_index -c$ha_dir_stripe_count "$dir" &&
ha_on $client $LFS getdirstripe "$dir" &&
ha_on $client $LFS setstripe $stripeparams $dir &&
ha_on $client $LFS getstripe $dir &&
ha_on $client chmod a+xwr $dir &&
- ha_on $client "su $mpiuser sh -c \" $mpirun $ha_mpirun_options \
- -np $((${#ha_clients[@]} * mpi_threads_per_client )) \
- $machines $cmd \" " &&
+ ha_on $client "su $mpiuser sh -c \" $mpirun $mpirunoptions \
+ -np $((${#ha_clients[@]} * mpi_threads_per_client / ha_nclientsset)) \
+ $machines $cmd \" " || rc=$?
+ [[ -n "$ha_postcmd" ]] && ha_info "$ha_postcmd" &&
+ ha_on $client "$ha_postcmd" >>"$log" 2>&1
+ (( ((rc == 0)) && (( mustpass != 0 )) )) ||
+ (( ((rc != 0)) && (( mustpass == 0 )) )) &&
ha_on $client rm -rf "$dir";
} >>"$log" 2>&1 || rc=$?
- ha_info rc=$rc
-
- if ((rc != 0)); then
+ ha_info $client: rc=$rc mustpass=$mustpass
+
+ # mustpass=0 means that failure is expected
+ if (( rc !=0 )); then
+ if (( mustpass != 0 )); then
+ touch "$ha_fail_file"
+ touch "$ha_stop_file"
+ ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
+ else
+ # Ok to fail
+ rc=0
+ fi
+ elif (( mustpass == 0 )); then
touch "$ha_fail_file"
touch "$ha_stop_file"
ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
fi
- echo $rc >"$status"
+ echo rc=$rc mustpass=$mustpass >"$status"
nr_loops=$((nr_loops + 1))
done
[ $nr_loops -ne 0 ] &&
avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
- ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
+ ha_info "$tag stopped: rc=$rc mustpass=$mustpass \
+ avg loop time $avg_loop_time"
}
ha_start_mpi_loads()
local m
local -a mach
local mpiuser
+ local nmpi
# ha_mpi_instances defines the number of
# clients start mpi loads; should be <= ${#ha_clients[@]}
scp $ha_machine_file* $client:$dirname
done
+ local ndir
for ((n = 0; n < $inst; n++)); do
client=${ha_clients[n]}
- mpiuser=${ha_mpiusers[$((n % ${#ha_mpiusers[@]}))]}
+ nmpi=$((n % ${#ha_users[@]}))
+ mpiuser=${ha_users[nmpi]}
+ ndir=$((n % ${#ha_testdirs[@]}))
+ test_dir=${ha_testdirs[ndir]}
for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
tag=${ha_mpi_load_tags[$load]}
status=$ha_status_file_prefix-$tag-$client
local stripe=${!aref}
local m=$(( n % ha_nclientsset))
machines=${mach[m]}
- ha_repeat_mpi_load $client $load $status "$parameter" $machines "$stripe" "$mpiuser" &
+ local mustpass=1
+ [[ $ha_ninstmustfail == 0 ]] ||
+ mustpass=$(( n % ha_ninstmustfail ))
+ ha_repeat_mpi_load $client $load $status "$parameter" \
+ $machines "$stripe" "$mpiuser" "$mustpass" \
+ "${ha_mpiopts[$mpiuser]} $ha_mpirun_options" "$test_dir" &
ha_status_files+=("$status")
done
done
local status=$3
local tag=${ha_nonmpi_load_tags[$load]}
local cmd=${ha_nonmpi_load_cmds[$tag]}
- local dir=$ha_test_dir/$client-$tag
+ local test_dir=$4
+ local dir=$test_dir/$client-$tag
local log=$ha_tmp_dir/$client-$tag
local rc=0
local nr_loops=0
local avg_loop_time=0
local start_time=$(date +%s)
- cmd=${cmd//"{}"/$dir}
+ cmd=${cmd//"{}"/$dir}
- ha_info "Starting $tag on $client"
+ ha_info "Starting $tag on $client on $dir"
while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
+ ha_info "$client Starts: $cmd" 2>&1 | tee -a $log
ha_on $client "mkdir -p $dir && \
$cmd && \
rm -rf $dir" >>"$log" 2>&1 || rc=$?
+# Start one background instance of every non-MPI workload on each
+# client, spreading clients round-robin over the test directories and
+# recording each instance's status file in ha_status_files.
ha_start_nonmpi_loads()
{
- local client
- local load
- local tag
- local status
-
- for client in ${ha_clients[@]}; do
- for ((load = 0; load < ${#ha_nonmpi_load_tags[@]}; load++)); do
- tag=${ha_nonmpi_load_tags[$load]}
- status=$ha_status_file_prefix-$tag-$client
- ha_repeat_nonmpi_load $client $load $status &
- ha_status_files+=("$status")
- done
- done
+ local client
+ local load
+ local tag
+ local status
+ local n
+ local test_dir
+ local ndir
+
+ for (( n = 0; n < ${#ha_clients[@]}; n++)); do
+ client=${ha_clients[n]}
+ # client n uses test dir n modulo the number of dirs
+ ndir=$((n % ${#ha_testdirs[@]}))
+ test_dir=${ha_testdirs[ndir]}
+ for ((load = 0; load < ${#ha_nonmpi_load_tags[@]}; load++)); do
+ tag=${ha_nonmpi_load_tags[$load]}
+ status=$ha_status_file_prefix-$tag-$client
+ ha_repeat_nonmpi_load $client $load $status $test_dir &
+ ha_status_files+=("$status")
+ done
+ done
+}
+
+# Optional background command: ha_bgcmd (env) is an arbitrary shell
+# command run repeatedly for the duration of the test; its output is
+# appended to ha_bgcmd_log.
+declare ha_bgcmd=${ha_bgcmd:-""}
+declare ha_bgcmd_log=$ha_tmp_dir/bgcmdlog
+
+# Launch $ha_bgcmd in a background loop until the stop file appears.
+# Sets CMD_BG_PID so ha_stop_loads() can wait for the loop to exit.
+ha_cmd_bg () {
+	[[ -z "$ha_bgcmd" ]] && return 0
+	# Substitute "{}" placeholders with the test directories.
+	# NOTE(review): ${var//pat/rep} replaces every "{}" with the first
+	# directory on the first iteration, leaving nothing for later
+	# iterations when several dirs are configured -- confirm intended.
+	for ((i=0; i<${#ha_testdirs[@]}; i++)); do
+		ha_bgcmd=${ha_bgcmd//"{}"/${ha_testdirs[i]}}
+	done
+
+	ha_info "BG cmd: $ha_bgcmd"
+	while true; do
+		[ -f $ha_stop_file ] &&
+			ha_info "$ha_stop_file found! $ha_bgcmd not started" &&
+			break
+		eval $ha_bgcmd 2>&1 | tee -a $ha_bgcmd_log
+		sleep 1
+	done &
+	CMD_BG_PID=$!
+	ha_info CMD BG PID: $CMD_BG_PID
+	# diagnostic only: show the background loop in the process table
+	ps aux | grep $CMD_BG_PID
+}
ha_lfsck_bg () {
ha_start_loads()
{
+ ha_cmd_bg
$ha_lfsck_bg && ha_lfsck_bg
trap ha_trap_stop_signals $ha_stop_signals
ha_start_nonmpi_loads
ha_stop_loads()
{
touch $ha_stop_file
+ [[ -n $CMD_BG_PID ]] && wait $CMD_BG_PID || true
# true because of lfsck_bg could be stopped already
$ha_lfsck_bg && wait $LFSCK_BG_PID || true
trap - $ha_stop_signals
local file
local end=$(($(date +%s) + ha_load_timeout))
- ha_info "Waiting for workload status"
+ ha_info "Waiting $ha_load_timeout sec for workload status..."
rm -f "${ha_status_files[@]}"
#
done
}
+# Verify via PowerMan ("pm -x -q" run on $ha_pm_host) that all $nodes
+# (comma-separated) reached $expected_state ("on"/"off").  Returns 0
+# when every node matches, 1 otherwise (dumping the collected states).
+ha_powermanage()
+{
+ local nodes=$1
+ local expected_state=$2
+ local state
+ local -a states
+ local i
+ local rc=0
+
+ # store pm -x -q $nodes results in a file to have
+ # more information about nodes statuses
+ ha_on $ha_pm_host pm -x -q $nodes | awk '{print $2 $3}' > $ha_pm_states
+ # exit status of the ha_on/pm stage (awk's is ignored); only echoed
+ # here -- a pm failure surfaces as leftover nodes below
+ rc=${PIPESTATUS[0]}
+ echo pmrc=$rc
+
+ # strip every node already in the expected state from the list,
+ # then tidy up leftover commas; anything remaining did not comply
+ while IFS=": " read node state; do
+ [[ "$state" = "$expected_state" ]] && {
+ nodes=${nodes/$node/}
+ nodes=${nodes//,,/,}
+ nodes=${nodes/#,}
+ nodes=${nodes/%,}
+ }
+ done < $ha_pm_states
+
+ if [ -n "$nodes" ]; then
+ cat $ha_pm_states
+ return 1
+ fi
+ return 0
+}
+
+# Run the configured power-down command against $nodes.  The special
+# value POWER_DOWN=sysrqcrash crashes the nodes via sysrq instead of a
+# power-management tool; it is backgrounded since crashed nodes never
+# return a status to pdsh.
+ha_power_down_cmd_fn()
+{
+ local nodes=$1
+ local cmd
+
+ case $ha_power_down_cmd in
+ # format is: POWER_DOWN=sysrqcrash
+ sysrqcrash) cmd="pdsh -S -w $nodes 'echo c > /proc/sysrq-trigger' &" ;;
+ *) cmd="$ha_power_down_cmd $nodes" ;;
+ esac
+
+ # eval so the trailing '&' in the sysrqcrash branch takes effect
+ eval $cmd
+}
+
ha_power_down()
{
local nodes=$1
local rc=1
local i
+ local state
+
+ # expected PowerMan state after the power-down command completes;
+ # NOTE(review): commands other than pm/sysrqcrash expect "on" --
+ # presumably failover agents that do not cut power; confirm.
+ case $ha_power_down_cmd in
+ *pm*) state=off ;;
+ sysrqcrash) state=off ;;
+ *) state=on;;
+ esac
if $ha_lfsck_bg && [[ ${nodes//,/ /} =~ $ha_lfsck_node ]]; then
ha_info "$ha_lfsck_node down, delay start LFSCK"
ha_lock $ha_lfsck_lock
fi
- ha_info "Powering down $nodes"
- for i in $(seq 1 5); do
- $ha_power_down_cmd $nodes && rc=0 && break
+ ha_info "Powering down $nodes : cmd: $ha_power_down_cmd"
+ # retry up to 10 times, verifying the state via ha_powermanage
+ for (( i=0; i<10; i++ )) {
+ ha_info "attempt: $i"
+ ha_power_down_cmd_fn $nodes &&
+ ha_powermanage $nodes $state && rc=0 && break
sleep $ha_power_delay
+ }
+
+ # failure to power down is fatal to the whole test run
+ [ $rc -eq 0 ] || {
+ ha_info "Failed Powering down in $i attempts:" \
+ "$ha_power_down_cmd"
+ cat $ha_pm_states
+ exit 1
+ }
+}
+
+# Echo the failover partner of $node as given by the -f option
+# (ha_victims_pair is index-aligned with ha_victims).
+ha_get_pair()
+{
+ local node=$1
+ local i
+
+ for ((i=0; i<${#ha_victims[@]}; i++)) {
+ [[ ${ha_victims[i]} == $node ]] && echo ${ha_victims_pair[i]} &&
+ return
+ }
+ # NOTE(review): when no match is found, i always equals the array
+ # length here, so this guard never suppresses the error -- confirm.
+ [[ $i -ne ${#ha_victims[@]} ]] ||
+ ha_error "No pair found!"
+}
+
+# Before powering victims back up, wait until each victim's failover
+# partner reports it OFFLINE in CRM (crm_mon); with no -f pairs given,
+# just sleep NODE_UP_DELAY seconds.  Aborts the test run on timeout.
+ha_power_up_delay()
+{
+ local nodes=$1
+ local end=$(($(date +%s) + ha_node_up_delay))
+ local rc
+
+ if [[ ${#ha_victims_pair[@]} -eq 0 ]]; then
+ ha_sleep $ha_node_up_delay
+ return 0
+ fi
+
+ # Check CRM status on failover pair
+ # NOTE(review): with the 60s poll below, delays under 60s allow only
+ # a single CRM check before the deadline.
+ while (($(date +%s) <= end)); do
+ rc=0
+ for n in ${nodes//,/ }; do
+ local pair=$(ha_get_pair $n)
+ local status=$(ha_on $pair crm_mon -1rQ | \
+ grep -w $n | head -1)
+
+ ha_info "$n pair: $pair status: $status"
+ # on a failed match $? is 1, counting this victim
+ # as not yet OFFLINE
+ [[ "$status" == *OFFLINE* ]] ||
+ rc=$((rc + $?))
+ ha_info "rc: $rc"
+ done
+
+ if [[ $rc -eq 0 ]]; then
+ ha_info "CRM: Got all victims status OFFLINE"
+ return 0
+ fi
+ sleep 60
done
- [ $rc -eq 0 ] || ha_info "Failed Powering down in $i attempts"
+ ha_info "$nodes CRM status not OFFLINE"
+ # dump each pair's full CRM view to aid debugging, then abort
+ for n in ${nodes//,/ }; do
+ local pair=$(ha_get_pair $n)
+
+ ha_info "CRM --- $n"
+ ha_on $pair crm_mon -1rQ
+ done
+ ha_error "CRM: some of $nodes are not OFFLINE in $ha_node_up_delay sec"
+ exit 1
}
ha_power_up()
local rc=1
local i
- ha_info "Powering up $nodes"
- for i in $(seq 1 5); do
- $ha_power_up_cmd $nodes && rc=0 && break
+ # wait for the victims' failover pairs to see them OFFLINE first
+ ha_power_up_delay $nodes
+ ha_info "Powering up $nodes : cmd: $ha_power_up_cmd"
+ # retry up to 10 times, verifying "on" state via ha_powermanage
+ for (( i=0; i<10; i++ )) {
+ ha_info "attempt: $i"
+ $ha_power_up_cmd $nodes &&
+ ha_powermanage $nodes on && rc=0 && break
sleep $ha_power_delay
- done
+ }
- [ $rc -eq 0 ] || ha_info "Failed Powering up in $i attempts"
+ # failure to power back up is fatal to the whole run
+ [ $rc -eq 0 ] || {
+ ha_info "Failed Powering up in $i attempts: $ha_power_up_cmd"
+ cat $ha_pm_states
+ exit 1
+ }
}
#
"START: $0: $(date +%H:%M:%S' '%s)"
trap ha_trap_exit EXIT
mkdir "$ha_tmp_dir"
- ha_on ${ha_clients[0]} mkdir "$ha_test_dir"
- ha_on ${ha_clients[0]} " \
- $LFS setstripe $ha_stripe_params $ha_test_dir"
+
+ local mdt_index
+ if $ha_test_dir_mdt_index_random &&
+ [ $ha_test_dir_mdt_index -ne 0 ]; then
+ mdt_index=$(ha_rand $ha_test_dir_mdt_index)
+ else
+ mdt_index=$ha_test_dir_mdt_index
+ fi
+
+ local dir
+ test_dir=${ha_testdirs[0]}
+ ha_on ${ha_clients[0]} "$LFS mkdir -i$mdt_index \
+ -c$ha_test_dir_stripe_count $test_dir"
+ for ((i=0; i<${#ha_testdirs[@]}; i++)); do
+ test_dir=${ha_testdirs[i]}
+ ha_on ${ha_clients[0]} $LFS getdirstripe $test_dir
+ ha_on ${ha_clients[0]} " \
+ $LFS setstripe $ha_stripe_params $test_dir"
+ done
ha_start_loads
ha_wait_loads