# Workloads dry run for several seconds; no failures will be introduced.
# This option is useful to verify the loads.
# -u is ignored in this case
+# -m
+# Fail all victim nodes simultaneously (power the whole victim set down and
+# back up as a group) instead of one randomly chosen victim at a time.
#
#
# ASSUMPTIONS
# applications are run in short loops so that their exit status can be waited
# for and checked within reasonable time by ha_wait_loads.
# The set of MPI and non-MPI workloads are configurable by parameters:
-# ha_mpi_loads
-# default set: dd, tar, iozone
# ha_nonmpi_loads
-# default set: ior, simul.
+# default set: dd, tar, iozone
+# ha_mpi_loads
+# default set: ior, simul, mdtest
#
# The number of clients run MPI loads is configured by parameter
# ha_mpi_instances. Only one client runs MPI workloads by default.
# ~ mpirun IOR
# ~ ha.sh (ha_repeat_mpi_load simul)
# ~ mpirun simul
+# ~ ha.sh (ha_repeat_mpi_load mdtest)
+# ~ mpirun mdtest
# ~ ... (one for each MPI load)
#
# ~ ha.sh (ha_repeat_nonmpi_load client2 dbench)
SIMUL=${SIMUL:-$(which simul 2> /dev/null || true)}
IOR=${IOR:-$(which IOR 2> /dev/null || true)}
+MDTEST=${MDTEST:-$(which mdtest 2> /dev/null || true)}
ior_blockSize=${ior_blockSize:-6g}
mpi_threads_per_client=${mpi_threads_per_client:-2}
declare ha_machine_file=$ha_tmp_dir/machine_file
declare ha_power_down_cmd=${POWER_DOWN:-"pm -0"}
declare ha_power_up_cmd=${POWER_UP:-"pm -1"}
+declare ha_power_delay=${POWER_DELAY:-60}
declare ha_failback_delay=${DELAY:-5}
declare ha_failback_cmd=${FAILBACK:-""}
declare ha_stripe_params=${STRIPEPARAMS:-"-c 0"}
+declare ha_dir_stripe_count=${DSTRIPECOUNT:-"1"}
+declare ha_mdt_index=${MDTINDEX:-"0"}
+declare ha_mdt_index_random=${MDTINDEXRAND:-false}
declare -a ha_clients
declare -a ha_servers
declare -a ha_victims
declare ha_load_timeout=$((60 * 10))
declare ha_workloads_only=false
declare ha_workloads_dry_run=false
+declare ha_simultaneous=false
declare ha_mpi_instances=${ha_mpi_instances:-1}
-declare ha_mpi_loads=${ha_mpi_loads="ior simul"}
+declare ha_mpi_loads=${ha_mpi_loads="ior simul mdtest"}
declare -a ha_mpi_load_tags=($ha_mpi_loads)
declare ha_ior_params=${IORP:-'" -b $ior_blockSize -t 2m -w -W -T 1"'}
declare ha_simul_params=${SIMULP:-'" -n 10"'}
+declare ha_mdtest_params=${MDTESTP:-'" -i 1 -n 1000"'}
declare ha_mpirun_options=${MPIRUN_OPTIONS:-""}
eval ha_params_ior=($ha_ior_params)
eval ha_params_simul=($ha_simul_params)
+eval ha_params_mdtest=($ha_mdtest_params)
declare ha_nparams_ior=${#ha_params_ior[@]}
declare ha_nparams_simul=${#ha_params_simul[@]}
+declare ha_nparams_mdtest=${#ha_params_mdtest[@]}
declare -A ha_mpi_load_cmds=(
- [ior]="$IOR -o {}/f.ior {params}"
- [simul]="$SIMUL {params} -d {}"
+ [ior]="$IOR -o {}/f.ior {params}"
+ [simul]="$SIMUL {params} -d {}"
+ [mdtest]="$MDTEST {params} -d {}"
)
declare ha_nonmpi_loads=${ha_nonmpi_loads="dd tar iozone"}
{
local opt
- while getopts hc:s:v:d:p:u:wr opt; do
+ while getopts hc:s:v:d:p:u:wrm opt; do
case $opt in
h)
ha_usage
r)
ha_workloads_dry_run=true
;;
+ m)
+ ha_simultaneous=true
+ ;;
\?)
ha_usage
exit 1
ha_dump_logs()
{
- local nodes=${1// /,}
- local file=/tmp/$(basename $0)-$$-$(date +%s).dk
- local lock=$ha_tmp_dir/lock-dump-logs
+ local nodes=${1// /,}
+ local file=/tmp/$(basename $0)-$$-$(date +%s).dk
+ local lock=$ha_tmp_dir/lock-dump-logs
+ local rc=0
- ha_lock "$lock"
- ha_info "Dumping lctl log to $file"
+ ha_lock "$lock"
+ ha_info "Dumping lctl log to $file"
#
# some nodes could crash, so
# do not exit with error if not all logs are dumped
#
- ha_on $nodes "lctl dk >$file" ||
+ ha_on $nodes "lctl dk >>$file" || rc=$?
+
+ [ $rc -eq 0 ] ||
ha_error "not all logs are dumped! Some nodes are unreachable."
ha_unlock "$lock"
}
local log=$ha_tmp_dir/$client-$tag
local rc=0
local nr_loops=0
+ local avg_loop_time=0
local start_time=$(date +%s)
cmd=${cmd//"{}"/$dir}
local machines="-machinefile $ha_machine_file"
while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
{
- ha_on $client mkdir -p "$dir" &&
+ local mdt_index
+ if $ha_mdt_index_random && [ $ha_mdt_index -ne 0 ]; then
+ mdt_index=$(ha_rand $ha_mdt_index)
+ else
+ mdt_index=$ha_mdt_index
+ fi
+ ha_on $client $LFS mkdir -i$mdt_index -c$ha_dir_stripe_count "$dir" &&
+ ha_on $client $LFS getdirstripe "$dir" &&
ha_on $client chmod a+xwr $dir &&
ha_on $client "su mpiuser sh -c \" $mpirun $ha_mpirun_options \
-np $((${#ha_clients[@]} * mpi_threads_per_client )) \
nr_loops=$((nr_loops + 1))
done
- avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
+ [ $nr_loops -ne 0 ] &&
+ avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
}
ha_repeat_nonmpi_load()
{
- local client=$1
- local load=$2
- local status=$3
- local tag=${ha_nonmpi_load_tags[$load]}
- local cmd=${ha_nonmpi_load_cmds[$load]}
- local dir=$ha_test_dir/$client-$tag
- local log=$ha_tmp_dir/$client-$tag
- local rc=0
- local nr_loops=0
- local start_time=$(date +%s)
+ local client=$1
+ local load=$2
+ local status=$3
+ local tag=${ha_nonmpi_load_tags[$load]}
+ local cmd=${ha_nonmpi_load_cmds[$load]}
+ local dir=$ha_test_dir/$client-$tag
+ local log=$ha_tmp_dir/$client-$tag
+ local rc=0
+ local nr_loops=0
+ local avg_loop_time=0
+ local start_time=$(date +%s)
cmd=${cmd//"{}"/$dir}
nr_loops=$((nr_loops + 1))
done
- avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
+ [ $nr_loops -ne 0 ] &&
+ avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
- ha_info "$tag on $client stopped: rc $rc avg loop time ${avg_loop_time}s"
+ ha_info "$tag on $client stopped: rc $rc avg loop time ${avg_loop_time}s"
}
ha_start_nonmpi_loads()
ha_power_down()
{
- local node=$1
+ local nodes=$1
+ local rc=1
+ local i
- ha_info "Powering down $node"
- $ha_power_down_cmd $node
+ ha_info "Powering down $nodes"
+ for i in $(seq 1 5); do
+ $ha_power_down_cmd $nodes && rc=0 && break
+ sleep $ha_power_delay
+ done
+
+ [ $rc -eq 0 ] || ha_info "Failed to power down after $i attempts"
}
ha_power_up()
{
- local node=$1
+ local nodes=$1
+ local rc=1
+ local i
- ha_info "Powering up $node"
- $ha_power_up_cmd $node
+ ha_info "Powering up $nodes"
+ for i in $(seq 1 5); do
+ $ha_power_up_cmd $nodes && rc=0 && break
+ sleep $ha_power_delay
+ done
+
+ [ $rc -eq 0 ] || ha_info "Failed to power up after $i attempts"
}
#
ha_aim()
{
- local i=$(ha_rand ${#ha_victims[@]})
+ local i
+ local nodes
- echo -n ${ha_victims[$i]}
+ if $ha_simultaneous ; then
+ nodes=$(echo ${ha_victims[@]})
+ nodes=${nodes// /,}
+ else
+ i=$(ha_rand ${#ha_victims[@]})
+ nodes=${ha_victims[$i]}
+ fi
+
+ echo -n $nodes
}
-ha_wait_node()
+ha_wait_nodes()
{
- local node=$1
+ local nodes=$1
local end=$(($(date +%s) + 10 * 60))
- ha_info "Waiting for $node to boot up"
- until ha_on $node hostname >/dev/null 2>&1 ||
+ ha_info "Waiting for $nodes to boot up"
+ until ha_on $nodes hostname >/dev/null 2>&1 ||
[ -e "$ha_stop_file" ] ||
(($(date +%s) >= end)); do
ha_sleep 1 >/dev/null
ha_failback()
{
- local node=$1
- ha_info "Failback resources on $node in $ha_failback_delay sec"
+ local nodes=$1
+ ha_info "Failback resources on $nodes in $ha_failback_delay sec"
ha_sleep $ha_failback_delay
[ "$ha_failback_cmd" ] ||
return 0
}
- $ha_failback_cmd $node
+ $ha_failback_cmd $nodes
}
ha_summarize()
ha_killer()
{
- local node
+ local nodes
while (($(date +%s) < ha_start_time + ha_expected_duration)) &&
[ ! -e "$ha_stop_file" ]; do
ha_info "---------------8<---------------"
- $ha_workloads_only || node=$(ha_aim)
+ $ha_workloads_only || nodes=$(ha_aim)
- ha_info "Failing $node"
+ ha_info "Failing $nodes"
$ha_workloads_only && ha_info " is skipped: workload only..."
ha_sleep $(ha_rand $ha_max_failover_period)
- $ha_workloads_only || ha_power_down $node
+ $ha_workloads_only || ha_power_down $nodes
ha_sleep 10
ha_wait_loads || return
if [ -e $ha_stop_file ]; then
- $ha_workloads_only || ha_power_up $node
+ $ha_workloads_only || ha_power_up $nodes
break
fi
- ha_info "Bringing $node back"
+ ha_info "Bringing $nodes back"
ha_sleep $(ha_rand 10)
$ha_workloads_only ||
{
- ha_power_up $node
- ha_wait_node $node
- ha_failback $node
+ ha_power_up $nodes
+ ha_wait_nodes $nodes
+ ha_failback $nodes
}
#