2 # -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
3 # vim:shiftwidth=4:softtabstop=4:tabstop=4:
7 # ha.sh - test Lustre HA (aka failover) configurations
15 # ha.sh tests Lustre HA (aka failover) configurations with a CRM.
23 # Specify client nodes.
26 # Specify server nodes.
29 # Specify victim nodes to be rebooted.
32 # Choose a parent of the test directory. "/mnt/lustre" if not specified.
35 # Define a duration for the test. 86400 seconds if not specified.
38 # Define a max failover period. 10 minutes if not set.
41 # Only run the workloads; no failure will be introduced.
42 # -v, -s are ignored in this case.
44 # Workloads dry run for several seconds; no failures will be introduced.
45 # This option is useful to verify the loads.
46 # -u is ignored in this case
51 # A Lustre file system is up and mounted on all client nodes. This script
52 # does not mount or unmount any Lustre targets or clients, let alone format
55 # Each target has a failnode, so that workloads can continue after a power
58 # The CRM can be configured in either of two ways:
60 # Targets are automatically failed back when their primary node is back. This
61 # assumption avoids calling CRM-specific commands to trigger failbacks, making
62 # this script more CRM-neutral.
64 # Targets are not automatically failed back when their primary node is back.
65 # CRM-specific command is executed to trigger failbacks.
67 # A crash dump mechanism is configured to catch LBUGs, panics, etc.
71 # Each client runs a set of MPI and non-MPI workloads. These
72 # applications are run in short loops so that their exit status can be waited
73 # for and checked within reasonable time by ha_wait_loads.
74 # The set of MPI and non-MPI workloads are configurable by parameters:
76 # default set: dd, tar, iozone
78 # default set: ior, simul.
80 # The number of clients that run MPI loads is configured by the parameter
81 # ha_mpi_instances. Only one client runs MPI workloads by default.
83 # PROCESS STRUCTURE AND IPC
85 # On the node where this script is run, the processes look like this:
89 # ~ ha.sh (ha_repeat_mpi_load ior)
91 # ~ ha.sh (ha_repeat_mpi_load simul)
93 # ~ ... (one for each MPI load)
95 # ~ ha.sh (ha_repeat_nonmpi_load client2 dbench)
96 # ~ pdsh client2 dbench
97 # ~ ha.sh (ha_repeat_nonmpi_load client2 iozone)
98 # ~ pdsh client2 iozone
99 # ~ ha.sh (ha_repeat_nonmpi_load client5 iozone)
100 # ~ pdsh client5 iozone
101 # ~ ... (one for each non-MPI load on each client)
103 # Each tilde represents a process. Indentations imply parent-children
106 # IPC is done by files in the temporary directory.
# Workload binary locations; empty when the tool is not installed.
111 SIMUL=${SIMUL:-$(which simul 2> /dev/null || true)}
112 IOR=${IOR:-$(which IOR 2> /dev/null || true)}
# IOR block size and number of MPI threads started per client.
114 ior_blockSize=${ior_blockSize:-6g}
115 mpi_threads_per_client=${mpi_threads_per_client:-2}
117 iozone_SIZE=${iozone_SIZE:-262144} # 256m
# mpirun and lfs have no "|| true" fallback — they are expected to exist.
119 mpirun=${MPIRUN:-$(which mpirun)}
120 LFS=${LFS:-$(which lfs)}
# NOTE(review): loop fragment (enclosing function not visible here) that
# verifies each configured MPI load tag has a matching binary variable set.
124 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
125 local tag=${ha_mpi_load_tags[$load]}
# Upper-case the tag to form the variable name holding the binary path
# (e.g. tag "ior" -> $IOR).
126 local bin=$(echo $tag | tr '[:lower:]' '[:upper:]')
# ${!bin} is bash indirect expansion: the value of the variable named $bin.
127 if [ x${!bin} = x ]; then
128 ha_error ha_mpi_loads: ${ha_mpi_loads}, $bin is not set
# ha_info body: timestamped message to stdout.
136 echo "$0: $(date +%H:%M:%S' '%s):" "$@"
# ha_log body: mark a message in the Lustre debug log on the given nodes.
141 local nodes=${1// /,}
143 ha_on $nodes "lctl mark $*"
# ERR-trap handler fragment: report the failing command and a call trace
# built from bash's FUNCNAME/BASH_SOURCE/BASH_LINENO arrays.
155 ha_error "Trap ERR triggered by:"
156 ha_error " $BASH_COMMAND"
157 ha_error "Call trace:"
158 for ((i = 0; i < ${#FUNCNAME[@]}; i++)); do
159 ha_error " ${FUNCNAME[$i]} [${BASH_SOURCE[$i]}:${BASH_LINENO[$i]}]"
# All IPC between the load-runner processes is done through files in this
# per-run temporary directory (see "PROCESS STRUCTURE AND IPC" above).
166 declare ha_tmp_dir=/tmp/$(basename $0)-$$
# Flag files: "stop" tells all loads to wind down; "fail" records that at
# least one load exited with an error.
167 declare ha_stop_file=$ha_tmp_dir/stop
168 declare ha_fail_file=$ha_tmp_dir/fail
169 declare ha_status_file_prefix=$ha_tmp_dir/status
170 declare -a ha_status_files
# Machine file passed to mpirun via -machinefile.
171 declare ha_machine_file=$ha_tmp_dir/machine_file
# Power control commands; defaults use PowerMan's pm(1). Override with
# POWER_DOWN / POWER_UP.
172 declare ha_power_down_cmd=${POWER_DOWN:-"pm -0"}
173 declare ha_power_up_cmd=${POWER_UP:-"pm -1"}
# Delay before a failback, and the optional CRM-specific failback command
# (empty means failback is assumed to be automatic).
174 declare ha_failback_delay=${DELAY:-5}
175 declare ha_failback_cmd=${FAILBACK:-""}
# lfs setstripe parameters applied to the shared test directory.
176 declare ha_stripe_params=${STRIPEPARAMS:-"-c 0"}
177 declare -a ha_clients
178 declare -a ha_servers
179 declare -a ha_victims
180 declare ha_test_dir=/mnt/lustre/$(basename $0)-$$
181 declare ha_start_time=$(date +%s)
# Default test duration: 24 hours (seconds).
182 declare ha_expected_duration=$((60 * 60 * 24))
183 declare ha_max_failover_period=10
184 declare ha_nr_loops=0
185 declare ha_stop_signals="SIGINT SIGTERM SIGHUP"
# ha_wait_loads gives each load up to 10 minutes to report status.
186 declare ha_load_timeout=$((60 * 10))
187 declare ha_workloads_only=false
188 declare ha_workloads_dry_run=false
# Number of clients that start MPI loads; capped at ${#ha_clients[@]} later.
190 declare ha_mpi_instances=${ha_mpi_instances:-1}
192 declare ha_mpi_loads=${ha_mpi_loads="ior simul"}
193 declare -a ha_mpi_load_tags=($ha_mpi_loads)
# Parameter strings are single-quoted so each inner double-quoted group
# becomes one array element after the eval below.
195 declare ha_ior_params=${IORP:-'" -b $ior_blockSize -t 2m -w -W -T 1"'}
196 declare ha_simul_params=${SIMULP:-'" -n 10"'}
197 declare ha_mpirun_options=${MPIRUN_OPTIONS:-""}
199 eval ha_params_ior=($ha_ior_params)
200 eval ha_params_simul=($ha_simul_params)
202 declare ha_nparams_ior=${#ha_params_ior[@]}
203 declare ha_nparams_simul=${#ha_params_simul[@]}
# Command templates: "{}" expands to the per-load work directory and
# "{params}" to the current parameter set.
205 declare -A ha_mpi_load_cmds=(
206 [ior]="$IOR -o {}/f.ior {params}"
207 [simul]="$SIMUL {params} -d {}"
# Non-MPI loads run directly on each client via pdsh.
210 declare ha_nonmpi_loads=${ha_nonmpi_loads="dd tar iozone"}
211 declare -a ha_nonmpi_load_tags=($ha_nonmpi_loads)
212 declare -a ha_nonmpi_load_cmds=(
213 "dd if=/dev/zero of={}/f.dd bs=1M count=256"
214 "tar cf - /etc | tar xf - -C {}"
215 "iozone -a -e -+d -s $iozone_SIZE {}/f.iozone"
# Usage banner fragment.
220 ha_info "Usage: $0 -c HOST[,...] -s HOST[,...]" \
221 "-v HOST[,...] [-d DIRECTORY] [-u SECONDS]"
# Parse the command line into the ha_* globals.
# NOTE(review): the case labels are not visible in this fragment; the arm
# annotations below are inferred from the usage text at the top of the file.
224 ha_process_arguments()
228 while getopts hc:s:v:d:p:u:wr opt; do
# -c: client nodes (comma-separated list split into an array).
235 ha_clients=(${OPTARG//,/ })
# -s: server nodes.
238 ha_servers=(${OPTARG//,/ })
# -v: victim nodes eligible for power-off.
241 ha_victims=(${OPTARG//,/ })
# -d: parent of the test directory.
244 ha_test_dir=$OPTARG/$(basename $0)-$$
# -u: expected test duration in seconds.
247 ha_expected_duration=$OPTARG
# -p (presumably): max failover period — TODO confirm against full source.
250 ha_max_failover_period=$OPTARG
# -w: run workloads only, inject no failures.
253 ha_workloads_only=true
# -r: dry-run the workloads.
256 ha_workloads_dry_run=true
265 if [ -z "${ha_clients[*]}" ]; then
266 ha_error "-c is mandatory"
# Servers and victims are required unless we only run workloads.
270 if ! ($ha_workloads_dry_run ||
271 $ha_workloads_only) &&
272 ([ -z "${ha_servers[*]}" ] ||
273 [ -z "${ha_victims[*]}" ]); then
274 ha_error "-s, and -v are all mandatory"
288 # -S is to be used here to track the
289 # remote command return values
# ha_on body: run a command on the given nodes via pdsh with a sane PATH;
# -S makes pdsh return the largest remote exit code.
291 pdsh -S -w $nodes PATH=/usr/local/sbin:/usr/local/bin:/sbin:\
292 /bin:/usr/sbin:/usr/bin "$@" ||
# EXIT-trap fragment: raise the stop flag, then clean up — unless a failure
# was recorded, in which case artifacts are kept for inspection.
299 touch "$ha_stop_file"
301 if [ -e "$ha_fail_file" ]; then
302 ha_info "Test directory $ha_test_dir not removed"
303 ha_info "Temporary directory $ha_tmp_dir not removed"
305 ha_on ${ha_clients[0]} rm -rf "$ha_test_dir"
306 ha_info "Please find the results in the directory $ha_tmp_dir"
# Stop-signal handler: report which signals are handled and raise the
# stop flag so all load loops wind down.
310 ha_trap_stop_signals()
312 ha_info "${ha_stop_signals// /,} received"
313 touch "$ha_stop_file"
# ha_sleep fragment: logged sleep.
320 ha_info "Sleeping for ${n}s"
322 # sleep(1) could be interrupted.
# ha_lock fragment: mutex via mkdir, which is atomic — spin until acquired.
331 until mkdir "$lock" >/dev/null 2>&1; do
332 ha_sleep 1 >/dev/null
# ha_dump_logs fragment: dump the Lustre debug log (lctl dk) on the given
# nodes to a timestamped file, serialized by a lock directory.
345 local nodes=${1// /,}
346 local file=/tmp/$(basename $0)-$$-$(date +%s).dk
347 local lock=$ha_tmp_dir/lock-dump-logs
350 ha_info "Dumping lctl log to $file"
353 # some nodes could crash, so
354 # do not exit with error if not all logs are dumped
356 ha_on $nodes "lctl dk >$file" ||
357 ha_error "not all logs are dumped! Some nodes are unreachable."
# ha_repeat_mpi_load fragment: run one MPI workload in a loop on $client
# until the stop file appears or an iteration fails.
367 local tag=${ha_mpi_load_tags[$load]}
368 local cmd=${ha_mpi_load_cmds[$tag]}
369 local dir=$ha_test_dir/$client-$tag
370 local log=$ha_tmp_dir/$client-$tag
373 local start_time=$(date +%s)
# Substitute the work directory and parameter placeholders into the template.
375 cmd=${cmd//"{}"/$dir}
376 cmd=${cmd//"{params}"/$parameter}
378 ha_info "Starting $tag"
380 local machines="-machinefile $ha_machine_file"
381 while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
# One iteration: create and open up the work dir, run the load as mpiuser
# via mpirun across all clients, then remove the dir; output goes to $log.
383 ha_on $client mkdir -p "$dir" &&
384 ha_on $client chmod a+xwr $dir &&
385 ha_on $client "su mpiuser sh -c \" $mpirun $ha_mpirun_options \
386 -np $((${#ha_clients[@]} * mpi_threads_per_client )) \
387 $machines $cmd \" " &&
388 ha_on $client rm -rf "$dir";
389 } >>"$log" 2>&1 || rc=$?
# On failure: record the failure, stop all loads, and dump debug logs.
394 touch "$ha_fail_file"
395 touch "$ha_stop_file"
396 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
400 nr_loops=$((nr_loops + 1))
403 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
405 ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
# ha_start_mpi_loads fragment: build the mpirun machine file, copy it to
# every client, then fork one ha_repeat_mpi_load per (client, load) pair.
417 for client in ${ha_clients[@]}; do
418 ha_info ha_machine_file=$ha_machine_file
419 echo $client >> $ha_machine_file
# Distribute the machine file so mpirun can read it from any client.
421 local dirname=$(dirname $ha_machine_file)
422 for client in ${ha_clients[@]}; do
423 ha_on $client mkdir -p $dirname
424 scp $ha_machine_file $client:$ha_machine_file
427 # ha_mpi_instances defines the number of
428 # clients that start MPI loads; should be <= ${#ha_clients[@]}
429 local inst=$ha_mpi_instances
430 (( inst <= ${#ha_clients[@]} )) || inst=${#ha_clients[@]}
432 for ((n = 0; n < $inst; n++)); do
433 client=${ha_clients[n]}
434 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
435 tag=${ha_mpi_load_tags[$load]}
436 status=$ha_status_file_prefix-$tag-$client
# Select this iteration's parameter set via indirect array reference
# (ha_params_<tag>[nparam]).
439 local num=ha_nparams_$tag
441 local aref=ha_params_$tag[nparam]
442 local parameter=${!aref}
# Run the load in the background; remember its status file for waiting.
443 ha_repeat_mpi_load $client $load $status "$parameter" &
444 ha_status_files+=("$status")
# Run one non-MPI workload on one client in a loop until the stop file
# appears or an iteration fails.
449 ha_repeat_nonmpi_load()
454 local tag=${ha_nonmpi_load_tags[$load]}
455 local cmd=${ha_nonmpi_load_cmds[$load]}
456 local dir=$ha_test_dir/$client-$tag
457 local log=$ha_tmp_dir/$client-$tag
460 local start_time=$(date +%s)
# Substitute the per-load work directory into the command template.
462 cmd=${cmd//"{}"/$dir}
464 ha_info "Starting $tag on $client"
466 while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
467 ha_on $client "mkdir -p $dir && \
469 rm -rf $dir" >>"$log" 2>&1 || rc=$?
# On failure: dump logs and raise the fail/stop flags (mirrors the MPI path).
472 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
473 touch "$ha_fail_file"
474 touch "$ha_stop_file"
478 nr_loops=$((nr_loops + 1))
481 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
483 ha_info "$tag on $client stopped: rc $rc avg loop time ${avg_loop_time}s"
# Fork one ha_repeat_nonmpi_load per (client, load) pair.
486 ha_start_nonmpi_loads()
493 for client in ${ha_clients[@]}; do
494 for ((load = 0; load < ${#ha_nonmpi_load_tags[@]}; load++)); do
495 tag=${ha_nonmpi_load_tags[$load]}
496 status=$ha_status_file_prefix-$tag-$client
497 ha_repeat_nonmpi_load $client $load $status &
498 ha_status_files+=("$status")
# ha_start_loads fragment: install the stop-signal trap, then start loads.
505 trap ha_trap_stop_signals $ha_stop_signals
506 ha_start_nonmpi_loads
# ha_stop_loads fragment: restore default handling for the stop signals
# and wait for the workload children to exit.
513 trap - $ha_stop_signals
514 ha_info "Waiting for workloads to stop"
# ha_wait_loads fragment: wait up to ha_load_timeout for each load to
# write its status file, returning early if the stop file appears.
521 local end=$(($(date +%s) + ha_load_timeout))
523 ha_info "Waiting for workload status"
524 rm -f "${ha_status_files[@]}"
527 # return immediately if ha_stop_file exists,
528 # all status_files not needed to be checked
530 for file in "${ha_status_files[@]}"; do
531 if [ -e "$ha_stop_file" ]; then
532 ha_info "$ha_stop_file found! Stop."
536 # Wait for the status file to be created during ha_load_timeout.
537 # An existing file guarantees that some application
538 # is completed. If no status file was created
539 # this function guarantees that we allow
540 # applications to continue after/before
541 # failover/failback during ha_load_timeout time.
543 until [ -e "$file" ] || (($(date +%s) >= end)); do
545 # check ha_stop_file again, it could appear
546 # during ha_load_timeout
548 if [ -e "$ha_stop_file" ]; then
549 ha_info "$ha_stop_file found! Stop."
552 ha_sleep 1 >/dev/null
# ha_power_down / ha_power_up fragments: drive the configured power
# control commands against a single node.
561 ha_info "Powering down $node"
562 $ha_power_down_cmd $node
569 ha_info "Powering up $node"
570 $ha_power_up_cmd $node
576 # Print a random integer within [0, MAX).
583 # See "5.2 Bash Variables" from "info bash".
# RANDOM is uniform over [0, 32767]; scale it into [0, max).
585 echo -n $((RANDOM * max / 32768))
# ha_aim fragment: pick a random victim node.
590 local i=$(ha_rand ${#ha_victims[@]})
592 echo -n ${ha_victims[$i]}
# ha_wait_node fragment: poll (hostname over pdsh) until the node answers,
# the stop file appears, or a 10-minute deadline passes.
598 local end=$(($(date +%s) + 10 * 60))
600 ha_info "Waiting for $node to boot up"
601 until ha_on $node hostname >/dev/null 2>&1 ||
602 [ -e "$ha_stop_file" ] ||
603 (($(date +%s) >= end)); do
604 ha_sleep 1 >/dev/null
# ha_failback fragment: after a configurable delay, run the CRM-specific
# failback command; skipped when no command is configured.
611 ha_info "Failback resources on $node in $ha_failback_delay sec"
613 ha_sleep $ha_failback_delay
614 [ "$ha_failback_cmd" ] ||
# NOTE(review): "skiping" typo is in a user-visible message; left unchanged
# here because doc-only edits must not alter runtime strings.
616 ha_info "No failback command set, skiping"
620 $ha_failback_cmd $node
# ha_summarize fragment: print the end-of-run banner.
625 ha_info "---------------8<---------------"
627 ha_info " Duration: $(($(date +%s) - $ha_start_time))s"
628 ha_info " Loops: $ha_nr_loops"
# ha_killer fragment: the main failure-injection loop. Each iteration picks
# a victim, powers it down, waits for the loads, powers it back up, fails
# back, and waits again — until the expected duration elapses or the stop
# file appears. With workloads-only mode all failure steps are skipped.
635 while (($(date +%s) < ha_start_time + ha_expected_duration)) &&
636 [ ! -e "$ha_stop_file" ]; do
637 ha_info "---------------8<---------------"
639 $ha_workloads_only || node=$(ha_aim)
641 ha_info "Failing $node"
642 $ha_workloads_only && ha_info " is skipped: workload only..."
# Randomize the failure point within the max failover period.
644 ha_sleep $(ha_rand $ha_max_failover_period)
645 $ha_workloads_only || ha_power_down $node
647 ha_wait_loads || return
# Even when stopping early, bring the victim back up before returning.
649 if [ -e $ha_stop_file ]; then
650 $ha_workloads_only || ha_power_up $node
654 ha_info "Bringing $node back"
655 ha_sleep $(ha_rand 10)
656 $ha_workloads_only ||
664 # Wait for the failback to start.
667 ha_wait_loads || return
669 ha_sleep $(ha_rand 20)
671 ha_nr_loops=$((ha_nr_loops + 1))
672 ha_info "Loop $ha_nr_loops done"
# main fragment: parse arguments, announce the start in the Lustre log,
# install the EXIT trap, create and stripe the shared test directory, run
# the loads, and announce the end of the run.
679 ha_process_arguments "$@"
682 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
683 "START: $0: $(date +%H:%M:%S' '%s)"
684 trap ha_trap_exit EXIT
686 ha_on ${ha_clients[0]} mkdir "$ha_test_dir"
# Apply the configured stripe layout to the test directory.
687 ha_on ${ha_clients[0]} " \
688 $LFS setstripe $ha_stripe_params $ha_test_dir"
693 if $ha_workloads_dry_run; then
697 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
702 if [ -e "$ha_fail_file" ]; then
705 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
706 "END: $0: $(date +%H:%M:%S' '%s)"