2 # -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
3 # vim:shiftwidth=4:softtabstop=4:tabstop=4:
7 # ha.sh - test Lustre HA (aka failover) configurations
15 # ha.sh tests Lustre HA (aka failover) configurations with a CRM.
23 # Specify client nodes.
26 # Specify server nodes.
29 # Specify victim nodes to be rebooted.
32 # Choose a parent of the test directory. "/mnt/lustre" if not specified.
35 # Define a duration for the test. 86400 seconds if not specified.
38 # Define a max failover period. 10 minutes if not set.
41 # Only run the workloads; no failure will be introduced.
42 # -v, -s are ignored in this case.
44 # Workloads dry run for several seconds; no failures will be introduced.
45 # This option is useful to verify the loads.
46 # -u is ignored in this case
48 # Reboot victim nodes simultaneously.
53 # A Lustre file system is up and mounted on all client nodes. This script
54 # does not mount or unmount any Lustre targets or clients, let alone format
57 # Each target has a failnode, so that workloads can continue after a power
60 # CRM could be configured in 2 ways:
62 # Targets are automatically failed back when their primary node is back. This
63 # assumption avoids calling CRM-specific commands to trigger failbacks, making
64 # this script more CRM-neutral.
66 # Targets are not automatically failed back when their primary node is back.
67 # CRM-specific command is executed to trigger failbacks.
69 # A crash dump mechanism is configured to catch LBUGs, panics, etc.
73 # Each client runs set of MPI and non-MPI workloads. These
74 # applications are run in short loops so that their exit status can be waited
75 # for and checked within reasonable time by ha_wait_loads.
76 # The set of MPI and non-MPI workloads is configurable by parameters:
78 # default set: dd, tar, iozone
80 # default set: ior, simul, mdtest
82 # The number of clients that run MPI loads is configured by the parameter
83 # ha_mpi_instances. Only one client runs MPI workloads by default.
85 # PROCESS STRUCTURE AND IPC
87 # On the node where this script is run, the processes look like this:
91 # ~ ha.sh (ha_repeat_mpi_load ior)
93 # ~ ha.sh (ha_repeat_mpi_load simul)
95 # ~ ha.sh (ha_repeat_mpi_load mdtest)
97 # ~ ... (one for each MPI load)
99 # ~ ha.sh (ha_repeat_nonmpi_load client2 dbench)
100 # ~ pdsh client2 dbench
101 # ~ ha.sh (ha_repeat_nonmpi_load client2 iozone)
102 # ~ pdsh client2 iozone
103 # ~ ha.sh (ha_repeat_nonmpi_load client5 iozone)
104 # ~ pdsh client5 iozone
105 # ~ ... (one for each non-MPI load on each client)
107 # Each tilde represents a process. Indentations imply parent-children
110 # IPC is done by files in the temporary directory.
# Workload binary locations; each overridable from the environment.
# "|| true" keeps the assignment from failing when a binary is missing.
115 SIMUL=${SIMUL:-$(which simul 2> /dev/null || true)}
116 IOR=${IOR:-$(which IOR 2> /dev/null || true)}
117 MDTEST=${MDTEST:-$(which mdtest 2> /dev/null || true)}
# Default workload sizing knobs, also environment-overridable.
119 ior_blockSize=${ior_blockSize:-6g}
120 mpi_threads_per_client=${mpi_threads_per_client:-2}
122 iozone_SIZE=${iozone_SIZE:-262144} # 256m
# Unlike the loads above, "which" failures are not suppressed here:
# mpirun and lfs are required tools.
124 mpirun=${MPIRUN:-$(which mpirun)}
125 LFS=${LFS:-$(which lfs)}
# Verify that every configured MPI load tag has a binary path set: the
# lower-case tag (e.g. "ior") upper-cased (e.g. "IOR") names the
# variable that must hold the binary location.
# NOTE(review): excerpt — the enclosing function header and the loop/if
# terminators are not visible here; code kept byte-identical.
129 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
130 local tag=${ha_mpi_load_tags[$load]}
131 local bin=$(echo $tag | tr '[:lower:]' '[:upper:]')
# ${!bin} is indirect expansion: the value of the variable named by $bin.
132 if [ x${!bin} = x ]; then
133 ha_error ha_mpi_loads: ${ha_mpi_loads}, $bin is not set
# Fragment of ha_info: print a timestamped message prefixed by the
# script name.
141 echo "$0: $(date +%H:%M:%S' '%s):" "$@"

# Fragment of ha_log: convert the space-separated node list in $1 to
# pdsh's comma form, then mark the message in the Lustre debug log.
146 local nodes=${1// /,}
148 ha_on $nodes "lctl mark $*"

# Fragment of the ERR trap handler: report the command that failed and a
# call trace assembled from bash's FUNCNAME/BASH_SOURCE/BASH_LINENO.
160 ha_error "Trap ERR triggered by:"
161 ha_error " $BASH_COMMAND"
162 ha_error "Call trace:"
163 for ((i = 0; i < ${#FUNCNAME[@]}; i++)); do
164 ha_error " ${FUNCNAME[$i]} [${BASH_SOURCE[$i]}:${BASH_LINENO[$i]}]"
# Per-run temporary directory; IPC between the worker processes is done
# via files created underneath it.
171 declare ha_tmp_dir=/tmp/$(basename $0)-$$
# Existence of these files is the IPC: stop => all loops quit,
# fail => run is marked failed and artifacts are preserved.
172 declare ha_stop_file=$ha_tmp_dir/stop
173 declare ha_fail_file=$ha_tmp_dir/fail
174 declare ha_status_file_prefix=$ha_tmp_dir/status
175 declare -a ha_status_files
176 declare ha_machine_file=$ha_tmp_dir/machine_file
# LFSCK configuration, all overridable via LFSCK_* environment variables.
177 declare ha_lfsck_log=$ha_tmp_dir/lfsck.log
178 declare ha_lfsck_lock=$ha_tmp_dir/lfsck.lock
179 declare ha_lfsck_stop=$ha_tmp_dir/lfsck.stop
180 declare ha_lfsck_bg=${LFSCK_BG:-false}
181 declare ha_lfsck_after=${LFSCK_AFTER:-false}
182 declare ha_lfsck_node=${LFSCK_NODE:-""}
183 declare ha_lfsck_device=${LFSCK_DEV:-""}
184 declare ha_lfsck_types=${LFSCK_TYPES:-"namespace layout"}
185 declare ha_lfsck_custom_params=${LFSCK_CUSTOM_PARAMS:-""}
186 declare ha_lfsck_wait=${LFSCK_WAIT:-1200}
187 declare ha_lfsck_fail_on_repaired=${LFSCK_FAIL_ON_REPAIRED:-false}
# Power-control and failback commands ("pm" power manager by default).
188 declare ha_power_down_cmd=${POWER_DOWN:-"pm -0"}
189 declare ha_power_up_cmd=${POWER_UP:-"pm -1"}
190 declare ha_power_delay=${POWER_DELAY:-60}
191 declare ha_failback_delay=${DELAY:-5}
192 declare ha_failback_cmd=${FAILBACK:-""}
# Striping defaults for the shared test directory.
193 declare ha_stripe_params=${STRIPEPARAMS:-"-c 0"}
194 declare ha_dir_stripe_count=${DSTRIPECOUNT:-"1"}
195 declare ha_mdt_index=${MDTINDEX:-"0"}
196 declare ha_mdt_index_random=${MDTINDEXRAND:-false}
# Node sets, filled in by ha_process_arguments.
197 declare -a ha_clients
198 declare -a ha_servers
199 declare -a ha_victims
200 declare ha_test_dir=/mnt/lustre/$(basename $0)-$$
201 declare ha_start_time=$(date +%s)
# Default run length: 24 hours; default failover period: 10 (see -u/-p).
202 declare ha_expected_duration=$((60 * 60 * 24))
203 declare ha_max_failover_period=10
204 declare ha_nr_loops=0
205 declare ha_stop_signals="SIGINT SIGTERM SIGHUP"
206 declare ha_load_timeout=$((60 * 10))
207 declare ha_workloads_only=false
208 declare ha_workloads_dry_run=false
209 declare ha_simultaneous=false
# MPI workload configuration: number of clients that run MPI loads,
# load tags, per-load parameter sets, and command templates
# ({} = working dir, {params} = parameter string).
211 declare ha_mpi_instances=${ha_mpi_instances:-1}
213 declare ha_mpi_loads=${ha_mpi_loads="ior simul mdtest"}
214 declare -a ha_mpi_load_tags=($ha_mpi_loads)
216 declare ha_ior_params=${IORP:-'" -b $ior_blockSize -t 2m -w -W -T 1"'}
217 declare ha_simul_params=${SIMULP:-'" -n 10"'}
218 declare ha_mdtest_params=${MDTESTP:-'" -i 1 -n 1000"'}
219 declare ha_mpirun_options=${MPIRUN_OPTIONS:-""}
# eval re-splits the quoted parameter strings into per-load arrays, so
# one load can carry several alternative parameter sets.
221 eval ha_params_ior=($ha_ior_params)
222 eval ha_params_simul=($ha_simul_params)
223 eval ha_params_mdtest=($ha_mdtest_params)
225 declare ha_nparams_ior=${#ha_params_ior[@]}
226 declare ha_nparams_simul=${#ha_params_simul[@]}
227 declare ha_nparams_mdtest=${#ha_params_mdtest[@]}
229 declare -A ha_mpi_load_cmds=(
230 [ior]="$IOR -o {}/f.ior {params}"
231 [simul]="$SIMUL {params} -d {}"
232 [mdtest]="$MDTEST {params} -d {}"
# Non-MPI workload tags and command templates ({} = working dir).
235 declare ha_nonmpi_loads=${ha_nonmpi_loads="dd tar iozone"}
236 declare -a ha_nonmpi_load_tags=($ha_nonmpi_loads)
237 declare -a ha_nonmpi_load_cmds=(
238 "dd if=/dev/zero of={}/f.dd bs=1M count=256"
239 "tar cf - /etc | tar xf - -C {}"
240 "iozone -a -e -+d -s $iozone_SIZE {}/f.iozone"
# Fragment of ha_usage.
245 ha_info "Usage: $0 -c HOST[,...] -s HOST[,...]" \
246 "-v HOST[,...] [-d DIRECTORY] [-u SECONDS]"
249 ha_process_arguments()
# Parse command-line options into the ha_* globals and validate them.
# NOTE(review): excerpt — the case labels and esac/fi terminators are
# not visible here; per-option notes below are inferred from the usage
# text and the getopts string "hc:s:v:d:p:u:wrm" — confirm against the
# full file.
253 while getopts hc:s:v:d:p:u:wrm opt; do
# Node lists arrive comma-separated; store as whitespace-split arrays.
260 ha_clients=(${OPTARG//,/ })
263 ha_servers=(${OPTARG//,/ })
266 ha_victims=(${OPTARG//,/ })
# Parent of the per-run test directory (presumably -d).
269 ha_test_dir=$OPTARG/$(basename $0)-$$
# Expected test duration in seconds (presumably -u).
272 ha_expected_duration=$OPTARG
# Max failover period (presumably -p).
275 ha_max_failover_period=$OPTARG
# Workloads-only mode (presumably -w): no failures are introduced.
278 ha_workloads_only=true
# Workloads dry run (presumably -r).
281 ha_workloads_dry_run=true
# The client list is always mandatory.
293 if [ -z "${ha_clients[*]}" ]; then
294 ha_error "-c is mandatory"
# Servers and victims are mandatory unless only/dry-running workloads.
298 if ! ($ha_workloads_dry_run ||
299 $ha_workloads_only) &&
300 ([ -z "${ha_servers[*]}" ] ||
301 [ -z "${ha_victims[*]}" ]); then
302 ha_error "-s, and -v are all mandatory"
# Fragment of ha_on: run a command on remote nodes via pdsh.
316 # -S is to be used here to track the
317 # remote command return values
# Explicit PATH so remote non-login shells can find sbin utilities.
319 pdsh -S -w $nodes PATH=/usr/local/sbin:/usr/local/bin:/sbin:\
320 /bin:/usr/sbin:/usr/bin "$@" ||

# Fragment of the EXIT trap: signal all workers to stop; keep the test
# and tmp directories for post-mortem when a failure was recorded,
# otherwise clean the test directory up.
327 touch "$ha_stop_file"
329 if [ -e "$ha_fail_file" ]; then
330 ha_info "Test directory $ha_test_dir not removed"
331 ha_info "Temporary directory $ha_tmp_dir not removed"
333 ha_on ${ha_clients[0]} rm -rf "$ha_test_dir"
334 ha_info "Please find the results in the directory $ha_tmp_dir"
338 ha_trap_stop_signals()
# Handler for SIGINT/SIGTERM/SIGHUP: request a global stop by creating
# the stop file that every worker loop polls.
340 ha_info "${ha_stop_signals// /,} received"
341 touch "$ha_stop_file"

# Fragment of ha_sleep.
348 ha_info "Sleeping for ${n}s"
350 # sleep(1) could be interrupted.

# Fragments of the lock helpers: a lock is a directory; mkdir(1) is
# atomic, so spinning on it implements mutual exclusion between workers.
359 while [ -e $lock ]; do
368 until mkdir "$lock" >/dev/null 2>&1; do
369 ha_sleep 1 >/dev/null

# Fragment of ha_dump_logs: collect lctl debug logs from all given
# nodes into a per-run file, serialized by a dedicated lock.
382 local nodes=${1// /,}
383 local file=/tmp/$(basename $0)-$$-$(date +%s).dk
384 local lock=$ha_tmp_dir/lock-dump-logs
388 ha_info "Dumping lctl log to $file"
391 # some nodes could crash, so
392 # do not exit with error if not all logs are dumped
394 ha_on $nodes "lctl dk >>$file" || rc=$?
397 ha_error "not all logs are dumped! Some nodes are unreachable."
# Fragment of ha_repeat_mpi_load: run one MPI workload in a loop on
# $client until the stop file appears or an iteration fails.
407 local tag=${ha_mpi_load_tags[$load]}
408 local cmd=${ha_mpi_load_cmds[$tag]}
409 local dir=$ha_test_dir/$client-$tag
410 local log=$ha_tmp_dir/$client-$tag
413 local avg_loop_time=0
414 local start_time=$(date +%s)
# Substitute the per-load working directory and parameter string into
# the command template ({} and {params} placeholders).
416 cmd=${cmd//"{}"/$dir}
417 cmd=${cmd//"{params}"/$parameter}
419 ha_info "Starting $tag"
421 local machines="-machinefile $ha_machine_file"
422 while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
# Choose the MDT index for the test dir: random below $ha_mdt_index
# when randomization is enabled, otherwise the fixed configured index.
425 if $ha_mdt_index_random && [ $ha_mdt_index -ne 0 ]; then
426 mdt_index=$(ha_rand $ha_mdt_index)
428 mdt_index=$ha_mdt_index
# One iteration: create a striped dir, open it up, run the MPI job as
# mpiuser with threads_per_client ranks per client, then clean up.
430 ha_on $client $LFS mkdir -i$mdt_index -c$ha_dir_stripe_count "$dir" &&
431 ha_on $client $LFS getdirstripe "$dir" &&
432 ha_on $client chmod a+xwr $dir &&
433 ha_on $client "su mpiuser sh -c \" $mpirun $ha_mpirun_options \
434 -np $((${#ha_clients[@]} * mpi_threads_per_client )) \
435 $machines $cmd \" " &&
436 ha_on $client rm -rf "$dir";
437 } >>"$log" 2>&1 || rc=$?
# On failure: record it, ask every worker to stop, and dump debug logs
# from all clients and servers.
442 touch "$ha_fail_file"
443 touch "$ha_stop_file"
444 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
448 nr_loops=$((nr_loops + 1))
# Report the average wall time of one workload iteration.
451 [ $nr_loops -ne 0 ] &&
452 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
454 ha_info "$tag stopped: rc $rc avg loop time $avg_loop_time"
# Fragment of ha_start_mpi_loads: build the mpirun machine file, copy
# it to every client, then fork one ha_repeat_mpi_load per
# (client, load) pair.
466 for client in ${ha_clients[@]}; do
467 ha_info ha_machine_file=$ha_machine_file
468 echo $client >> $ha_machine_file
470 local dirname=$(dirname $ha_machine_file)
471 for client in ${ha_clients[@]}; do
472 ha_on $client mkdir -p $dirname
473 scp $ha_machine_file $client:$ha_machine_file
476 # ha_mpi_instances defines the number of
477 # clients that start mpi loads; should be <= ${#ha_clients[@]}
478 local inst=$ha_mpi_instances
# Clamp to the number of available clients.
479 (( inst <= ${#ha_clients[@]} )) || inst=${#ha_clients[@]}
481 for ((n = 0; n < $inst; n++)); do
482 client=${ha_clients[n]}
483 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
484 tag=${ha_mpi_load_tags[$load]}
# Each background worker reports via its own status file.
485 status=$ha_status_file_prefix-$tag-$client
# Indirect lookups: ha_nparams_<tag> and ha_params_<tag>[nparam].
488 local num=ha_nparams_$tag
490 local aref=ha_params_$tag[nparam]
491 local parameter=${!aref}
492 ha_repeat_mpi_load $client $load $status "$parameter" &
493 ha_status_files+=("$status")
498 ha_repeat_nonmpi_load()
# Run one non-MPI workload (e.g. dd/tar/iozone) in a loop on $client
# until the stop file appears or an iteration fails.
# NOTE(review): excerpt — parts of the body are not visible; code kept
# byte-identical.
503 local tag=${ha_nonmpi_load_tags[$load]}
504 local cmd=${ha_nonmpi_load_cmds[$load]}
505 local dir=$ha_test_dir/$client-$tag
506 local log=$ha_tmp_dir/$client-$tag
509 local avg_loop_time=0
510 local start_time=$(date +%s)
# Substitute the working directory for the {} placeholder.
512 cmd=${cmd//"{}"/$dir}
514 ha_info "Starting $tag on $client"
516 while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
517 ha_on $client "mkdir -p $dir && \
519 rm -rf $dir" >>"$log" 2>&1 || rc=$?
# On failure: dump debug logs and signal every worker to stop.
522 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
523 touch "$ha_fail_file"
524 touch "$ha_stop_file"
528 nr_loops=$((nr_loops + 1))
# Report the average wall time of one workload iteration.
531 [ $nr_loops -ne 0 ] &&
532 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
534 ha_info "$tag on $client stopped: rc $rc avg loop time ${avg_loop_time}s"
537 ha_start_nonmpi_loads()
# Fork one ha_repeat_nonmpi_load per (client, load) pair in the
# background; each reports via its own status file.
544 for client in ${ha_clients[@]}; do
545 for ((load = 0; load < ${#ha_nonmpi_load_tags[@]}; load++)); do
546 tag=${ha_nonmpi_load_tags[$load]}
547 status=$ha_status_file_prefix-$tag-$client
548 ha_repeat_nonmpi_load $client $load $status &
549 ha_status_files+=("$status")

# Fragment of the background-LFSCK driver: do not start a pass while
# the LFSCK stop marker or the global stop file exists; otherwise run
# LFSCK and append its output to the LFSCK log.
560 [ -f $ha_lfsck_stop ] && ha_info "LFSCK stopped" && break
561 [ -f $ha_stop_file ] &&
562 ha_info "$ha_stop_file found! LFSCK not started" &&
564 ha_start_lfsck 2>&1 | tee -a $ha_lfsck_log
568 ha_info LFSCK BG PID: $LFSCK_BG_PID
571 ha_wait_lfsck_completed () {
# Poll lfsck status on all servers until every requested type reports
# "completed" (or "partial", which is expected after a HARD failover),
# or until $ha_lfsck_wait seconds elapse.
573 local -a types=($ha_lfsck_types)
577 local nodes="${ha_servers[@]}"
580 # -A start LFSCK on all nodes
582 [ ${#types[@]} -eq 0 ] && types=(namespace layout)
583 ha_info "Waiting LFSCK completed in $ha_lfsck_wait sec: types ${types[@]}"
584 for type in ${types[@]}; do
586 for (( i=0; i<=ha_lfsck_wait; i++)); do
587 status=($(ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null | \
588 awk '/status/ { print $3 }'))
589 for (( s=0; s<${#status[@]}; s++ )); do
590 # "partial" is expected after HARD failover
591 [[ "${status[s]}" = "completed" ]] ||
592 [[ "${status[s]}" = "partial" ]] || break
# All entries passed => this type is done; record it in var_<type>.
594 [[ $s -eq ${#status[@]} ]] && eval var_$type=1 && break
597 ha_info "LFSCK $type status in $i sec:"
598 ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null | grep status
# Final verdict: every requested type must have completed in time.
602 for type in ${types[@]}; do
604 ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null
605 [[ ${!var} -eq 1 ]] ||
606 { ha_info "lfsck not completed in $ha_lfsck_wait sec";
# Fragment of ha_start_lfsck: assemble and issue "lctl lfsck_start".
614 local -a types=($ha_lfsck_types)
617 # -A: start LFSCK on all nodes via the specified MDT device
618 # (see "-M" option) by single LFSCK command
619 local params=" -A -r $ha_lfsck_custom_params"
621 # use specified device if set
622 [ -n "$ha_lfsck_device" ] && params="-M $ha_lfsck_device $params"
624 # -t: check type(s) to be performed (default all)
625 # check only specified types if set
626 if [ ${#types[@]} -ne 0 ]; then
627 local type="${types[@]}"
628 params="$params -t ${type// /,}"
631 ha_info "LFSCK start $params"
632 ha_on $ha_lfsck_node "lctl lfsck_start $params" || rc=1
# On failure, wait for any concurrent holder of the lfsck lock to
# release it, then retry once.
633 if [ $rc -ne 0 ]; then
634 if [ -e $ha_lfsck_lock ]; then
636 ha_wait_unlock $ha_lfsck_lock
638 ha_on $ha_lfsck_node "lctl lfsck_start $params" || rc=1
# A start or completion failure fails the run and stops all loads.
643 { touch "$ha_fail_file"; touch "$ha_stop_file";
644 touch $ha_lfsck_stop; return 1; }
646 ha_wait_lfsck_completed ||
647 { touch "$ha_fail_file"; touch "$ha_stop_file";
648 touch $ha_lfsck_stop; return 1; }

# Fragment of ha_lfsck_repaired: sum the "repaired" counters from the
# LFSCK log; a non-zero total is treated as a failure (used when
# $ha_lfsck_fail_on_repaired is set).
657 n=$(cat $ha_lfsck_log | awk '/repaired/ {print $3}' |\
658 awk '{sum += $1} END { print sum }')
660 { ha_info "Total repaired: $n";
661 touch "$ha_fail_file"; return 1; }
# Fragment of the load-start path: install the stop-signal handler and
# launch all workload runners.
667 trap ha_trap_stop_signals $ha_stop_signals
668 ha_start_nonmpi_loads

# Fragment of the load-stop path: restore default signal handling and
# wait for the background workers to finish.
675 trap - $ha_stop_signals
676 ha_info "Waiting for workloads to stop"

# Fragment of ha_wait_loads: wait up to ha_load_timeout for each worker
# to recreate its status file, which proves the application completed
# at least one loop since the files were removed below.
683 local end=$(($(date +%s) + ha_load_timeout))
685 ha_info "Waiting for workload status"
686 rm -f "${ha_status_files[@]}"
689 # return immediately if ha_stop_file exists,
690 # all status_files not needed to be checked
692 for file in "${ha_status_files[@]}"; do
693 if [ -e "$ha_stop_file" ]; then
694 ha_info "$ha_stop_file found! Stop."
698 # Wait for the status file to be created during ha_load_timeout.
699 # An existing file guarantees that some application
700 # is completed. If no status file was created
701 # this function guarantees that we allow
702 # applications to continue after/before
703 # failover/failback during ha_load_timeout time.
705 until [ -e "$file" ] || (($(date +%s) >= end)); do
707 # check ha_stop_file again, it could appear
708 # during ha_load_timeout
710 if [ -e "$ha_stop_file" ]; then
711 ha_info "$ha_stop_file found! Stop."
714 ha_sleep 1 >/dev/null
# Fragment of ha_power_down: if the LFSCK node is among the victims,
# take the lfsck lock so background LFSCK is delayed until failback.
725 if $ha_lfsck_bg && [[ ${nodes//,/ /} =~ $ha_lfsck_node ]]; then
726 ha_info "$ha_lfsck_node down, delay start LFSCK"
727 ha_lock $ha_lfsck_lock
# Retry the power command up to 5 times, sleeping $ha_power_delay
# seconds between attempts.
730 ha_info "Powering down $nodes"
731 for i in $(seq 1 5); do
732 $ha_power_down_cmd $nodes && rc=0 && break
733 sleep $ha_power_delay
736 [ $rc -eq 0 ] || ha_info "Failed Powering down in $i attempts"

# Fragment of ha_power_up: same retry scheme for powering back on.
745 ha_info "Powering up $nodes"
746 for i in $(seq 1 5); do
747 $ha_power_up_cmd $nodes && rc=0 && break
748 sleep $ha_power_delay
751 [ $rc -eq 0 ] || ha_info "Failed Powering up in $i attempts"
757 # Print a random integer within [0, MAX).
764 # See "5.2 Bash Variables" from "info bash".
# RANDOM is uniform on [0, 32767]; scale it into [0, max).
766 echo -n $((RANDOM * max / 32768))

# Fragment of ha_aim: choose the next victim set — all victims at once
# when simultaneous mode is on, otherwise one victim picked at random.
774 if $ha_simultaneous ; then
775 nodes=$(echo ${ha_victims[@]})
778 i=$(ha_rand ${#ha_victims[@]})
779 nodes=${ha_victims[$i]}

# Fragment of ha_wait_node: poll (hostname via pdsh) until the rebooted
# nodes respond, the stop file appears, or 10 minutes elapse.
788 local end=$(($(date +%s) + 10 * 60))
790 ha_info "Waiting for $nodes to boot up"
791 until ha_on $nodes hostname >/dev/null 2>&1 ||
792 [ -e "$ha_stop_file" ] ||
793 (($(date +%s) >= end)); do
794 ha_sleep 1 >/dev/null

# Fragment of ha_failback: after a delay, trigger the CRM failback
# command if one is configured, then release the lfsck lock if held.
801 ha_info "Failback resources on $nodes in $ha_failback_delay sec"
803 ha_sleep $ha_failback_delay
804 [ "$ha_failback_cmd" ] ||
806 ha_info "No failback command set, skiping"
810 $ha_failback_cmd $nodes
811 [ -e $ha_lfsck_lock ] && ha_unlock $ha_lfsck_lock || true
# Fragment of the summary routine: report run duration and loop count.
816 ha_info "---------------8<---------------"
818 ha_info " Duration: $(($(date +%s) - $ha_start_time))s"
819 ha_info " Loops: $ha_nr_loops"

# Fragment of the main failover loop: until the expected duration
# elapses or a stop is requested, pick victims, power them down, verify
# the workloads survive, power back up, fail back, and repeat. In
# workloads-only mode the power/failback steps are skipped.
826 while (($(date +%s) < ha_start_time + ha_expected_duration)) &&
827 [ ! -e "$ha_stop_file" ]; do
828 ha_info "---------------8<---------------"
830 $ha_workloads_only || nodes=$(ha_aim)
832 ha_info "Failing $nodes"
833 $ha_workloads_only && ha_info " is skipped: workload only..."
# Randomize the failover moment within the configured max period.
835 ha_sleep $(ha_rand $ha_max_failover_period)
836 $ha_workloads_only || ha_power_down $nodes
838 ha_wait_loads || return
840 if [ -e $ha_stop_file ]; then
841 $ha_workloads_only || ha_power_up $nodes
845 ha_info "Bringing $nodes back"
846 ha_sleep $(ha_rand 10)
847 $ha_workloads_only ||
855 # Wait for the failback to start.
858 ha_wait_loads || return
860 ha_sleep $(ha_rand 20)
862 ha_nr_loops=$((ha_nr_loops + 1))
863 ha_info "Loop $ha_nr_loops done"
# Fragment of the main entry point: parse arguments, log the start,
# install the EXIT trap, create and stripe the shared test directory,
# run the loads (with optional background/after-run LFSCK), and log
# the end.
870 ha_process_arguments "$@"
873 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
874 "START: $0: $(date +%H:%M:%S' '%s)"
875 trap ha_trap_exit EXIT
877 ha_on ${ha_clients[0]} mkdir "$ha_test_dir"
878 ha_on ${ha_clients[0]} " \
879 $LFS setstripe $ha_stripe_params $ha_test_dir"
# Start LFSCK in the background if requested.
# NOTE(review): ha_lfsck_bg is both the flag variable and, apparently,
# a function name here — confirm against the full file.
881 $ha_lfsck_bg && ha_lfsck_bg
886 if $ha_workloads_dry_run; then
890 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
# Optionally run LFSCK after the loads and fail if anything was
# repaired.
895 $ha_lfsck_after && ha_start_lfsck | tee -a $ha_lfsck_log
897 $ha_lfsck_fail_on_repaired && ha_lfsck_repaired
899 # true because lfsck_bg could be stopped already
900 $ha_lfsck_bg && wait $LFSCK_BG_PID || true
902 if [ -e "$ha_fail_file" ]; then
905 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
906 "END: $0: $(date +%H:%M:%S' '%s)"