2 # -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
3 # vim:shiftwidth=4:softtabstop=4:tabstop=4:
7 # ha.sh - test Lustre HA (aka failover) configurations
15 # ha.sh tests Lustre HA (aka failover) configurations with a CRM.
23 # Specify client nodes.
26 # Specify server nodes.
29 # Specify victim nodes to be rebooted.
32 # Choose a parent of the test directory. "/mnt/lustre" if not specified.
35 # Define a duration for the test. 86400 seconds if not specified.
38 # Define a max failover period. 10 minutes if not set.
41 # Only run the workloads; no failure will be introduced.
42 # -v, -s are ignored in this case.
44 # Workloads dry run for several seconds; no failures will be introduced.
45 # This option is useful to verify the loads.
46 # -u is ignored in this case
48 # Reboot victim nodes simultaneously.
53 # A Lustre file system is up and mounted on all client nodes. This script
54 # does not mount or unmount any Lustre targets or clients, let alone format
57 # Each target has a failnode, so that workloads can continue after a power
# The CRM can be configured in one of two ways:
62 # Targets are automatically failed back when their primary node is back. This
63 # assumption avoids calling CRM-specific commands to trigger failbacks, making
# this script more CRM-neutral.
66 # Targets are not automatically failed back when their primary node is back.
67 # CRM-specific command is executed to trigger failbacks.
69 # A crash dump mechanism is configured to catch LBUGs, panics, etc.
# Each client runs a set of MPI and non-MPI workloads. These
74 # applications are run in short loops so that their exit status can be waited
75 # for and checked within reasonable time by ha_wait_loads.
76 # The set of MPI and non-MPI workloads are configurable by parameters:
78 # default set: dd, tar, iozone
80 # default set: ior, simul, mdtest
# The number of clients that run MPI loads is configured by parameter
83 # ha_mpi_instances. Only one client runs MPI workloads by default.
85 # MPI workloads can be run from several users. The list of users to use is
86 # configured by parameter ha_mpi_users, default is "mpiuser".
88 # PROCESS STRUCTURE AND IPC
90 # On the node where this script is run, the processes look like this:
94 # ~ ha.sh (ha_repeat_mpi_load ior)
96 # ~ ha.sh (ha_repeat_mpi_load simul)
98 # ~ ha.sh (ha_repeat_mpi_load mdtest)
100 # ~ ... (one for each MPI load)
102 # ~ ha.sh (ha_repeat_nonmpi_load client2 dbench)
103 # ~ pdsh client2 dbench
104 # ~ ha.sh (ha_repeat_nonmpi_load client2 iozone)
105 # ~ pdsh client2 iozone
106 # ~ ha.sh (ha_repeat_nonmpi_load client5 iozone)
107 # ~ pdsh client5 iozone
108 # ~ ... (one for each non-MPI load on each client)
110 # Each tilde represents a process. Indentations imply parent-children
113 # IPC is done by files in the temporary directory.
# Paths to the MPI workload binaries; each may be preset via the
# environment.  Left empty when the tool is not installed (ha_check_env
# then reports the corresponding load as unset).
SIMUL=${SIMUL:-$(command -v simul || true)}
IOR=${IOR:-$(command -v IOR || true)}
MDTEST=${MDTEST:-$(command -v mdtest || true)}
# IOR geometry and MPI fan-out, overridable via the environment.
ior_blockSize=${ior_blockSize:-6g}                 # per-task IOR block size
mpi_threads_per_client=${mpi_threads_per_client:-2} # MPI ranks per client
iozone_SIZE=${iozone_SIZE:-262144} # iozone file size in KB (256m)
# mpirun and lfs utilities; resolved from PATH unless preset in the
# environment (MPIRUN / LFS).
mpirun=${MPIRUN:-$(command -v mpirun)}
LFS=${LFS:-$(command -v lfs)}
132 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
133 local tag=${ha_mpi_load_tags[$load]}
134 local bin=$(echo $tag | tr '[:lower:]' '[:upper:]')
135 if [ x${!bin} = x ]; then
136 ha_error ha_mpi_loads: ${ha_mpi_loads}, $bin is not set
144 echo "$0: $(date +%H:%M:%S' '%s):" "$@"
149 local date=$(date +%H:%M:%S' '%s)
152 echo $date ${FUNCNAME[1]} $2 >> $ha_stop_file ||
155 echo $date ${FUNCNAME[1]} $2 >> $ha_fail_file ||
158 echo $date ${FUNCNAME[1]} $2 >> $ha_lfsck_stop ||
164 local nodes=${1// /,}
166 ha_on $nodes "lctl mark $*"
178 ha_error "Trap ERR triggered by:"
179 ha_error " $BASH_COMMAND"
180 ha_error "Call trace:"
181 for ((i = 0; i < ${#FUNCNAME[@]}; i++)); do
182 ha_error " ${FUNCNAME[$i]} [${BASH_SOURCE[$i]}:${BASH_LINENO[$i]}]"
# Runtime state and configuration.  Most values are overridable via the
# UPPERCASE environment variables shown in the defaults.

# IPC directory: flag files below are created/read by the load runners
# and the main loop to signal stop/fail conditions.
declare ha_power_down_pids
declare ha_tmp_dir=/tmp/$(basename $0)-$$
declare ha_stop_file=$ha_tmp_dir/stop
declare ha_fail_file=$ha_tmp_dir/fail
declare ha_pm_states=$ha_tmp_dir/ha_pm_states
declare ha_status_file_prefix=$ha_tmp_dir/status
declare -a ha_status_files
declare ha_machine_file=$ha_tmp_dir/machine_file

# LFSCK settings: background run, node/device to start it on, types to
# check, wait timeout, and whether repaired items fail the run.
declare ha_lfsck_log=$ha_tmp_dir/lfsck.log
declare ha_lfsck_lock=$ha_tmp_dir/lfsck.lock
declare ha_lfsck_stop=$ha_tmp_dir/lfsck.stop
declare ha_lfsck_bg=${LFSCK_BG:-false}
declare ha_lfsck_after=${LFSCK_AFTER:-false}
declare ha_lfsck_node=${LFSCK_NODE:-""}
declare ha_lfsck_device=${LFSCK_DEV:-""}
declare ha_lfsck_types=${LFSCK_TYPES:-"namespace layout"}
declare ha_lfsck_custom_params=${LFSCK_CUSTOM_PARAMS:-""}
declare ha_lfsck_wait=${LFSCK_WAIT:-1200}
declare ha_lfsck_fail_on_repaired=${LFSCK_FAIL_ON_REPAIRED:-false}

# Power management: commands and delays used to crash and revive the
# victim nodes, plus the optional failback command.
declare ha_power_down_cmd=${POWER_DOWN:-"pm -0"}
declare ha_power_up_cmd=${POWER_UP:-"pm -1"}
declare ha_power_delay=${POWER_DELAY:-60}
declare ha_node_up_delay=${NODE_UP_DELAY:-10}
declare ha_wait_nodes_up=${WAIT_NODES_UP:-600}
declare ha_pm_host=${PM_HOST:-$(hostname)}
declare ha_failback_delay=${DELAY:-5}
declare ha_failback_cmd=${FAILBACK:-""}

# Striping and MDT placement for the test directory and per-load dirs;
# the *_random flags pick a random index/count per loop iteration.
declare ha_stripe_params=${STRIPEPARAMS:-"-c 0"}
declare ha_test_dir_stripe_count=${TDSTRIPECOUNT:-"1"}
declare ha_test_dir_mdt_index=${TDMDTINDEX:-"0"}
declare ha_test_dir_mdt_index_random=${TDMDTINDEXRAND:-false}
declare ha_dir_stripe_count=${DSTRIPECOUNT:-"1"}
declare ha_dir_stripe_count_random=${DSTRIPECOUNTRAND:-false}
declare ha_mdt_index=${MDTINDEX:-"0"}
declare ha_mdt_index_random=${MDTINDEXRAND:-false}

# Node sets (filled in by ha_process_arguments) and test directories.
declare -a ha_clients
declare -a ha_servers
declare -a ha_victims
declare -a ha_victims_pair
declare ha_test_dir=/mnt/lustre/$(basename $0)-$$
# NB: ${ha_test_dirs="..."} also assigns the default to ha_test_dirs.
declare -a ha_testdirs=(${ha_test_dirs="$ha_test_dir"})
231 for ((i=0; i<${#ha_testdirs[@]}; i++)); do
232 echo I=$i ${ha_testdirs[i]}
233 ha_testdirs[i]="${ha_testdirs[i]}/$(basename $0)-$$"
234 echo i=$i ${ha_testdirs[i]}
# Run control: duration, loop accounting, and operating-mode flags
# (the mode flags are flipped by -w/-r/-m in ha_process_arguments).
declare ha_cleanup=${CLEANUP:-true}
declare ha_start_time=$(date +%s)
declare ha_expected_duration=$((60 * 60 * 24))	# default 86400s; -u overrides
declare ha_max_failover_period=10		# minutes; -p overrides
declare ha_nr_loops=0
declare ha_stop_signals="SIGINT SIGTERM SIGHUP"
declare ha_load_timeout=${LOAD_TIMEOUT:-$((60 * 10))}
declare ha_workloads_only=false
declare ha_workloads_dry_run=false
declare ha_simultaneous=false
# Number of clients that run MPI workloads (default: one).
declare ha_mpi_instances=${ha_mpi_instances:-1}
# MPI workload tags and the users that run them.
# NB: ${var="dflt"} also assigns the default back to ha_mpi_loads /
# ha_mpi_users themselves.
declare ha_mpi_loads=${ha_mpi_loads="ior simul mdtest"}
declare -a ha_mpi_load_tags=($ha_mpi_loads)
declare -a ha_mpiusers=(${ha_mpi_users="mpiuser"})
254 declare -A ha_mpiopts
256 for ((i=0; i<${#ha_mpiusers[@]}; i++)); do
257 u=${ha_mpiusers[i]%%:*}
259 # user gets empty option if ha_mpi_users does not specify it explicitly
260 [[ ${ha_mpiusers[i]} =~ : ]] && o=${ha_mpiusers[i]##*:}
262 ha_mpiopts[$u]+=" $o"
264 ha_users=(${!ha_mpiopts[@]})
# Per-load parameter strings.  The single-quoted defaults are kept
# unexpanded here on purpose: they are eval'ed later so that embedded
# variables (e.g. $ior_blockSize, $STRIPEPARAMS) expand at that point.
declare ha_ior_params=${IORP:-'" -b $ior_blockSize -t 2m -w -W -T 1"'}
declare ha_simul_params=${SIMULP:-'" -n 10"'}
declare ha_mdtest_params=${MDTESTP:-'" -i 1 -n 1000"'}
declare ha_mpirun_options=${MPIRUN_OPTIONS:-""}
declare ha_clients_stripe=${CLIENTSSTRIPE:-'"$STRIPEPARAMS"'}
declare ha_nclientsset=${NCLIENTSSET:-1}
declare ha_ninstmustfail=${NINSTMUSTFAIL:-0}
# Environment prefix for the racer load (see ha_nonmpi_load_cmds).
declare ha_racer_params=${RACERP:-"MDSCOUNT=1"}
# Expand the parameter strings into arrays of per-instance parameter
# sets.  eval is deliberate: it performs the variable expansion that
# was deferred by the single-quoted defaults above.
eval ha_params_ior=($ha_ior_params)
eval ha_params_simul=($ha_simul_params)
eval ha_params_mdtest=($ha_mdtest_params)
eval ha_stripe_clients=($ha_clients_stripe)
# Cardinalities of the parameter arrays built above; used to round-robin
# parameter sets and stripe settings across client instances.
declare ha_nparams_ior=${#ha_params_ior[@]}
declare ha_nparams_simul=${#ha_params_simul[@]}
declare ha_nparams_mdtest=${#ha_params_mdtest[@]}
declare ha_nstripe_clients=${#ha_stripe_clients[@]}
286 declare -A ha_mpi_load_cmds=(
287 [ior]="$IOR -o {}/f.ior {params}"
288 [simul]="$SIMUL {params} -d {}"
289 [mdtest]="$MDTEST {params} -d {}"
# racer script location; defaults to the copy shipped next to this script.
declare racer=${RACER:-"$(dirname $0)/racer/racer.sh"}
# Non-MPI workload tags; ${var="dflt"} also assigns the default back to
# ha_nonmpi_loads itself.
declare ha_nonmpi_loads=${ha_nonmpi_loads="dd tar iozone"}
declare -a ha_nonmpi_load_tags=($ha_nonmpi_loads)
296 declare -A ha_nonmpi_load_cmds=(
297 [dd]="dd if=/dev/zero of={}/f.dd bs=1M count=256"
298 [tar]="tar cf - /etc | tar xf - -C {}"
299 [iozone]="iozone -a -e -+d -s $iozone_SIZE {}/f.iozone"
300 [racer]="$ha_racer_params $racer {}"
# Post-load sanity command: lists files whose attributes cannot be read
# ('?' fields in ls output); '{}' is replaced with the load's directory
# by the load runners.
declare ha_check_attrs="find {} -type f -ls 2>&1 | grep -e '?'"
306 ha_info "Usage: $0 -c HOST[,...] -s HOST[,...]" \
307 "-v HOST[,...] -f HOST[,...] [-d DIRECTORY] [-u SECONDS]"
310 ha_process_arguments()
314 while getopts hc:s:v:d:p:u:wrmf: opt; do
321 ha_clients=(${OPTARG//,/ })
324 ha_servers=(${OPTARG//,/ })
327 ha_victims=(${OPTARG//,/ })
330 ha_test_dir=$OPTARG/$(basename $0)-$$
333 ha_expected_duration=$OPTARG
336 ha_max_failover_period=$OPTARG
339 ha_workloads_only=true
342 ha_workloads_dry_run=true
348 ha_victims_pair=(${OPTARG//,/ })
357 if [ -z "${ha_clients[*]}" ]; then
358 ha_error "-c is mandatory"
362 if ! ($ha_workloads_dry_run ||
363 $ha_workloads_only) &&
364 ([ -z "${ha_servers[*]}" ] ||
365 [ -z "${ha_victims[*]}" ]); then
366 ha_error "-s, and -v are all mandatory"
380 # -S is to be used here to track the
381 # remote command return values
383 pdsh -S -w $nodes "PATH=/usr/local/sbin:/usr/local/bin:/sbin:\
384 /bin:/usr/sbin:/usr/bin; $@" ||
393 if [ -e "$ha_fail_file" ]; then
394 ha_info "Test directories ${ha_testdirs[@]} not removed"
395 ha_info "Temporary directory $ha_tmp_dir not removed"
398 ha_on ${ha_clients[0]} rm -rf ${ha_testdirs[@]} ||
399 ha_info "Test directories ${ha_testdirs[@]} not removed"
400 ha_info "Please find the results in the directory $ha_tmp_dir"
404 ha_trap_stop_signals()
406 ha_info "${ha_stop_signals// /,} received"
407 ha_touch stop "${ha_stop_signals// /,} received"
414 ha_info "Sleeping for ${n}s"
# sleep(1) could be interrupted.
425 while [ -e $lock ]; do
434 until mkdir "$lock" >/dev/null 2>&1; do
435 ha_sleep 1 >/dev/null
448 local nodes=${1// /,}
449 local file=/tmp/$(basename $0)-$$-$(date +%s).dk
450 local lock=$ha_tmp_dir/lock-dump-logs
454 ha_info "Dumping lctl log to $file"
457 # some nodes could crash, so
458 # do not exit with error if not all logs are dumped
460 ha_on $nodes "lctl dk >>$file" || rc=$?
463 ha_error "not all logs are dumped! Some nodes are unreachable."
474 local stripeparams=$6
477 local mpirunoptions=$9
479 local tag=${ha_mpi_load_tags[$load]}
480 local cmd=${ha_mpi_load_cmds[$tag]}
481 local dir=$test_dir/$client-$tag
482 local log=$ha_tmp_dir/$client-$tag
486 local avg_loop_time=0
487 local start_time=$(date +%s)
488 local check_attrs=${ha_check_attrs//"{}"/$dir}
490 cmd=${cmd//"{}"/$dir}
491 cmd=${cmd//"{params}"/$parameter}
493 [[ -n "$ha_postcmd" ]] && ha_postcmd=${ha_postcmd//"{}"/$dir}
494 [[ -n "$ha_precmd" ]] && ha_precmd=${ha_precmd//"{}"/$dir}
495 ha_info "Starting $tag"
497 machines="-machinefile $machines"
498 while [ ! -e "$ha_stop_file" ] && ((rc == 0)) && ((rccheck == 0)); do
499 ha_info "$client Starts: $mpiuser: $cmd" 2>&1 | tee -a $log
502 if $ha_mdt_index_random && [ $ha_mdt_index -ne 0 ]; then
503 mdt_index=$(ha_rand $((ha_mdt_index + 1)) )
505 mdt_index=$ha_mdt_index
507 local dir_stripe_count
508 if $ha_dir_stripe_count_random &&
509 [ $ha_dir_stripe_count -ne 1 ]; then
510 dir_stripe_count=$(($(ha_rand $ha_dir_stripe_count) + 1))
512 dir_stripe_count=$ha_dir_stripe_count
514 [[ -n "$ha_precmd" ]] && ha_info "$ha_precmd" &&
515 ha_on $client "$ha_precmd" >>"$log" 2>&1
516 ha_info "$client Creates $dir with -i$mdt_index -c$dir_stripe_count "
517 ha_on $client $LFS mkdir -i$mdt_index -c$dir_stripe_count "$dir" &&
518 ha_on $client $LFS getdirstripe "$dir" &&
519 ha_on $client $LFS setstripe $stripeparams $dir &&
520 ha_on $client $LFS getstripe $dir &&
521 ha_on $client chmod a+xwr $dir &&
522 ha_on $client "su $mpiuser bash -c \" $mpirun $mpirunoptions \
523 -np $((${#ha_clients[@]} * mpi_threads_per_client / ha_nclientsset)) \
524 $machines $cmd \" " || rc=$?
525 ha_on ${ha_clients[0]} "$check_attrs && \
527 $check_attrs " && rccheck=1
528 [[ -n "$ha_postcmd" ]] && ha_info "$ha_postcmd" &&
529 ha_on $client "$ha_postcmd" >>"$log" 2>&1
530 (( ((rc == 0)) && ((rccheck == 0)) && (( mustpass != 0 )) )) ||
531 (( ((rc != 0)) && ((rccheck == 0)) && (( mustpass == 0 )) )) &&
532 ha_on $client rm -rf "$dir";
535 ha_info $client: rc=$rc rccheck=$rccheck mustpass=$mustpass
537 # mustpass=0 means that failure is expected
538 if (( rccheck != 0 )); then
539 ha_touch stop,fail $client,$tag
540 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
541 elif (( rc !=0 )); then
542 if (( mustpass != 0 )); then
543 ha_touch stop,fail $client,$tag
544 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
549 elif (( mustpass == 0 )); then
550 ha_touch stop,fail $client,$tag
551 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
553 echo rc=$rc rccheck=$rccheck mustpass=$mustpass >"$status"
555 nr_loops=$((nr_loops + 1))
558 [ $nr_loops -ne 0 ] &&
559 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
561 ha_info "$tag stopped: rc=$rc mustpass=$mustpass \
562 avg loop time $avg_loop_time"
579 # ha_mpi_instances defines the number of
580 # clients start mpi loads; should be <= ${#ha_clients[@]}
582 # ha_mpi_instances = 0
584 # ${#ha_mpi_load_tags[@]} =0
585 local inst=$ha_mpi_instances
586 (( inst == 0 )) || (( ${#ha_mpi_load_tags[@]} == 0 )) &&
587 ha_info "no mpi load to start" &&
590 (( inst <= ${#ha_clients[@]} )) || inst=${#ha_clients[@]}
592 # Define names for machinefiles for each client set
593 for (( n=0; n < $ha_nclientsset; n++ )); do
594 mach[$n]=$ha_machine_file$n
597 for ((n = 0; n < ${#ha_clients[@]}; n++)); do
598 m=$(( n % ha_nclientsset))
600 ha_info machine_file=$machines
601 echo ${ha_clients[n]} >> $machines
603 local dirname=$(dirname $ha_machine_file)
604 for client in ${ha_clients[@]}; do
605 ha_on $client mkdir -p $dirname
606 scp $ha_machine_file* $client:$dirname
610 for ((n = 0; n < $inst; n++)); do
611 client=${ha_clients[n]}
612 nmpi=$((n % ${#ha_users[@]}))
613 mpiuser=${ha_users[nmpi]}
614 ndir=$((n % ${#ha_testdirs[@]}))
615 test_dir=${ha_testdirs[ndir]}
616 for ((load = 0; load < ${#ha_mpi_load_tags[@]}; load++)); do
617 tag=${ha_mpi_load_tags[$load]}
618 status=$ha_status_file_prefix-$tag-$client
621 local num=ha_nparams_$tag
623 local aref=ha_params_$tag[nparam]
624 local parameter=${!aref}
625 local nstripe=$((n % ha_nstripe_clients))
626 aref=ha_stripe_clients[nstripe]
627 local stripe=${!aref}
628 local m=$(( n % ha_nclientsset))
631 [[ $ha_ninstmustfail == 0 ]] ||
632 mustpass=$(( n % ha_ninstmustfail ))
633 ha_repeat_mpi_load $client $load $status "$parameter" \
634 $machines "$stripe" "$mpiuser" "$mustpass" \
635 "${ha_mpiopts[$mpiuser]} $ha_mpirun_options" "$test_dir" &
636 ha_status_files+=("$status")
641 ha_repeat_nonmpi_load()
646 local tag=${ha_nonmpi_load_tags[$load]}
647 local cmd=${ha_nonmpi_load_cmds[$tag]}
649 local dir=$test_dir/$client-$tag
650 local log=$ha_tmp_dir/$client-$tag
655 local avg_loop_time=0
656 local start_time=$(date +%s)
657 local check_attrs=${ha_check_attrs//"{}"/$dir}
659 cmd=${cmd//"{}"/$dir}
661 ha_info "Starting $tag on $client on $dir"
663 while [ ! -e "$ha_stop_file" ] && ((rc == 0)); do
664 ha_info "$client Starts: $cmd" 2>&1 | tee -a $log
665 ha_on $client "mkdir -p $dir && \
666 $cmd" >>"$log" 2>&1 || rc=$?
668 ha_on $client "$check_attrs && \
670 $check_attrs " >>"$log" 2>&1 && rccheck=1 ||
671 ha_on $client "rm -rf $dir" >>"$log" 2>&1
673 ha_info rc=$rc rccheck=$rccheck
675 if (( (rc + rccheck) != 0 )); then
676 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
677 ha_touch stop,fail $client,$tag
681 nr_loops=$((nr_loops + 1))
684 [ $nr_loops -ne 0 ] &&
685 avg_loop_time=$((($(date +%s) - start_time) / nr_loops))
687 ha_info "$tag on $client stopped: rc $rc avg loop time ${avg_loop_time}s"
690 ha_start_nonmpi_loads()
700 for (( n = 0; n < ${#ha_clients[@]}; n++)); do
701 client=${ha_clients[n]}
702 ndir=$((n % ${#ha_testdirs[@]}))
703 test_dir=${ha_testdirs[ndir]}
704 for ((load = 0; load < ${#ha_nonmpi_load_tags[@]}; load++)); do
705 tag=${ha_nonmpi_load_tags[$load]}
706 status=$ha_status_file_prefix-$tag-$client
707 ha_repeat_nonmpi_load $client $load $status $test_dir &
708 ha_status_files+=("$status")
# Optional background command run alongside the loads ('{}' in it is
# replaced with the test directories) and its log file.
declare ha_bgcmd=${ha_bgcmd:-""}
declare ha_bgcmd_log=$ha_tmp_dir/bgcmdlog
717 [[ -z "$ha_bgcmd" ]] && return 0
718 for ((i=0; i<${#ha_testdirs[@]}; i++)); do
719 ha_bgcmd=${ha_bgcmd//"{}"/${ha_testdirs[i]}}
722 ha_info "BG cmd: $ha_bgcmd"
724 [ -f $ha_stop_file ] &&
725 ha_info "$ha_stop_file found! $ha_bgcmd no started" &&
727 eval $ha_bgcmd 2>&1 | tee -a $ha_bgcmd_log
731 ha_info CMD BG PID: $CMD_BG_PID
732 ps aux | grep $CMD_BG_PID
741 [ -f $ha_lfsck_stop ] && ha_info "LFSCK stopped" && break
742 [ -f $ha_stop_file ] &&
743 ha_info "$ha_stop_file found! LFSCK not started" &&
745 ha_start_lfsck 2>&1 | tee -a $ha_lfsck_log
749 ha_info LFSCK BG PID: $LFSCK_BG_PID
752 ha_wait_lfsck_completed () {
754 local -a types=($ha_lfsck_types)
758 local nodes="${ha_servers[@]}"
761 # -A start LFSCK on all nodes
763 [ ${#types[@]} -eq 0 ] && types=(namespace layout)
764 ha_info "Waiting LFSCK completed in $ha_lfsck_wait sec: types ${types[@]}"
765 for type in ${types[@]}; do
767 for (( i=0; i<=ha_lfsck_wait; i++)); do
768 status=($(ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null | \
769 awk '/status/ { print $3 }'))
770 for (( s=0; s<${#status[@]}; s++ )); do
771 # "partial" is expected after HARD failover
772 [[ "${status[s]}" = "completed" ]] ||
773 [[ "${status[s]}" = "partial" ]] || break
775 [[ $s -eq ${#status[@]} ]] && eval var_$type=1 && break
778 ha_info "LFSCK $type status in $i sec:"
779 ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null | grep status
783 for type in ${types[@]}; do
785 ha_on $nodes lctl get_param -n *.*.lfsck_$type 2>/dev/null
786 [[ ${!var} -eq 1 ]] ||
787 { ha_info "lfsck not completed in $ha_lfsck_wait sec";
795 local -a types=($ha_lfsck_types)
798 # -A: start LFSCK on all nodes via the specified MDT device
799 # (see "-M" option) by single LFSCK command
800 local params=" -A -r $ha_lfsck_custom_params"
802 # use specified device if set
803 [ -n "$ha_lfsck_device" ] && params="-M $ha_lfsck_device $params"
805 # -t: check type(s) to be performed (default all)
806 # check only specified types if set
807 if [ ${#types[@]} -ne 0 ]; then
808 local type="${types[@]}"
809 params="$params -t ${type// /,}"
812 ha_info "LFSCK start $params"
813 ha_on $ha_lfsck_node "lctl lfsck_start $params" || rc=1
814 if [ $rc -ne 0 ]; then
815 if [ -e $ha_lfsck_lock ]; then
817 ha_wait_unlock $ha_lfsck_lock
819 ha_on $ha_lfsck_node "lctl lfsck_start $params" || rc=1
824 { ha_touch stop,fail,lfsck; return 1; }
826 ha_wait_lfsck_completed ||
827 { ha_touch stop,fail,lfsck; return 1; }
836 n=$(cat $ha_lfsck_log | awk '/repaired/ {print $3}' |\
837 awk '{sum += $1} END { print sum }')
839 { ha_info "Total repaired: $n";
840 ha_touch fail; return 1; }
847 $ha_lfsck_bg && ha_lfsck_bg
848 trap ha_trap_stop_signals $ha_stop_signals
849 ha_start_nonmpi_loads
856 [[ -n $CMD_BG_PID ]] && wait $CMD_BG_PID || true
857 # true because of lfsck_bg could be stopped already
858 $ha_lfsck_bg && wait $LFSCK_BG_PID || true
859 trap - $ha_stop_signals
860 ha_info "Waiting for workloads to stop"
867 local end=$(($(date +%s) + ha_load_timeout))
869 ha_info "Waiting $ha_load_timeout sec for workload status..."
870 rm -f "${ha_status_files[@]}"
873 # return immediately if ha_stop_file exists,
874 # all status_files not needed to be checked
876 for file in "${ha_status_files[@]}"; do
877 if [ -e "$ha_stop_file" ]; then
878 ha_info "$ha_stop_file found! Stop."
# Wait for the status file to be created during ha_load_timeout.
883 # Existing file guarantees that some application
884 # is completed. If no status file was created
885 # this function guarantees that we allow
886 # applications to continue after/before
887 # failover/failback during ha_load_timeout time.
889 until [ -e "$file" ] || (($(date +%s) >= end)); do
891 # check ha_stop_file again, it could appear
892 # during ha_load_timeout
894 if [ -e "$ha_stop_file" ]; then
895 ha_info "$ha_stop_file found! Stop."
898 ha_sleep 1 >/dev/null
906 local expected_state=$2
912 # store pm -x -q $nodes results in a file to have
913 # more information about nodes statuses
914 ha_on $ha_pm_host pm -x -q $nodes | awk '{print $2 $3}' > $ha_pm_states
918 while IFS=": " read node state; do
919 [[ "$state" = "$expected_state" ]] && {
920 nodes=${nodes/$node/}
927 if [ -n "$nodes" ]; then
934 ha_power_down_cmd_fn()
941 case $ha_power_down_cmd in
942 # format is: POWER_DOWN=sysrqcrash
944 cmd="pdsh -S -w $nodes -u 120 \"echo c > /proc/sysrq-trigger\" &"
947 ha_power_down_pids=$(echo $ha_power_down_pids $pid)
948 ha_info "ha_power_down_pids: $ha_power_down_pids"
949 [[ -z "$ha_power_down_pids" ]] ||
950 ps aux | grep " ${ha_power_down_pids// / \| } " ||
954 cmd="$ha_power_down_cmd $nodes"
970 case $ha_power_down_cmd in
972 sysrqcrash) state=off ;;
976 if $ha_lfsck_bg && [[ ${nodes//,/ /} =~ $ha_lfsck_node ]]; then
977 ha_info "$ha_lfsck_node down, delay start LFSCK"
978 ha_lock $ha_lfsck_lock
981 ha_info "Powering down $nodes : cmd: $ha_power_down_cmd"
982 ha_power_down_pids=""
983 for (( i=0; i<10; i++ )) {
984 ha_info "attempt: $i"
985 ha_power_down_cmd_fn $nodes || rc=1
986 ha_sleep $ha_power_delay
987 ha_powermanage $nodes $state && rc=0 && break
989 if [[ -n "$ha_power_down_pids" ]]; then
990 kill -9 $ha_power_down_pids || true
991 wait $ha_power_down_pids || true
995 ha_info "Failed Powering down in $i attempts:" \
1007 for ((i=0; i<${#ha_victims[@]}; i++)) {
1008 [[ ${ha_victims[i]} == $node ]] && echo ${ha_victims_pair[i]} &&
1011 [[ $i -ne ${#ha_victims[@]} ]] ||
1012 ha_error "No pair found!"
1018 local end=$(($(date +%s) + ha_node_up_delay))
1021 if [[ ${#ha_victims_pair[@]} -eq 0 ]]; then
1022 ha_sleep $ha_node_up_delay
1026 # Check CRM status on failover pair
1027 while (($(date +%s) <= end)); do
1029 for n in ${nodes//,/ }; do
1030 local pair=$(ha_get_pair $n)
1031 local status=$(ha_on $pair crm_mon -1rQ | \
1032 grep -w $n | head -1)
1034 ha_info "$n pair: $pair status: $status"
1035 [[ "$status" == *OFFLINE* ]] ||
1040 if [[ $rc -eq 0 ]]; then
1041 ha_info "CRM: Got all victims status OFFLINE"
1047 ha_info "$nodes CRM status not OFFLINE"
1048 for n in ${nodes//,/ }; do
1049 local pair=$(ha_get_pair $n)
1051 ha_info "CRM --- $n"
1052 ha_on $pair crm_mon -1rQ
1054 ha_error "CRM: some of $nodes are not OFFLINE in $ha_node_up_delay sec"
1064 ha_power_up_delay $nodes
1065 ha_info "Powering up $nodes : cmd: $ha_power_up_cmd"
1066 for (( i=0; i<10; i++ )) {
1067 ha_info "attempt: $i"
1068 $ha_power_up_cmd $nodes &&
1069 ha_powermanage $nodes on && rc=0 && break
1070 sleep $ha_power_delay
1074 ha_info "Failed Powering up in $i attempts: $ha_power_up_cmd"
1083 # Print a random integer within [0, MAX).
1090 # See "5.2 Bash Variables" from "info bash".
1092 echo -n $((RANDOM * max / 32768))
1100 if $ha_simultaneous ; then
1101 nodes=$(echo ${ha_victims[@]})
1104 i=$(ha_rand ${#ha_victims[@]})
1105 nodes=${ha_victims[$i]}
1114 local end=$(($(date +%s) + $ha_wait_nodes_up))
1116 ha_info "Waiting for $nodes to boot up in $ha_wait_nodes_up"
1117 until ha_on $nodes hostname >/dev/null 2>&1 ||
1118 [ -e "$ha_stop_file" ] ||
1119 (($(date +%s) >= end)); do
1120 ha_sleep 1 >/dev/null
1123 ha_info "Check where we are ..."
1124 [ -e "$ha_stop_file" ] &&
1125 ha_info "$ha_stop_file found!"
1128 nodes_up=($(ha_on $nodes hostname | awk '{ print $2 }'))
1129 ha_info "Nodes $nodes are up: ${nodes_up[@]}"
1130 local -a n=(${nodes//,/ })
1131 if [[ ${#nodes_up[@]} -ne ${#n[@]} ]]; then
1132 ha_info "Failed boot up $nodes in $ha_wait_nodes_up sec!"
1142 ha_info "Failback resources on $nodes in $ha_failback_delay sec"
1144 ha_sleep $ha_failback_delay
1145 [ "$ha_failback_cmd" ] ||
1147 ha_info "No failback command set, skiping"
1151 $ha_failback_cmd $nodes
1152 [ -e $ha_lfsck_lock ] && ha_unlock $ha_lfsck_lock || true
1157 ha_info "---------------8<---------------"
1159 ha_info " Duration: $(($(date +%s) - $ha_start_time))s"
1160 ha_info " Loops: $ha_nr_loops"
1167 while (($(date +%s) < ha_start_time + ha_expected_duration)) &&
1168 [ ! -e "$ha_stop_file" ]; do
1169 ha_info "---------------8<---------------"
1171 $ha_workloads_only || nodes=$(ha_aim)
1173 ha_info "Failing $nodes"
1174 $ha_workloads_only && ha_info " is skipped: workload only..."
1176 ha_sleep $(ha_rand $ha_max_failover_period)
1177 $ha_workloads_only || ha_power_down $nodes
1179 ha_wait_loads || return
1181 if [ -e $ha_stop_file ]; then
1182 $ha_workloads_only || ha_power_up $nodes
1186 ha_info "Bringing $nodes back"
1187 ha_sleep $(ha_rand 10)
1188 $ha_workloads_only ||
1191 ha_wait_nodes $nodes
1196 # Wait for the failback to start.
1199 ha_wait_loads || return
1201 ha_sleep $(ha_rand 20)
1203 ha_nr_loops=$((ha_nr_loops + 1))
1204 ha_info "Loop $ha_nr_loops done"
1211 ha_process_arguments "$@"
1214 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
1215 "START: $0: $(date +%H:%M:%S' '%s)"
1216 trap ha_trap_exit EXIT
1220 if $ha_test_dir_mdt_index_random &&
1221 [ $ha_test_dir_mdt_index -ne 0 ]; then
1222 mdt_index=$(ha_rand $((ha_test_dir_mdt_index + 1)) )
1224 mdt_index=$ha_test_dir_mdt_index
1228 test_dir=${ha_testdirs[0]}
1229 ha_on ${ha_clients[0]} "$LFS mkdir -i$mdt_index \
1230 -c$ha_test_dir_stripe_count $test_dir"
1231 for ((i=0; i<${#ha_testdirs[@]}; i++)); do
1232 test_dir=${ha_testdirs[i]}
1233 ha_on ${ha_clients[0]} $LFS getdirstripe $test_dir
1234 ha_on ${ha_clients[0]} " \
1235 $LFS setstripe $ha_stripe_params $test_dir"
1241 if $ha_workloads_dry_run; then
1245 ha_dump_logs "${ha_clients[*]} ${ha_servers[*]}"
1250 $ha_lfsck_after && ha_start_lfsck | tee -a $ha_lfsck_log
1252 $ha_lfsck_fail_on_repaired && ha_lfsck_repaired
1254 if [ -e "$ha_fail_file" ]; then
1257 ha_log "${ha_clients[*]} ${ha_servers[*]}" \
1258 "END: $0: $(date +%H:%M:%S' '%s)"