4 # For duration of 24 hours repeatedly failover a random MDS at
5 # 10 minute intervals and verify that no application errors occur.
7 # Test runs one of CLIENT_LOAD progs on remote clients.
# Locate the Lustre tree and source the shared test framework plus the
# per-configuration settings file (cfg/$NAME.sh).
LUSTRE=${LUSTRE:-`dirname $0`/..}
CLEANUP=${CLEANUP:-""}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
# Per-suite log files: main log plus a verbose ".debug" companion.
TESTSUITELOG=${TESTSUITELOG:-$TMP/$(basename $0 .sh)}
DEBUGLOG=$TESTSUITELOG.debug
# NOTE(review): the environment dump between these two markers is not
# visible in this chunk -- confirm against the full file.
echo "--- env ---" >&2
echo "--- env ---" >&2
# A directory shared by all nodes is required to coordinate run state
# (END_RUN_FILE below lives there).
[ "$SHARED_DIRECTORY" ] || \
    { FAIL_ON_ERROR=true skip_env "$0 Empty SHARED_DIRECTORY" && exit 0; }
check_shared_dir $SHARED_DIRECTORY ||
    error "$SHARED_DIRECTORY isn't a shared directory"
# Remote clients are mandatory; the local test node is excluded later.
[ -n "$CLIENTS" ] || \
    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients" && exit 0; }
[ $CLIENTCOUNT -ge 3 ] || \
    { FAIL_ON_ERROR=true skip_env "$0 Need two or more remote clients, have $((CLIENTCOUNT - 1))" && exit 0; }
# Sentinel file: a failing client load creates/appends to it, which makes
# every other load (and the main loop below) stop.
END_RUN_FILE=${END_RUN_FILE:-$SHARED_DIRECTORY/end_run_file}
LOAD_PID_FILE=${LOAD_PID_FILE:-$TMP/client-load.pid}
# Servers must be reachable via dsh-style remote execution.
remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
check_and_setup_lustre
# Remove test artifacts left over from previous runs.
rm -rf $DIR/[df][0-9]*
# Upper bound on recovery time; reported in the slow-failover warning below.
max_recov_time=$(max_recovery_time)
# the test node needs to be insulated from a lustre failure as much as possible,
# so not even loading the lustre modules is ideal.
# -- remove hostname from clients list
zconf_umount $(hostname) $MOUNT
NODES_TO_USE=${NODES_TO_USE:-$CLIENTS}
NODES_TO_USE=$(exclude_items_from_list $NODES_TO_USE $(hostname))
# Verify every client-load program exists on every participating client.
check_progs_installed $NODES_TO_USE ${CLIENT_LOADS[@]}
MDTS=$(get_facets MDS)
OSTS=$(get_facets OST)
ERRORS_OK="" # No application failures should occur during this test.
FLAVOR=${FLAVOR:-"MDS"}
# Select which server facets are failed over (MDS flavor by default).
# NOTE(review): the body of this branch (and its else/fi) is not visible
# in this chunk -- confirm against the full file.
if [ "$FLAVOR" == "MDS" ]; then
# Short 30-minute run when SLOW=no; otherwise the full 24-hour soak with
# 10-minute failover periods.
# NOTE(review): the else/fi of this branch is not visible in this chunk.
if [ "$SLOW" = "no" ]; then
    DURATION=${DURATION:-$((60 * 30))}
    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 5))}
    DURATION=${DURATION:-$((60 * 60 * 24))}
    SERVER_FAILOVER_PERIOD=${SERVER_FAILOVER_PERIOD:-$((60 * 10))} # 10 minutes
# Per-node vmstat output file (gzipped by summary_and_cleanup on exit).
vmstatLOG=${TESTSUITELOG}_$(basename $0 .sh).vmstat
# server_numfailovers <facet>
# Report how many times <facet> has been failed over, read via indirect
# expansion of the dynamically named variable <facet>_numfailovers.
# NOTE(review): parts of this function (e.g. "local facet=$1", the echo of
# the value, and the closing brace) are not visible in this chunk.
server_numfailovers () {
    local var=${facet}_numfailovers
    [[ ${!var} ]] && val=${!var}
# servers_numfailovers
# Print the failover count for every MDT and OST facet, one per line.
# NOTE(review): the loop terminator and closing brace are not visible in
# this chunk.
servers_numfailovers () {
    for facet in ${MDTS//,/ } ${OSTS//,/ }; do
        echo "$facet: $(server_numfailovers $facet) times"
# summary_and_cleanup
# EXIT/INT trap handler: identify which client load failed (if any), signal
# all remaining loads to stop, shut down vmstat collectors, gather logs on
# failure, and log a final PASS/FAIL summary.
# NOTE(review): several lines of this function are not visible in this
# chunk (e.g. the rc/result initialisation and some fi/done terminators).
summary_and_cleanup () {
    # Having not empty END_RUN_FILE means the failed loads only
    if [ -s $END_RUN_FILE ]; then
        echo "Found the END_RUN_FILE file: $END_RUN_FILE"
        read END_RUN_NODE < $END_RUN_FILE
        # a client load will end (i.e. fail) if it finds
        # the end run file. that does not mean that that client load
        # actually failed though. the first node in the END_RUN_NODE is
        # the one we are really interested in.
        if [ -n "$END_RUN_NODE" ]; then
            var=$(node_var_name $END_RUN_NODE)_load
            echo "Client load failed on node $END_RUN_NODE"
            echo "client $END_RUN_NODE load stdout and debug files :
${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}
${TESTSUITELOG}_run_${!var}.sh-${END_RUN_NODE}.debug"
    # Write our own name into the shared file so every remaining client
    # load sees it and terminates.
    echo $(date +'%F %H:%M:%S') Terminating clients loads ...
    echo "$0" >> $END_RUN_FILE
    [ $rc -eq 0 ] || result=FAIL
    log "Duration: $DURATION
Server failover period: $SERVER_FAILOVER_PERIOD seconds
Exited after: $ELAPSED seconds
Number of failovers before exit:
$(servers_numfailovers)
Status: $result: rc=$rc"
    # stop the vmstats on the OSTs
    if [ "$VMSTAT" ]; then
        do_nodes $(comma_list $(osts_nodes)) "test -f /tmp/vmstat.pid && \
            { kill -s TERM \$(cat /tmp/vmstat.pid); rm -f /tmp/vmstat.pid; \
            gzip -f9 $vmstatLOG-\$(hostname); }"
    # make sure the client loads die
    do_nodes $NODES_TO_USE "set -x; test -f $LOAD_PID_FILE && \
        { kill -s TERM \$(cat $LOAD_PID_FILE) || true; }"
    # and free up the pdshes that started them, if any are still around
    if [ -n "$CLIENT_LOAD_PIDS" ]; then
        kill $CLIENT_LOAD_PIDS || true
        kill -9 $CLIENT_LOAD_PIDS || true
    if [ $rc -ne 0 ]; then
        # we are interested only in the failed clients and servers
        local failedclients=$(cat $END_RUN_FILE | grep -v $0)
        # FIXME: need ostfailover-s nodes also for FLAVOR=OST
        local product=$(gather_logs $(comma_list $(osts_nodes) \
            $(mdts_nodes) $mdsfailover_HOST $failedclients) 1)
        echo logs files $product
    # Re-mount the local client only on success, so a failed run leaves
    # state available for inspection.
    [ $rc -eq 0 ] && zconf_mount $(hostname) $MOUNT
log "-----============= $0 starting =============-----"
# Run the summary/cleanup handler on any exit path, including Ctrl-C.
trap summary_and_cleanup EXIT INT
# Optionally start a background vmstat collector on every OST node; its PID
# is recorded so summary_and_cleanup can kill it.
if [ "$VMSTAT" ]; then
    do_nodes $(comma_list $(osts_nodes)) "vmstat 1 > $vmstatLOG-\$(hostname) 2>/dev/null </dev/null & echo \$! > /tmp/vmstat.pid"
# Start client loads.
start_client_loads $NODES_TO_USE
echo clients load pids:
# NOTE(review): the failure branch of this check is not visible in this
# chunk -- confirm against the full file.
if ! do_nodesv $NODES_TO_USE "cat $LOAD_PID_FILE"; then
# Failover budget: tolerate at most REQFAIL "too slow" iterations, where an
# iteration is too slow if less than MINSLEEP seconds of load time remain.
# REQFAIL defaults to REQFAIL_PERCENT of the expected number of failovers.
MINSLEEP=${MINSLEEP:-120}
REQFAIL_PERCENT=${REQFAIL_PERCENT:-3} # bug17839 comment 62
REQFAIL=${REQFAIL:-$(( DURATION / SERVER_FAILOVER_PERIOD * REQFAIL_PERCENT / 100))}
# Main soak loop: fail over a random server facet each period until the
# configured DURATION elapses or a client load failure creates END_RUN_FILE.
# NOTE(review): some lines of this loop (ELAPSED/START_TS initialisation and
# several fi/done terminators) are not visible in this chunk.
while [ $ELAPSED -lt $DURATION -a ! -e $END_RUN_FILE ]; do
    # In order to perform the
    # expected number of failovers, we need to account for the following:
    # 1) the time that has elapsed during the client load checking
    # 2) the time the failover takes
    it_time_start=$(date +%s)
    # Pick the facet to fail over this iteration at random.
    SERVERFACET=$(get_random_entry $SERVERS)
    var=${SERVERFACET}_numfailovers
    # Check that our client loads are still running. If any have died,
    # that means they have died outside of recovery, which is unacceptable.
    log "==== Checking the clients loads BEFORE failover -- failure NOT OK \
ELAPSED=$ELAPSED DURATION=$DURATION PERIOD=$SERVER_FAILOVER_PERIOD"
    if ! check_client_loads $NODES_TO_USE; then
    # Do not start the next failover until the previous recovery finished.
    log "Wait $SERVERFACET recovery complete before doing next failover ...."
    if ! wait_recovery_complete $SERVERFACET ; then
        echo "$SERVERFACET recovery is not completed!"
    log "Checking clients are in FULL state before doing next failover"
    if ! wait_clients_import_state $NODES_TO_USE $SERVERFACET FULL; then
        echo "Clients import not FULL, please consider to increase SERVER_FAILOVER_PERIOD=$SERVER_FAILOVER_PERIOD !"
    log "Starting failover on $SERVERFACET"
    facet_failover "$SERVERFACET" || exit 1
    # Check that our client loads are still running during failover.
    # No application failures should occur.
    log "==== Checking the clients loads AFTER failover -- failure NOT OK"
    if ! check_client_loads $NODES_TO_USE; then
        log "Client load failed during failover. Exiting"
    # Increment the number of failovers
    CURRENT_TS=$(date +%s)
    ELAPSED=$((CURRENT_TS - START_TS))
    # Remaining sleep time in this failover period after the work above.
    sleep=$((SERVER_FAILOVER_PERIOD-(CURRENT_TS - it_time_start)))
    # keep count of the number of iterations when the
    # time spent on failover and two client loads checks exceeded
    # the value ( SERVER_FAILOVER_PERIOD - MINSLEEP )
    if [ $sleep -lt $MINSLEEP ]; then
        reqfail=$((reqfail +1))
        log "WARNING: failover and two check_client_loads time exceeded SERVER_FAILOVER_PERIOD - MINSLEEP !
Failed to load the filesystem with I/O for a minimum period of $MINSLEEP $reqfail times ( REQFAIL=$REQFAIL ).
This iteration, the load was only applied for sleep=$sleep seconds.
Estimated max recovery time : $max_recov_time
Probably the hardware is taking excessively long to boot.
Try to increase SERVER_FAILOVER_PERIOD (current is $SERVER_FAILOVER_PERIOD), bug 20918"
        [ $reqfail -gt $REQFAIL ] && exit 6
    log "$SERVERFACET has failed over ${!var} times, and counting..."
    # Stop early if sleeping would carry us past DURATION anyway.
    if [ $((ELAPSED + sleep)) -ge $DURATION ]; then
    if [ $sleep -gt 0 ]; then
        echo "sleeping $sleep seconds ... "