# Locate the Lustre tree relative to this script unless LUSTRE is preset.
5 LUSTRE=${LUSTRE:-$(dirname $0)/..}
# Pull in the shared test harness (run_test, error, skip, do_facet, ...).
6 . $LUSTRE/tests/test-framework.sh
10 # bug number for skipped test:
# Tests listed here are always excluded; extra exclusions come from the env.
11 ALWAYS_EXCEPT="$REPLAY_OST_SINGLE_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 # bug number for SLOW test:
15 # time in minutes: 40 min
# Skip the long-running iozone test (test 5) in quick mode.
16 [ "$SLOW" = "no" ] && EXCEPT_SLOW="5"
20 # While we do not use OSTCOUNT=1 setup anymore,
21 # ost1failover_HOST is used
22 #ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
23 #failover= must be defined in OST_MKFS_OPTIONS if ostfailover_HOST != ost_HOST
# This suite drives OSTs remotely; bail out cleanly if pdsh access is absent.
25 require_dsh_ost || exit 0
# Mount (or verify) the test filesystem before running any test.
27 check_and_setup_lustre
# Remove leftovers from previous runs (per-test d*/f* directories and files).
29 rm -rf $DIR/[df][0-9]*
# All tests below create their files under one suite-private directory.
31 TDIR=$DIR/d0.${TESTSUITE}
# Single stripe on OST index 0, so failing ost1 affects every test file.
33 $LFS setstripe $TDIR -i 0 -c 1
# Force-unmount the client so the next mount performs a fresh OST connect.
37 zconf_umount $(hostname) $MOUNT -f
38 # needs to run during initial client->OST connection
39 #define OBD_FAIL_OST_ALL_REPLY_NET 0x211
# 0x8... = one-shot fail_loc: drop exactly one reply during the reconnect.
40 do_facet ost1 "lctl set_param fail_loc=0x80000211"
# Remount must still succeed despite the dropped reply (resend path).
41 zconf_mount $(hostname) $MOUNT && $LFS df $MOUNT || error "mount fail"
43 run_test 0a "target handle mismatch (bug 5317)"
# Write a known file, then (after the out-of-view OST failover/replay)
# verify its contents survived an empty replay.
47 cp /etc/profile $TDIR/$tfile
49 diff /etc/profile $TDIR/$tfile
52 run_test 0b "empty replay"
# Create a small file; the out-of-view OST restart must replay the create.
55 date > $TDIR/$tfile || error "error creating $TDIR/$tfile"
# The file must still stat as a regular file after recovery.
57 $CHECKSTAT -t file $TDIR/$tfile || error "check for file failed"
# Create ten uniquely-tagged files before the (out-of-view) OST failover...
63 for i in $(seq 10); do
64 echo "tag-$i" > $TDIR/$tfile-$i ||
65 error "create $TDIR/$tfile-$i failed"
# ...then verify each tag after recovery, proving all opens were replayed.
68 for i in $(seq 10); do
69 grep -q "tag-$i" $TDIR/$tfile-$i ||
70 error "grep $TDIR/$tfile-$i failed"
74 run_test 2 "|x| 10 open(O_CREAT)s"
# Local reference copy used to validate data written across the failover.
77 verify=$ROOT/tmp/verify-$$
# tee writes identical streams to the reference file and the Lustre file;
# the dd runs in the background while the OST is failed (out of view).
78 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile &
# NOTE(review): $ddpid is captured on a line outside this excerpt.
82 wait $ddpid || error "wait for dd failed"
# Byte-for-byte comparison proves the replayed writes landed correctly.
83 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
84 rm -f $verify $TDIR/$tfile
86 run_test 3 "Fail OST during write, with verification"
# Same pattern as test 3, but the OST fails during the *read* side.
89 verify=$ROOT/tmp/verify-$$
# Write synchronously first; the reference copy goes to local /tmp.
90 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile
91 # invalidate cache, so that we're reading over the wire
# Background compare forces network reads while the OST is failed over.
93 cmp $verify $TDIR/$tfile &
# NOTE(review): $cmppid is captured on a line outside this excerpt.
96 wait $cmppid || error "wait on cmp failed"
97 rm -f $verify $TDIR/$tfile
99 run_test 4 "Fail OST during read, with verification"
# Helper (fragment): run iozone in the background, streaming its output
# through a pipe so the caller can both log it and check for completion.
104 local tmppipe=$TMP/${TESTSUITE}.${TESTNAME}.pipe
107 echo "+ iozone $args"
108 iozone $args > $tmppipe &
112 echo "tmppipe=$tmppipe"
115 # iozone exit code is 0 even if iozone is not completed
116 # need to check iozone output on "complete"
117 local iozonelog=$TMP/${TESTSUITE}.iozone.log
# NOTE(review): 'cat | while read' runs the loop in a subshell (any vars set
# in hidden body lines are lost) and 'read' without -r mangles backslashes —
# consider 'while IFS= read -r line; do ...; done < $tmppipe'.
119 cat $tmppipe | while read line ; do
121 echo "$line" >>$iozonelog
# NOTE(review): 'if ! $(cmd)' relies on bash using the command substitution's
# exit status when the expansion is empty; plain 'if ! tail ... | grep -q'
# is the intended idiom.
127 if ! $(tail -1 $iozonelog | grep -q complete); then
# iozone is an optional external benchmark; skip rather than fail without it.
137 if [ -z "$(which iozone 2> /dev/null)" ]; then
138 skip_env "iozone missing"
142 # striping is -c 1, get min of available
# Smallest kbytesavail across all OSC devices bounds the safe file size.
143 local minavail=$(lctl get_param -n osc.*[oO][sS][cC][-_]*.kbytesavail |
# Use 3/4 of the minimum available space for the iozone working file.
145 local size=$(( minavail * 3/4 ))
146 local GB=1048576 # 1048576KB == 1GB
# Cap the working set (cap assignment is outside this excerpt).
148 if (( size > GB )); then
151 # no random I/O (-i 2) as it's very slow with ZFS
152 local iozone_opts="-i 0 -i 1 -+d -r 4 -s $size -f $TDIR/$tfile"
# Run iozone in the background while the OST is failed (out of view).
154 iozone_bg $iozone_opts &
157 echo iozone bg pid=$pid
# iozone must finish successfully despite the failover.
162 wait $pid || error "wait on iozone failed"
# Ensure the MDS has finished destroying the deleted objects before exit.
166 wait_delete_completed_mds
167 [ $rc -eq 0 ] || error "iozone failed"
# Helper body: sum the kbytesfree statistic over all OSC devices.
173 calc_osc_kbytes kbytesfree
# This test manipulates MDS fail_loc, so it needs shell access to the MDS.
177 remote_mds_nodsh && skip "remote MDS with nodsh" && return 0
# Flush pending deletes so the free-space baseline below is stable.
180 sync && sleep 5 && sync # wait for delete thread
182 # wait till space is returned, following
183 # (( $before > $after_dd)) test counting on that
184 wait_mds_ost_sync || error "first wait_mds_ost_sync failed"
185 wait_destroy_complete || error "first wait_destroy_complete failed"
# Baseline free space before consuming 5 MB (1280 * 4 KB) with dd.
188 local before=$(kbytesfree)
189 dd if=/dev/urandom bs=4096 count=1280 of=$f || error "dd failed"
190 $LFS getstripe $f || error "$LFS getstripe $f failed"
# Remember which OST holds the (single-stripe) object so we fail that one.
191 local stripe_index=$(lfs getstripe -i $f)
194 sleep 2 # ensure we have a fresh statfs
197 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
# One-shot (0x8...) drop of the unlink reply so the destroy is delayed.
198 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
200 # retry till statfs returns useful results
201 local after_dd=$(kbytesfree)
# Poll (up to 20 iterations; sleep/increment lines outside this excerpt)
# until statfs reflects the space consumed by dd.
203 while (( $before <= $after_dd && $i < 20 )); do
207 after_dd=$(kbytesfree)
210 log "before_free: $before after_dd_free: $after_dd took $i seconds"
211 (( $before > $after_dd )) ||
212 error "free grew after dd: before:$before after_dd:$after_dd"
# Fail the OST that holds the object; facet names are 1-based, hence +1.
215 fail ost$((stripe_index + 1))
216 wait_recovery_complete ost$((stripe_index + 1)) ||
217 error "OST$((stripe_index + 1)) recovery not completed"
# The file was unlinked (out of view); it must NOT reappear after recovery.
218 $CHECKSTAT -t file $f && return 2 || true
220 # let the delete happen
221 wait_mds_ost_sync || error "second wait_mds_ost_sync failed"
222 wait_delete_completed || error "second wait_delete_completed failed"
# After the replayed destroy, free space must return (modulo llog overhead).
223 local after=$(kbytesfree)
224 log "free_before: $before free_after: $after"
225 (( $before <= $after + $(fs_log_size) )) ||
226 error "$before > $after + logsize $(fs_log_size)"
228 run_test 6 "Fail OST before obd_destroy"
# Variant of test 6: fail the OST (out of view) before obd_destroy runs,
# without the MDS fail_loc. Flush first so the baseline is stable.
232 sync && sleep 5 && sync # wait for delete thread
234 # wait till space is returned, following
235 # (( $before > $after_dd)) test counting on that
236 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
237 wait_destroy_complete || error "wait_destroy_complete failed"
# Baseline free space, then consume 5 MB (1280 * 4 KB).
239 local before=$(kbytesfree)
240 dd if=/dev/urandom bs=4096 count=1280 of=$f ||
241 error "dd to file failed: $?"
244 local after_dd=$(kbytesfree)
# Poll (up to 10 iterations; sleep/increment lines outside this excerpt)
# until statfs reflects the consumed space.
246 while (( $before <= $after_dd && $i < 10 )); do
250 after_dd=$(kbytesfree)
253 log "before: $before after_dd: $after_dd took $i seconds"
254 (( $before > $after_dd )) ||
255 error "space grew after dd: before:$before after_dd:$after_dd"
# OST failover happens out of view; recovery must finish before checking.
259 wait_recovery_complete ost1 || error "OST recovery not done"
# The unlinked file must not resurrect after replay.
260 $CHECKSTAT -t file $f && return 2 || true
262 # let the delete happen
263 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
264 wait_delete_completed || error "wait_delete_completed failed"
# Space must be reclaimed (allowing for llog overhead).
265 local after=$(kbytesfree)
266 log "before: $before after: $after"
267 (( $before <= $after + $(fs_log_size) )) ||
268 error "$before > $after + logsize $(fs_log_size)"
270 run_test 7 "Fail OST before obd_destroy"
# Redo-io (-EINPROGRESS) support landed in 2.3.0; skip on older servers.
273 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
274 skip "Need MDS version at least 2.3.0"
275 verify=$ROOT/tmp/verify-$$
276 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
277 error "Create verify file failed"
278 #define OBD_FAIL_OST_DQACQ_NET 0x230
# Persistent fail_loc (no 0x8000 one-shot bit): every write gets
# -EINPROGRESS, forcing the client onto the redo-io path.
279 do_facet ost1 "lctl set_param fail_loc=0x230"
# oflag=sync keeps dd blocked until the writes really complete.
280 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
282 sleep $TIMEOUT # wait for the io to become redo io
# While fail_loc is armed the dd must still be retrying, not finished.
283 if ! ps -p $ddpid > /dev/null 2>&1; then
284 error "redo io finished incorrectly"
# Disarm the fail_loc; the redone writes can now succeed.
286 do_facet ost1 "lctl set_param fail_loc=0"
# Data must match despite having been redone.
289 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
290 rm -f $verify $TDIR/$tfile
# -115 == -EINPROGRESS; redo must be silent, not logged as an error.
291 message=$(dmesg | grep "redo for recoverable error -115")
292 [ -z "$message" ] || error "redo error messages found in dmesg"
294 run_test 8a "Verify redo io: redo io when get -EINPROGRESS error"
# Like 8a, but an OST failover (out of view) happens while the io is in
# redo state; the redone io must succeed after recovery.
297 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
298 skip "Need MDS version at least 2.3.0"
299 verify=$ROOT/tmp/verify-$$
300 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
301 error "Create verify file failed"
302 #define OBD_FAIL_OST_DQACQ_NET 0x230
# Persistent -EINPROGRESS injection pushes the write onto the redo path.
303 do_facet ost1 "lctl set_param fail_loc=0x230"
304 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
306 sleep $TIMEOUT # wait for the io to become redo io
# Disarm after the (out-of-view) failover; the redone io completes now.
308 do_facet ost1 "lctl set_param fail_loc=0"
309 wait $ddpid || error "dd did not complete"
311 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
312 rm -f $verify $TDIR/$tfile
314 run_test 8b "Verify redo io: redo io should success after recovery"
# Like 8b, but the client misses the recovery window and is evicted; the
# redone io must then FAIL, leaving the file different from the reference.
317 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
318 skip "Need MDS version at least 2.3.0"
319 verify=$ROOT/tmp/verify-$$
320 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
321 error "Create verify file failed"
322 #define OBD_FAIL_OST_DQACQ_NET 0x230
# Persistent -EINPROGRESS injection pushes the write onto the redo path.
323 do_facet ost1 "lctl set_param fail_loc=0x230"
324 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
326 sleep $TIMEOUT # wait for the io to become redo io
328 # allow recovery to complete
# Sleeping past the recovery window gets this client evicted.
329 sleep $((TIMEOUT + 2))
330 do_facet ost1 "lctl set_param fail_loc=0"
# After eviction the redone writes were discarded, so compare must differ.
333 cmp $verify $TDIR/$tfile && error "compare files should fail"
334 rm -f $verify $TDIR/$tfile
336 run_test 8c "Verify redo io: redo io should fail after eviction"
# MDS-side -EINPROGRESS: creations (both non-intent and intent paths)
# must block while fail_loc 0x187 is armed and be redone once it clears.
339 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
340 skip "Need MDS version at least 2.3.0"
341 #define OBD_FAIL_MDS_DQACQ_NET 0x187
342 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
343 # test the non-intent create path
344 mcreate $TDIR/$tfile &
# While the fail_loc is armed the mknod must still be in flight.
347 if ! ps -p $cpid > /dev/null 2>&1; then
348 error "mknod finished incorrectly"
# Disarm; the redone create must then complete and the file must exist.
350 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
351 wait $cpid || error "mcreate did not complete"
352 stat $TDIR/$tfile || error "mknod failed"
356 #define OBD_FAIL_MDS_DQACQ_NET 0x187
# Re-arm and repeat the same check for the open(O_CREAT) intent path.
357 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
358 # test the intent create path
359 openfile -f O_RDWR:O_CREAT $TDIR/$tfile &
362 if ! ps -p $cpid > /dev/null 2>&1; then
363 error "open finished incorrectly"
365 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
366 wait $cpid || error "openfile failed"
367 stat $TDIR/$tfile || error "open failed"
369 run_test 8d "Verify redo creation on -EINPROGRESS"
# ptlrpc must transparently resend a statfs that gets -EINPROGRESS,
# so a df run in the background (out of view) never completes early.
372 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
373 skip "Need MDS version at least 2.3.0"
374 sleep 1 # ensure we have a fresh statfs
375 #define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
# Persistent fail_loc: every statfs to ost1 returns -EINPROGRESS.
376 do_facet ost1 "lctl set_param fail_loc=0x231"
# df must still be blocked in resend while the fail_loc is armed.
380 if ! ps -p $dfpid > /dev/null 2>&1; then
# Disarm before reporting, otherwise later tests inherit the fail_loc.
381 do_facet ost1 "lctl set_param fail_loc=0"
382 error "df shouldn't have completed!"
385 run_test 8e "Verify that ptlrpc resends request on -EINPROGRESS"
# Requires the 2.6.54 fix for request deadlines during replay.
388 [ "$OST1_VERSION" -ge $(version_code 2.6.54) ] ||
389 skip "Need OST version at least 2.6.54"
390 $LFS setstripe -i 0 -c 1 $DIR/$tfile || error "setstripe failed"
392 # LU-1573 - Add duplicate write to generate grants
393 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
394 error "First write failed"
# Second identical write; replayed after the (out-of-view) failover.
397 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
398 error "failed to write"
399 # failover, replay and resend replayed waiting request
400 #define OBD_FAIL_TGT_REPLAY_DELAY2 0x714
# Delay replay by $TIMEOUT so the replayed request sits long enough to
# expose any bogus "past deadline" handling.
401 do_facet ost1 $LCTL set_param fail_loc=0x00000714
402 do_facet ost1 $LCTL set_param fail_val=$TIMEOUT
404 do_facet ost1 $LCTL set_param fail_loc=0
# Scan recent OST kernel messages: any "Already past" deadline report
# after recovery is a failure (error call is outside this excerpt).
405 do_facet ost1 "dmesg | tail -n 100" |
406 sed -n '/no req deadline/,$ p' | grep -qi 'Already past' &&
410 run_test 9 "Verify that no req deadline happened during recovery"
# Populate the file so the client holds dirty/cached pages under PW locks.
415 dd if=/dev/zero of=$TDIR/$tfile count=10 || error "dd failed"
417 #define OBD_FAIL_OSC_DELAY_IO 0x414
# Delay OSC io by 60s so lock cancellation is still in flight when the
# OST fails over, creating the PW vs PR conflict window.
418 $LCTL set_param fail_val=60 fail_loc=0x414
419 cancel_lru_locks OST0000-osc &
421 facet_failover ost1 || error "failover: $?"
423 #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
# Have LDLM verify grant consistency while the conflicting locks replay.
424 $LCTL set_param fail_loc=0x32a
429 run_test 10 "conflicting PW & PR locks on a client"
# test_12 (fragment): after an OST failover under create load, a recursive
# stat must never expose MDT entries whose OST objects are missing.
# Only meaningful with a real power-style failover and a remote OST.
432 [ $FAILURE_MODE != "HARD" ] &&
433 skip "Test needs FAILURE_MODE HARD" && return 0
434 remote_ost || { skip "need remote OST" && return 0; }
441 mkdir -p $tmp || error "can't create $tmp"
442 mkdir -p $dir || error "can't create $dir"
# Single stripe on OST index 0 so the failover affects every object.
444 $LFS setstripe -c 1 -i 0 $dir
# $(...) replaces deprecated backticks (ShellCheck SC2006); same behavior.
446 for i in $(seq 1 10); do mkdir $dir/d$i; done
448 #define OBD_FAIL_OST_DELAY_TRANS 0x245
# Delay OST transaction commit so creates are uncommitted at failover time.
449 do_facet ost1 "$LCTL set_param fail_loc=0x245" ||
450 error "can't set fail_loc"
# Background create load with random names while commits are delayed.
454 createmany -o $dir/d$i/$(openssl rand -base64 12) 500 &
457 echo "Waiting createmany pids"
# Capture the recursive listing; stderr is folded into the file so the
# grep below can catch ENOENT from stat as well as '?' columns.
460 ls -lR $dir > $tmp/ls_r_out 2>&1&
465 echo "starting wait for ls -l"
# '?' fields or "No such file or directory" mean a dangling MDT entry.
467 grep "?\|No such file or directory" $tmp/ls_r_out &&
468 error "Found file without object on OST"
# Standard suite epilogue: verify filesystem health and unmount/cleanup.
475 check_and_cleanup_lustre