# --- Suite setup (excerpt; each line is prefixed with its original file number) ---
# Locate the Lustre source tree relative to this script and pull in the
# shared test harness (run_test, do_facet, fail, error, skip, ...).
5 LUSTRE=${LUSTRE:-$(dirname $0)/..}
6 . $LUSTRE/tests/test-framework.sh
10 # bug number for skipped test:
11 ALWAYS_EXCEPT="$REPLAY_OST_SINGLE_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 # bug number for SLOW test:
15 # time in minutes: 40 min
# Skip the long-running iozone test (test 5) when SLOW=no.
16 [ "$SLOW" = "no" ] && EXCEPT_SLOW="5"
20 # While we do not use OSTCOUNT=1 setup anymore,
21 # ost1failover_HOST is used
22 #ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
23 #failover= must be defined in OST_MKFS_OPTIONS if ostfailover_HOST != ost_HOST
# OST failover tests need remote-shell access to the OST nodes; bail out
# (success, not failure) if dsh to the OSTs is unavailable.
25 require_dsh_ost || exit 0
27 check_and_setup_lustre
# Remove leftovers (d*/f* work dirs/files) from earlier runs.
29 rm -rf $DIR/[df][0-9]*
31 TDIR=$DIR/d0.${TESTSUITE}
# Single stripe pinned to OST index 0, so failing ost1 is guaranteed to
# affect every file created under $TDIR.
33 $LFS setstripe $TDIR -i 0 -c 1
# test_0a fragment (function header/closer not in this excerpt):
# force a dropped OST reply during the very first client->OST connect and
# verify the client still mounts successfully.
37 zconf_umount $(hostname) $MOUNT -f
38 # needs to run during initial client->OST connection
39 #define OBD_FAIL_OST_ALL_REPLY_NET 0x211
# 0x80000211 = 0x211 with the high bit set — presumably the one-shot
# variant so only the first reply is dropped; verify against the
# OBD_FAIL_ONCE flag definition.
40 do_facet ost1 "lctl set_param fail_loc=0x80000211"
41 zconf_mount $(hostname) $MOUNT && $LFS df $MOUNT || error "mount fail"
43 run_test 0a "target handle mismatch (bug 5317)"
# test_0b fragment: copy a known file into the test dir, then (after an OST
# failover on lines not shown here) verify the contents survived an empty
# replay unchanged.
47 cp /etc/profile $TDIR/$tfile
49 diff /etc/profile $TDIR/$tfile
52 run_test 0b "empty replay"
# test_1 fragment (its run_test line is not in this excerpt): create a small
# file, then — presumably after an OST failover on the missing line between
# these two — confirm the file still exists.  TODO confirm against full file.
55 date > $TDIR/$tfile || error "error creating $TDIR/$tfile"
57 $CHECKSTAT -t file $TDIR/$tfile || error "check for file failed"
# test_2 fragment: create 10 tagged files, then (after the OST failover on
# lines not shown) verify every file's content replayed correctly.
63 for i in $(seq 10); do
64 echo "tag-$i" > $TDIR/$tfile-$i ||
65 error "create $TDIR/$tfile-$i failed"
# Each file must still contain its unique tag after recovery.
68 for i in $(seq 10); do
69 grep -q "tag-$i" $TDIR/$tfile-$i ||
70 error "grep $TDIR/$tfile-$i failed"
74 run_test 2 "|x| 10 open(O_CREAT)s"
# test_3 fragment: write 5 MB of random data simultaneously to a local
# "golden" copy (via tee) and to the Lustre file, fail the OST mid-write
# (on lines not shown), then byte-compare the two copies.
77 verify=$ROOT/tmp/verify-$$
78 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile &
# $ddpid is presumably captured from $! on a line not in this excerpt —
# TODO confirm.
82 wait $ddpid || error "wait for dd failed"
83 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
84 rm -f $verify $TDIR/$tfile
86 run_test 3 "Fail OST during write, with verification"
# test_4 fragment: mirror of test_3 but fails the OST during the *read*
# path — write the file first, drop the client cache (on a line not shown)
# so cmp reads over the wire, then fail the OST while cmp runs.
89 verify=$ROOT/tmp/verify-$$
90 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile
91 # invalidate cache, so that we're reading over the wire
93 cmp $verify $TDIR/$tfile &
# $cmppid is presumably captured from $! on a line not shown — TODO confirm.
96 wait $cmppid || error "wait on cmp failed"
97 rm -f $verify $TDIR/$tfile
99 run_test 4 "Fail OST during read, with verification"
# iozone_bg helper fragment: run iozone in the background with its output
# routed through a pipe into a log file, then decide success by scanning
# the log rather than trusting iozone's exit status.
104 local tmppipe=$TMP/${TESTSUITE}.${TESTNAME}.pipe
107 echo "+ iozone $args"
108 iozone $args > $tmppipe &
112 echo "tmppipe=$tmppipe"
115 # iozone exit code is 0 even if iozone is not completed
116 # need to check iozone output on "complete"
117 local iozonelog=$TMP/${TESTSUITE}.iozone.log
# NOTE(review): the while body runs in a pipeline subshell, so any
# variables assigned inside it are lost to the caller.
119 cat $tmppipe | while read line ; do
121 echo "$line" >>$iozonelog
# Success criterion: the last logged line must contain "complete".
127 if ! $(tail -1 $iozonelog | grep -q complete); then
# test_5 fragment: run iozone in the background sized to fit the smallest
# OST, fail the OST mid-run (on lines not shown), and require iozone to
# finish cleanly afterwards.
137 if [ -z "$(which iozone 2> /dev/null)" ]; then
138 skip_env "iozone missing"
142 # striping is -c 1, get min of available
# Smallest kbytesavail across all OSC devices bounds the working-set size.
143 local minavail=$(lctl get_param -n osc.*[oO][sS][cC][-_]*.kbytesavail |
# Use 3/4 of the smallest OST's free space ...
145 local size=$(( minavail * 3/4 ))
146 local GB=1048576 # 1048576KB == 1GB
# ... capped (presumably) at 1 GB by the body of this if — not shown here.
148 if (( size > GB )); then
151 # no random I/O (-i 2) as it's very slow with ZFS
152 local iozone_opts="-i 0 -i 1 -+d -r 4 -s $size -f $TDIR/$tfile"
154 iozone_bg $iozone_opts &
157 echo iozone bg pid=$pid
162 wait $pid || error "wait on iozone failed"
# Let the MDS finish destroying the test objects before the space check
# that presumably follows; $rc is set on a line not in this excerpt.
166 wait_delete_completed_mds
167 [ $rc -eq 0 ] || error "iozone failed"
170 run_test 5 "Fail OST during iozone"
# test_6 fragment: verify that an object destroy lost to an OST failover is
# replayed — write a file, delay the MDS destroy, fail the file's OST,
# delete, and check the free space is fully returned after recovery.
173 remote_mds_nodsh && skip "remote MDS with nodsh" && return 0
176 sync && sleep 5 && sync # wait for delete thread
178 # wait till space is returned, following
179 # (( $before > $after_dd)) test counting on that
180 wait_mds_ost_sync || error "first wait_mds_ost_sync failed"
181 wait_destroy_complete || error "first wait_destroy_complete failed"
# Baseline free space before consuming 5 MB; $f is set on a line not shown.
184 local before=$(calc_osc_kbytes kbytesfree)
185 dd if=/dev/urandom bs=4096 count=1280 of=$f || error "dd failed"
186 $LFS getstripe $f || error "$LFS getstripe $f failed"
# Remember which OST holds the (single-stripe) file so we fail that one.
187 local stripe_index=$(lfs getstripe -i $f)
190 sleep 2 # ensure we have a fresh statfs
193 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
# 0x80000119: presumably a one-shot drop of the MDS reint reply — confirm
# against the OBD_FAIL_ONCE flag definition.
194 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
196 wait_mds_ost_sync || error "second wait_mds_ost_sync failed"
198 # retry till statfs returns useful results
199 local after_dd=$(calc_osc_kbytes kbytesfree)
# Poll (up to ~20 iterations) until the dd's space consumption is visible.
201 while (( $before <= $after_dd && $i < 20 )); do
205 after_dd=$(calc_osc_kbytes kbytesfree)
208 log "before_free: $before after_dd_free: $after_dd took $i seconds"
209 (( $before > $after_dd )) ||
210 error "free grew after dd: before:$before after_dd:$after_dd"
# Fail the OST holding the file; facet numbering is 1-based vs 0-based
# stripe index, hence the +1.
213 fail ost$((stripe_index + 1))
214 wait_recovery_complete ost$((stripe_index + 1)) ||
215 error "OST$((stripe_index + 1)) recovery not completed"
# The file was (presumably) unlinked on a line not shown; it must be gone.
216 $CHECKSTAT -t file $f && return 2 || true
218 # let the delete happen
219 wait_mds_ost_sync || error "third wait_mds_ost_sync failed"
220 wait_delete_completed || error "second wait_delete_completed failed"
221 local after=$(calc_osc_kbytes kbytesfree)
222 log "free_before: $before free_after: $after"
# Allow fs_log_size slack for llog/journal overhead in the accounting.
223 (( $before <= $after + $(fs_log_size) )) ||
224 error "$before > $after + logsize $(fs_log_size)"
226 run_test 6 "Fail OST before obd_destroy"
# test_7 fragment: same space-accounting check as test_6 but (per its title)
# exercising a different failure ordering around obd_destroy on ost1.
230 sync && sleep 5 && sync # wait for delete thread
232 # wait till space is returned, following
233 # (( $before > $after_dd)) test counting on that
234 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
235 wait_destroy_complete || error "wait_destroy_complete failed"
# Baseline free space; $f is set on a line not in this excerpt.
237 local before=$(calc_osc_kbytes kbytesfree)
238 dd if=/dev/urandom bs=4096 count=1280 of=$f ||
239 error "dd to file failed: $?"
242 local after_dd=$(calc_osc_kbytes kbytesfree)
# Poll (up to ~10 iterations) until statfs reflects the dd's consumption.
244 while (( $before <= $after_dd && $i < 10 )); do
248 after_dd=$(calc_osc_kbytes kbytesfree)
251 log "before: $before after_dd: $after_dd took $i seconds"
252 (( $before > $after_dd )) ||
253 error "space grew after dd: before:$before after_dd:$after_dd"
# The unlink and `fail ost1` happen on lines not shown — TODO confirm.
257 wait_recovery_complete ost1 || error "OST recovery not done"
258 $CHECKSTAT -t file $f && return 2 || true
260 # let the delete happen
261 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
262 wait_delete_completed || error "wait_delete_completed failed"
263 local after=$(calc_osc_kbytes kbytesfree)
264 log "before: $before after: $after"
# fs_log_size slack covers llog/journal overhead in the space accounting.
265 (( $before <= $after + $(fs_log_size) )) ||
266 error "$before > $after + logsize $(fs_log_size)"
268 run_test 7 "Fail OST before obd_destroy"
# test_8a fragment: with the OST forced to return -EINPROGRESS (quota DQACQ
# fail_loc 0x230), a sync write must keep redoing I/O rather than finish;
# once the fail_loc clears the write completes with correct data.
271 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
272 skip "Need MDS version at least 2.3.0"
273 verify=$ROOT/tmp/verify-$$
274 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
275 error "Create verify file failed"
276 #define OBD_FAIL_OST_DQACQ_NET 0x230
277 do_facet ost1 "lctl set_param fail_loc=0x230"
278 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
# $ddpid presumably captured from $! on a line not shown — TODO confirm.
280 sleep $TIMEOUT # wait for the io to become redo io
# dd must still be running: -EINPROGRESS keeps the I/O in redo, not done.
281 if ! ps -p $ddpid > /dev/null 2>&1; then
282 error "redo io finished incorrectly"
# Clear the injection; the stalled write should now complete (the wait is
# on a line not in this excerpt).
284 do_facet ost1 "lctl set_param fail_loc=0"
287 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
288 rm -f $verify $TDIR/$tfile
# The redo path must be silent: no "-115" (EINPROGRESS) redo errors logged.
289 message=$(dmesg | grep "redo for recoverable error -115")
290 [ -z "$message" ] || error "redo error messages found in dmesg"
292 run_test 8a "Verify redo io: redo io when get -EINPROGRESS error"
# test_8b fragment: same -EINPROGRESS injection as 8a, but an OST recovery
# happens (on lines not shown) while the write is in redo; the write must
# still succeed afterwards with correct data.
295 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
296 skip "Need MDS version at least 2.3.0"
297 verify=$ROOT/tmp/verify-$$
298 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
299 error "Create verify file failed"
300 #define OBD_FAIL_OST_DQACQ_NET 0x230
301 do_facet ost1 "lctl set_param fail_loc=0x230"
302 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
# $ddpid presumably captured from $! on a line not shown — TODO confirm.
304 sleep $TIMEOUT # wait for the io to become redo io
306 do_facet ost1 "lctl set_param fail_loc=0"
307 wait $ddpid || error "dd did not complete"
309 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
310 rm -f $verify $TDIR/$tfile
312 run_test 8b "Verify redo io: redo io should success after recovery"
# test_8c fragment: -EINPROGRESS injection again, but the client is evicted
# (on lines not shown) while the write is stuck in redo; after eviction the
# redo must FAIL, so the file contents must NOT match the verify copy.
315 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
316 skip "Need MDS version at least 2.3.0"
317 verify=$ROOT/tmp/verify-$$
318 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
319 error "Create verify file failed"
320 #define OBD_FAIL_OST_DQACQ_NET 0x230
321 do_facet ost1 "lctl set_param fail_loc=0x230"
322 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
324 sleep $TIMEOUT # wait for the io to become redo io
326 # allow recovery to complete
327 sleep $((TIMEOUT + 2))
328 do_facet ost1 "lctl set_param fail_loc=0"
# Inverted check vs 8a/8b: a matching compare means the evicted client's
# redo incorrectly succeeded.
331 cmp $verify $TDIR/$tfile && error "compare files should fail"
332 rm -f $verify $TDIR/$tfile
334 run_test 8c "Verify redo io: redo io should fail after eviction"
# test_8d fragment: MDS-side -EINPROGRESS (DQACQ fail_loc 0x187) must make
# creation block and then be redone — exercised on both the non-intent
# (mcreate) and intent (open|O_CREAT) create paths.
337 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
338 skip "Need MDS version at least 2.3.0"
339 #define OBD_FAIL_MDS_DQACQ_NET 0x187
340 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
341 # test the non-intent create path
342 mcreate $TDIR/$tfile &
# $cpid presumably captured from $! on a line not shown — TODO confirm.
# While the injection is active, the create must still be pending.
345 if ! ps -p $cpid > /dev/null 2>&1; then
346 error "mknod finished incorrectly"
348 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
349 wait $cpid || error "mcreate did not complete"
350 stat $TDIR/$tfile || error "mknod failed"
# Second round: same injection, intent-based create (open with O_CREAT).
354 #define OBD_FAIL_MDS_DQACQ_NET 0x187
355 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
356 # test the intent create path
357 openfile -f O_RDWR:O_CREAT $TDIR/$tfile &
360 if ! ps -p $cpid > /dev/null 2>&1; then
361 error "open finished incorrectly"
363 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
364 wait $cpid || error "openfile failed"
365 stat $TDIR/$tfile || error "open failed"
367 run_test 8d "Verify redo creation on -EINPROGRESS"
# test_8e fragment: when the OST answers statfs with -EINPROGRESS
# (fail_loc 0x231), ptlrpc must keep resending — so a background df (started
# on a line not shown, pid in $dfpid) must NOT have completed yet.
370 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
371 skip "Need MDS version at least 2.3.0"
372 sleep 1 # ensure we have a fresh statfs
373 #define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
374 do_facet ost1 "lctl set_param fail_loc=0x231"
378 if ! ps -p $dfpid > /dev/null 2>&1; then
# Clear the injection before failing so the cluster is left usable.
379 do_facet ost1 "lctl set_param fail_loc=0"
380 error "df shouldn't have completed!"
383 run_test 8e "Verify that ptlrpc resends request on -EINPROGRESS"
# test_9 fragment: delay replayed requests during OST recovery
# (fail_loc 0x714 with fail_val=$TIMEOUT) and assert the server never logs
# an "Already past" request-deadline message for them.
386 [ "$OST1_VERSION" -ge $(version_code 2.6.54) ] ||
387 skip "Need OST version at least 2.6.54"
388 $LFS setstripe -i 0 -c 1 $DIR/$tfile || error "setstripe failed"
390 # LU-1573 - Add duplicate write to generate grants
391 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
392 error "First write failed"
# Second identical write (after, presumably, a replay_barrier on a line
# not shown) is the request that will be replayed and delayed.
395 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
396 error "failed to write"
397 # failover, replay and resend replayed waiting request
398 #define OBD_FAIL_TGT_REPLAY_DELAY2 0x714
399 do_facet ost1 $LCTL set_param fail_loc=0x00000714
400 do_facet ost1 $LCTL set_param fail_val=$TIMEOUT
# The `fail ost1` presumably happens on the missing line 401 — TODO confirm.
402 do_facet ost1 $LCTL set_param fail_loc=0
# Scan recent server dmesg from the failover point for deadline complaints.
403 do_facet ost1 "dmesg | tail -n 100" |
404 sed -n '/no req deadline/,$ p' | grep -qi 'Already past' &&
408 run_test 9 "Verify that no req deadline happened during recovery"
# test_10 fragment: create conflicting PW (write) and PR (read) lock state
# on one client — delay the client-side lock cancel (fail_loc 0x414,
# fail_val=60s), fail the OST mid-cancel, and check grants on replay.
413 dd if=/dev/zero of=$TDIR/$tfile count=10 || error "dd failed"
415 #define OBD_FAIL_OSC_DELAY_IO 0x414
# Client-local fail_loc (plain $LCTL, no do_facet): delay OSC I/O 60s.
416 $LCTL set_param fail_val=60 fail_loc=0x414
417 cancel_lru_locks OST0000-osc &
419 facet_failover ost1 || error "failover: $?"
421 #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
# Enable server grant checking during the replayed lock traffic.
422 $LCTL set_param fail_loc=0x32a
427 run_test 10 "conflicting PW & PR locks on a client"
# test_12a fragment: populate many files on OST0, run `ls -lR` (glimpse
# RPCs) in the background across an OST failover (on lines not shown), and
# require that no listed file shows "?" attrs or ENOENT — i.e. no glimpse
# lands on a missing object.
430 remote_ost || { skip "need remote OST" && return 0; }
# $tmp and $dir are set on lines not in this excerpt — TODO confirm.
435 mkdir -p $tmp || error "can't create $tmp"
436 mkdir -p $dir || error "can't create $dir"
438 $LFS setstripe -c 1 -i 0 $dir
440 for i in $(seq 1 10); do mkdir $dir/d$i; done
442 # get client connected if was idle
# 10 dirs x 500 files = 5000 objects, all on OST index 0.
448 for i in $(seq 1 10); do
449 createmany -o $dir/d$i/file 500
# Background recursive listing races with the failover; output captured
# for the error scan below.
452 ls -lR $dir > $tmp/ls_r_out 2>&1&
457 echo "starting wait for ls -l"
459 grep "?\|No such file or directory" $tmp/ls_r_out &&
460 error "Found file without object on OST"
464 run_test 12a "glimpse after OST failover to a missing object"
# test_12b fragment: like 12a but exercises the *write* path — the MDS
# delays orphan cleanup (fail_loc 0x16e, fail_val=10s) after an OST
# failover (on lines not shown), then a write to a possibly-objectless file
# must still succeed.
467 remote_ost || { skip "need remote OST" && return 0; }
# $dir is set on a line not in this excerpt — TODO confirm.
472 test_mkdir -p -i 0 $dir || error "can't create $dir"
474 $LFS setstripe -c 1 -i 0 $dir
476 for i in $(seq 1 10); do mkdir $dir/d$i; done
479 for i in $(seq 1 10); do
480 createmany -o $dir/d$i/file 500
483 #define OBD_FAIL_MDS_DELAY_DELORPHAN 0x16e
484 do_facet mds1 "$LCTL set_param fail_loc=0x16e fail_val=10" ||
485 error "can't set fail_loc"
# Write to the last created file while orphan processing is delayed;
# $rc is presumably captured from $? on the missing line 489.
488 dd if=/dev/zero of=$dir/d10/file499 count=1 bs=4K > /dev/null
490 [[ $rc -eq 0 ]] || error "dd failed: $rc"
494 run_test 12b "write after OST failover to a missing object"
# Tear down the filesystem state set up by check_and_setup_lustre above.
497 check_and_cleanup_lustre