5 LUSTRE=${LUSTRE:-$(dirname $0)/..}
6 . $LUSTRE/tests/test-framework.sh
10 # bug number for skipped test:
11 ALWAYS_EXCEPT="$REPLAY_OST_SINGLE_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 # bug number for SLOW test:
15 # time in minutes: 40 min"
16 [ "$SLOW" = "no" ] && EXCEPT_SLOW="5"
20 # While we do not use OSTCOUNT=1 setup anymore,
21 # ost1failover_HOST is used
22 #ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
23 #failover= must be defined in OST_MKFS_OPTIONS if ostfailover_HOST != ost_HOST
25 require_dsh_ost || exit 0
27 check_and_setup_lustre
29 rm -rf $DIR/[df][0-9]*
31 TDIR=$DIR/d0.${TESTSUITE}
33 $LFS setstripe $TDIR -i 0 -c 1
37 zconf_umount $(hostname) $MOUNT -f
38 # needs to run during initial client->OST connection
39 #define OBD_FAIL_OST_ALL_REPLY_NET 0x211
40 do_facet ost1 "lctl set_param fail_loc=0x80000211"
41 zconf_mount $(hostname) $MOUNT && $LFS df $MOUNT || error "mount fail"
43 run_test 0a "target handle mismatch (bug 5317)"
47 cp /etc/profile $TDIR/$tfile
49 diff /etc/profile $TDIR/$tfile
52 run_test 0b "empty replay"
55 date > $TDIR/$tfile || error "error creating $TDIR/$tfile"
57 $CHECKSTAT -t file $TDIR/$tfile || error "check for file failed"
# test_2 fragment: create 10 tagged files via open(O_CREAT), then after the
# (elided) failover verify each file's tag survived replay.
for i in $(seq 10); do
echo "tag-$i" > $TDIR/$tfile-$i ||
error "create $TDIR/$tfile-$i failed"
# (loop close and OST failover elided from this view)
for i in $(seq 10); do
grep -q "tag-$i" $TDIR/$tfile-$i ||
error "grep $TDIR/$tfile-$i failed"
run_test 2 "|x| 10 open(O_CREAT)s"
77 verify=$ROOT/tmp/verify-$$
78 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile &
82 wait $ddpid || error "wait for dd failed"
83 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
84 rm -f $verify $TDIR/$tfile
86 run_test 3 "Fail OST during write, with verification"
89 verify=$ROOT/tmp/verify-$$
90 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile
91 # invalidate cache, so that we're reading over the wire
93 cmp $verify $TDIR/$tfile &
96 wait $cmppid || error "wait on cmp failed"
97 rm -f $verify $TDIR/$tfile
99 run_test 4 "Fail OST during read, with verification"
# iozone_bg helper fragment: run iozone in the background with its output
# teed through a named pipe into a log file; success is determined by the
# log's last line containing "complete", not by iozone's exit code.
local tmppipe=$TMP/${TESTSUITE}.${TESTNAME}.pipe
echo "+ iozone $args"
iozone $args > $tmppipe &
echo "tmppipe=$tmppipe"
# iozone exit code is 0 even if iozone is not completed
# need to check iozone output on "complete"
local iozonelog=$TMP/${TESTSUITE}.iozone.log
cat $tmppipe | while read line ; do
echo "$line" >>$iozonelog
# fail the helper if the last logged line does not say "complete"
if ! $(tail -1 $iozonelog | grep -q complete); then
# test_5 fragment: run iozone sized to 3/4 of the smallest OSC's available
# space (capped near 1GB) in the background via iozone_bg, fail the OST
# mid-run (elided), and require iozone to finish successfully afterwards.
if [ -z "$(which iozone 2> /dev/null)" ]; then
skip_env "iozone missing"
# striping is -c 1, get min of available
local minavail=$(lctl get_param -n osc.*[oO][sS][cC][-_]*.kbytesavail |
local size=$(( minavail * 3/4 ))
local GB=1048576 # 1048576KB == 1GB
if (( size > GB )); then
# no random I/O (-i 2) as it's very slow with ZFS
local iozone_opts="-i 0 -i 1 -+d -r 4 -s $size -f $TDIR/$tfile"
iozone_bg $iozone_opts &
echo iozone bg pid=$pid
wait $pid || error "wait on iozone failed"
# make sure the deletes from the run are fully processed before rc check
wait_delete_completed_mds
[ $rc -eq 0 ] || error "iozone failed"
run_test 5 "Fail OST during iozone"
173 calc_osc_kbytes kbytesfree
# test_6 fragment: verify space accounting across an OST failover that
# happens before obd_destroy. Baseline free space, write 5MB, check free
# space dropped, fail the OST holding the file's single stripe, and after
# recovery + delete verify free space returned (within fs log overhead).
remote_mds_nodsh && skip "remote MDS with nodsh" && return 0
sync && sleep 5 && sync # wait for delete thread
# wait till space is returned, following
# (( $before > $after_dd)) test counting on that
wait_mds_ost_sync || error "first wait_mds_ost_sync failed"
wait_destroy_complete || error "first wait_destroy_complete failed"
local before=$(kbytesfree)
dd if=/dev/urandom bs=4096 count=1280 of=$f || error "dd failed"
$LFS getstripe $f || error "$LFS getstripe $f failed"
# which OST holds the (single) stripe — used to pick the facet to fail
local stripe_index=$(lfs getstripe -i $f)
sleep 2 # ensure we have a fresh statfs
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
wait_mds_ost_sync || error "second wait_mds_ost_sync failed"
# retry till statfs returns useful results
local after_dd=$(kbytesfree)
while (( $before <= $after_dd && $i < 20 )); do
after_dd=$(kbytesfree)
log "before_free: $before after_dd_free: $after_dd took $i seconds"
(( $before > $after_dd )) ||
error "free grew after dd: before:$before after_dd:$after_dd"
# facet names are 1-based, stripe indices 0-based
fail ost$((stripe_index + 1))
wait_recovery_complete ost$((stripe_index + 1)) ||
error "OST$((stripe_index + 1)) recovery not completed"
# file must be gone after recovery (unlink presumably in elided lines)
$CHECKSTAT -t file $f && return 2 || true
# let the delete happen
wait_mds_ost_sync || error "third wait_mds_ost_sync failed"
wait_delete_completed || error "second wait_delete_completed failed"
local after=$(kbytesfree)
log "free_before: $before free_after: $after"
(( $before <= $after + $(fs_log_size) )) ||
error "$before > $after + logsize $(fs_log_size)"
run_test 6 "Fail OST before obd_destroy"
# test_7 fragment: same space-accounting check as test_6, but always fails
# ost1 (failover itself is in elided lines) rather than the stripe's OST.
sync && sleep 5 && sync # wait for delete thread
# wait till space is returned, following
# (( $before > $after_dd)) test counting on that
wait_mds_ost_sync || error "wait_mds_ost_sync failed"
wait_destroy_complete || error "wait_destroy_complete failed"
local before=$(kbytesfree)
dd if=/dev/urandom bs=4096 count=1280 of=$f ||
error "dd to file failed: $?"
# retry statfs until the 5MB write shows up in the free-space numbers
local after_dd=$(kbytesfree)
while (( $before <= $after_dd && $i < 10 )); do
after_dd=$(kbytesfree)
log "before: $before after_dd: $after_dd took $i seconds"
(( $before > $after_dd )) ||
error "space grew after dd: before:$before after_dd:$after_dd"
wait_recovery_complete ost1 || error "OST recovery not done"
# file must be gone after recovery (unlink presumably in elided lines)
$CHECKSTAT -t file $f && return 2 || true
# let the delete happen
wait_mds_ost_sync || error "wait_mds_ost_sync failed"
wait_delete_completed || error "wait_delete_completed failed"
local after=$(kbytesfree)
log "before: $before after: $after"
(( $before <= $after + $(fs_log_size) )) ||
error "$before > $after + logsize $(fs_log_size)"
run_test 7 "Fail OST before obd_destroy"
# test_8a fragment: redo IO on -EINPROGRESS. Arm OBD_FAIL_OST_DQACQ_NET so
# writes get -EINPROGRESS and the client must keep resending; verify dd is
# still blocked while the fail_loc is armed, data matches after clearing
# it, and no "redo for recoverable error" messages leaked into dmesg.
[[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
skip "Need MDS version at least 2.3.0"
verify=$ROOT/tmp/verify-$$
dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
error "Create verify file failed"
#define OBD_FAIL_OST_DQACQ_NET 0x230
do_facet ost1 "lctl set_param fail_loc=0x230"
dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
sleep $TIMEOUT # wait for the io to become redo io
# dd must NOT have finished while the OST still fails the IO
if ! ps -p $ddpid > /dev/null 2>&1; then
error "redo io finished incorrectly"
do_facet ost1 "lctl set_param fail_loc=0"
cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
rm -f $verify $TDIR/$tfile
# -115 is -EINPROGRESS; redo messages in dmesg indicate a kernel-side error
message=$(dmesg | grep "redo for recoverable error -115")
[ -z "$message" ] || error "redo error messages found in dmesg"
run_test 8a "Verify redo io: redo io when get -EINPROGRESS error"
299 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
300 skip "Need MDS version at least 2.3.0"
301 verify=$ROOT/tmp/verify-$$
302 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
303 error "Create verify file failed"
304 #define OBD_FAIL_OST_DQACQ_NET 0x230
305 do_facet ost1 "lctl set_param fail_loc=0x230"
306 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
308 sleep $TIMEOUT # wait for the io to become redo io
310 do_facet ost1 "lctl set_param fail_loc=0"
311 wait $ddpid || error "dd did not complete"
313 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
314 rm -f $verify $TDIR/$tfile
316 run_test 8b "Verify redo io: redo io should success after recovery"
319 [[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
320 skip "Need MDS version at least 2.3.0"
321 verify=$ROOT/tmp/verify-$$
322 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
323 error "Create verify file failed"
324 #define OBD_FAIL_OST_DQACQ_NET 0x230
325 do_facet ost1 "lctl set_param fail_loc=0x230"
326 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
328 sleep $TIMEOUT # wait for the io to become redo io
330 # allow recovery to complete
331 sleep $((TIMEOUT + 2))
332 do_facet ost1 "lctl set_param fail_loc=0"
335 cmp $verify $TDIR/$tfile && error "compare files should fail"
336 rm -f $verify $TDIR/$tfile
338 run_test 8c "Verify redo io: redo io should fail after eviction"
# test_8d fragment: redo CREATION on -EINPROGRESS from the MDS
# (OBD_FAIL_MDS_DQACQ_NET). Exercised twice: once through the non-intent
# create path (mcreate) and once through the intent path (openfile with
# O_CREAT); both must block while armed and complete once cleared.
[[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
skip "Need MDS version at least 2.3.0"
#define OBD_FAIL_MDS_DQACQ_NET 0x187
do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
# test the non-intent create path
mcreate $TDIR/$tfile &
# mcreate must still be blocked while the fail_loc is armed
if ! ps -p $cpid > /dev/null 2>&1; then
error "mknod finished incorrectly"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
wait $cpid || error "mcreate did not complete"
stat $TDIR/$tfile || error "mknod failed"
#define OBD_FAIL_MDS_DQACQ_NET 0x187
do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
# test the intent create path
openfile -f O_RDWR:O_CREAT $TDIR/$tfile &
# openfile must still be blocked while the fail_loc is armed
if ! ps -p $cpid > /dev/null 2>&1; then
error "open finished incorrectly"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
wait $cpid || error "openfile failed"
stat $TDIR/$tfile || error "open failed"
run_test 8d "Verify redo creation on -EINPROGRESS"
# test_8e fragment: statfs returning -EINPROGRESS must cause ptlrpc to
# resend the request rather than fail — a backgrounded df ($dfpid, started
# in elided lines) must still be running while the fail_loc is armed.
[[ "$MDS1_VERSION" -ge $(version_code 2.3.0) ]] ||
skip "Need MDS version at least 2.3.0"
sleep 1 # ensure we have a fresh statfs
#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
do_facet ost1 "lctl set_param fail_loc=0x231"
if ! ps -p $dfpid > /dev/null 2>&1; then
# clear the fail_loc before erroring out so later tests aren't affected
do_facet ost1 "lctl set_param fail_loc=0"
error "df shouldn't have completed!"
run_test 8e "Verify that ptlrpc resends request on -EINPROGRESS"
390 [ "$OST1_VERSION" -ge $(version_code 2.6.54) ] ||
391 skip "Need OST version at least 2.6.54"
392 $LFS setstripe -i 0 -c 1 $DIR/$tfile || error "setstripe failed"
394 # LU-1573 - Add duplicate write to generate grants
395 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
396 error "First write failed"
399 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
400 error "failed to write"
401 # failover, replay and resend replayed waiting request
402 #define OBD_FAIL_TGT_REPLAY_DELAY2 0x714
403 do_facet ost1 $LCTL set_param fail_loc=0x00000714
404 do_facet ost1 $LCTL set_param fail_val=$TIMEOUT
406 do_facet ost1 $LCTL set_param fail_loc=0
407 do_facet ost1 "dmesg | tail -n 100" |
408 sed -n '/no req deadline/,$ p' | grep -qi 'Already past' &&
412 run_test 9 "Verify that no req deadline happened during recovery"
417 dd if=/dev/zero of=$TDIR/$tfile count=10 || error "dd failed"
419 #define OBD_FAIL_OSC_DELAY_IO 0x414
420 $LCTL set_param fail_val=60 fail_loc=0x414
421 cancel_lru_locks OST0000-osc &
423 facet_failover ost1 || error "failover: $?"
425 #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
426 $LCTL set_param fail_loc=0x32a
431 run_test 10 "conflicting PW & PR locks on a client"
# test_12a fragment: glimpse after OST failover to a missing object — create
# 5000 files (10 dirs x 500), run a recursive ls in the background across
# the (elided) failover, and fail if any listing shows "?" or ENOENT,
# i.e. a file whose OST object went missing.
remote_ost || { skip "need remote OST" && return 0; }
mkdir -p $tmp || error "can't create $tmp"
mkdir -p $dir || error "can't create $dir"
$LFS setstripe -c 1 -i 0 $dir
for i in $(seq 1 10); do mkdir $dir/d$i; done
# get client connected if was idle
for i in $(seq 1 10); do
createmany -o $dir/d$i/file 500
# recursive listing races with the (elided) OST failover
ls -lR $dir > $tmp/ls_r_out 2>&1&
echo "starting wait for ls -l"
grep "?\|No such file or directory" $tmp/ls_r_out &&
error "Found file without object on OST"
run_test 12a "glimpse after OST failover to a missing object"
# test_12b fragment: write after OST failover to a missing object — same
# file population as 12a, but delay MDS orphan deletion
# (OBD_FAIL_MDS_DELAY_DELORPHAN, fail_val=10s) across the (elided) failover
# and require a subsequent write to one of the files to succeed.
remote_ost || { skip "need remote OST" && return 0; }
test_mkdir -p -i 0 $dir || error "can't create $dir"
$LFS setstripe -c 1 -i 0 $dir
for i in $(seq 1 10); do mkdir $dir/d$i; done
for i in $(seq 1 10); do
createmany -o $dir/d$i/file 500
#define OBD_FAIL_MDS_DELAY_DELORPHAN 0x16e
do_facet mds1 "$LCTL set_param fail_loc=0x16e fail_val=10" ||
error "can't set fail_loc"
dd if=/dev/zero of=$dir/d10/file499 count=1 bs=4K > /dev/null
# $rc presumably captured from dd in an elided line — verify it
[[ $rc -eq 0 ]] || error "dd failed: $rc"
run_test 12b "write after OST failover to a missing object"
501 check_and_cleanup_lustre