# Locate the Lustre source tree relative to this script and source the shared
# test harness (provides run_test, error, skip, do_facet, facet_fstype, etc.).
5 LUSTRE=${LUSTRE:-$(dirname $0)/..}
6 . $LUSTRE/tests/test-framework.sh
10 # bug number for skipped test:
11 ALWAYS_EXCEPT="$REPLAY_OST_SINGLE_EXCEPT"
12 # UPDATE THE COMMENT ABOVE WITH BUG NUMBERS WHEN CHANGING ALWAYS_EXCEPT!
14 # bug number for SLOW test:
15 # time in minutes: 40 min
# Skip long-running test 5 (iozone) in quick ("SLOW=no") runs.
16 [ "$SLOW" = "no" ] && EXCEPT_SLOW="5"
# ZFS-backed MDS: additionally skip 8a/8b in quick runs (slow there, LU-2887).
# NOTE(review): the matching "fi" for this "if" is outside this chunk.
18 if [ $(facet_fstype $SINGLEMDS) = "zfs" ]; then
19 # bug number for slow tests: LU-2887
20 # time in minutes: 32 12.5 min
21 [ "$SLOW" = "no" ] && EXCEPT_SLOW+=" 8a 8b"
26 # While we do not use OSTCOUNT=1 setup anymore,
27 # ost1failover_HOST is used
28 #ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
29 #failover= must be defined in OST_MKFS_OPTIONS if ostfailover_HOST != ost_HOST
# Bail out of the whole suite (exit 0 = "skipped", not failed) if we cannot
# reach the OSTs over dsh; then mount/configure the test filesystem.
31 require_dsh_ost || exit 0
33 check_and_setup_lustre
# Remove leftover per-test files/dirs (d0.*, f0.*, ...) from earlier runs.
35 rm -rf $DIR/[df][0-9]*
# Shared working directory for this suite; single stripe pinned to OST index 0
# so that failing ost1 is guaranteed to affect every file created under it.
37 TDIR=$DIR/d0.${TESTSUITE}
39 $LFS setstripe $TDIR -i 0 -c 1
# --- test_0a fragment (function header/brace not visible in this chunk) ---
# Force-unmount the client, arm a one-shot reply drop on ost1, then remount:
# the failure must hit during the initial client->OST connection.
43 zconf_umount $(hostname) $MOUNT -f
44 # needs to run during initial client->OST connection
45 #define OBD_FAIL_OST_ALL_REPLY_NET 0x211
# 0x80000000 bit makes the fail_loc fire only once (Lustre OBD_FAIL_ONCE
# convention), so the retried connect succeeds.
46 do_facet ost1 "lctl set_param fail_loc=0x80000211"
47 zconf_mount $(hostname) $MOUNT && $LFS df $MOUNT || error "mount fail"
49 run_test 0a "target handle mismatch (bug 5317)"
# --- test_0b fragment (OST failover lines elided from this chunk) ---
# Write a known file, then verify it survives an (elided) empty-replay
# failover unchanged.
53 cp /etc/profile $TDIR/$tfile
55 diff /etc/profile $TDIR/$tfile
58 run_test 0b "empty replay"
# --- test_1 fragment: create one file, fail the OST (elided), verify the
# file still exists after recovery.
61 date > $TDIR/$tfile || error "error creating $TDIR/$tfile"
63 $CHECKSTAT -t file $TDIR/$tfile || error "check for file failed"
# --- test_2 fragment: 10 O_CREAT opens with unique contents; after the
# (elided) OST failover, each file must still hold its own tag.
69 for i in $(seq 10); do
70 echo "tag-$i" > $TDIR/$tfile-$i ||
71 error "create $TDIR/$tfile-$i failed"
74 for i in $(seq 10); do
75 grep -q "tag-$i" $TDIR/$tfile-$i ||
76 error "grep $TDIR/$tfile-$i failed"
80 run_test 2 "|x| 10 open(O_CREAT)s"
# --- test_3 fragment: fail the OST while a background write is in flight,
# then verify the written data byte-for-byte against a local copy.
83 verify=$ROOT/tmp/verify-$$
# tee keeps a reference copy in $verify while streaming the same 5MB
# (1280 x 4KB) of random data into the Lustre file in the background.
# NOTE(review): $ddpid is captured on a line elided from this chunk.
84 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile &
88 wait $ddpid || error "wait for dd failed"
89 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
90 rm -f $verify $TDIR/$tfile
92 run_test 3 "Fail OST during write, with verification"
# --- test_4 fragment: same data set as test_3, but the OST is failed while
# a background cmp (read) is running rather than during the write.
95 verify=$ROOT/tmp/verify-$$
96 dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile
97 # invalidate cache, so that we're reading over the wire
# NOTE(review): $cmppid is captured on a line elided from this chunk.
99 cmp $verify $TDIR/$tfile &
102 wait $cmppid || error "wait on cmp failed"
103 rm -f $verify $TDIR/$tfile
105 run_test 4 "Fail OST during read, with verification"
# --- iozone_bg fragment (helper used by test_5; header/closing elided) ---
# Runs iozone in the background with its output routed through a pipe so the
# caller can both stream progress and later check for the "complete" marker.
110 local tmppipe=$TMP/${TESTSUITE}.${TESTNAME}.pipe
113 echo "+ iozone $args"
114 iozone $args > $tmppipe &
118 echo "tmppipe=$tmppipe"
121 # iozone exit code is 0 even if iozone is not completed
122 # need to check iozone output on "complete"
123 local iozonelog=$TMP/${TESTSUITE}.iozone.log
# Copy iozone output line-by-line into a persistent log. NOTE(review): the
# pipeline puts the while-loop in a subshell; any variables set inside it
# are lost to the caller — presumably intentional here, confirm.
125 cat $tmppipe | while read line ; do
127 echo "$line" >>$iozonelog
# iozone "succeeded" only if its last log line contains "complete".
133 if ! $(tail -1 $iozonelog | grep -q complete); then
# --- test_5 fragment: fail the OST while iozone is running ---
143 if [ -z "$(which iozone 2> /dev/null)" ]; then
144 skip_env "iozone missing"
148 # striping is -c 1, get min of available
# Size the workload from the least-free OSC target so a single-stripe file
# fits; use 3/4 of that to leave headroom.
149 local minavail=$(lctl get_param -n osc.*[oO][sS][cC][-_]*.kbytesavail |
151 local size=$(( minavail * 3/4 ))
152 local GB=1048576 # 1048576KB == 1GB
# Cap the file size at 1GB (cap branch elided from this chunk).
154 if (( size > GB )); then
157 # no random I/O (-i 2) as it's very slow with ZFS
158 local iozone_opts="-i 0 -i 1 -+d -r 4 -s $size -f $TDIR/$tfile"
# Launch iozone_bg and fail the OST mid-run (failover lines elided);
# NOTE(review): $pid and $rc are set on lines not visible here.
160 iozone_bg $iozone_opts &
163 echo iozone bg pid=$pid
168 wait $pid || error "wait on iozone failed"
172 wait_delete_completed_mds
173 [ $rc -eq 0 ] || error "iozone failed"
176 run_test 5 "Fail OST during iozone"
# --- kbytesfree helper fragment: sums the "kbytesfree" statfs counter across
# OSC devices via the framework's calc_osc_kbytes (function braces elided).
179 calc_osc_kbytes kbytesfree
# --- test_6 fragment: fail the stripe's OST before obd_destroy and verify
# the deleted file's space is eventually returned after recovery.
183 remote_mds_nodsh && skip "remote MDS with nodsh" && return 0
186 sync && sleep 5 && sync # wait for delete thread
188 # wait till space is returned, following
189 # (( $before > $after_dd)) test counting on that
190 wait_mds_ost_sync || error "first wait_mds_ost_sync failed"
191 wait_destroy_complete || error "first wait_destroy_complete failed"
# Baseline free space, write ~5MB (1280 x 4KB), and record which OST index
# holds the file's stripe so exactly that OST can be failed below.
# NOTE(review): $f is assigned on a line elided from this chunk.
194 local before=$(kbytesfree)
195 dd if=/dev/urandom bs=4096 count=1280 of=$f || error "dd failed"
196 $LFS getstripe $f || error "$LFS getstripe $f failed"
197 local stripe_index=$(lfs getstripe -i $f)
200 sleep 2 # ensure we have a fresh statfs
203 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
# One-shot drop of the MDS reint reply (0x8... = fire once).
204 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
206 # retry till statfs returns useful results
# Poll (up to 20 iterations; sleep elided) until free space reflects the dd.
207 local after_dd=$(kbytesfree)
209 while (( $before <= $after_dd && $i < 20 )); do
213 after_dd=$(kbytesfree)
216 log "before_free: $before after_dd_free: $after_dd took $i seconds"
217 (( $before > $after_dd )) ||
218 error "free grew after dd: before:$before after_dd:$after_dd"
# Fail and recover the OST holding the stripe (facets are 1-based, stripe
# indices 0-based, hence the +1); the file must be gone afterwards.
221 fail ost$((stripe_index + 1))
222 wait_recovery_complete ost$((stripe_index + 1)) ||
223 error "OST$((stripe_index + 1)) recovery not completed"
224 $CHECKSTAT -t file $f && return 2 || true
226 # let the delete happen
227 wait_mds_ost_sync || error "second wait_mds_ost_sync failed"
228 wait_delete_completed || error "second wait_delete_completed failed"
# Space must be back, modulo filesystem log overhead.
229 local after=$(kbytesfree)
230 log "free_before: $before free_after: $after"
231 (( $before <= $after + $(fs_log_size) )) ||
232 error "$before > $after + logsize $(fs_log_size)"
234 run_test 6 "Fail OST before obd_destroy"
# --- test_7 fragment: same space-reclaim check as test_6, but without the
# MDS reint fail_loc, always failing ost1 (failover lines elided).
238 sync && sleep 5 && sync # wait for delete thread
240 # wait till space is returned, following
241 # (( $before > $after_dd)) test counting on that
242 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
243 wait_destroy_complete || error "wait_destroy_complete failed"
# NOTE(review): $f and $i are assigned on lines elided from this chunk.
245 local before=$(kbytesfree)
246 dd if=/dev/urandom bs=4096 count=1280 of=$f ||
247 error "dd to file failed: $?"
# Poll (up to 10 iterations) until statfs reflects the dd's consumption.
250 local after_dd=$(kbytesfree)
252 while (( $before <= $after_dd && $i < 10 )); do
256 after_dd=$(kbytesfree)
259 log "before: $before after_dd: $after_dd took $i seconds"
260 (( $before > $after_dd )) ||
261 error "space grew after dd: before:$before after_dd:$after_dd"
# After the (elided) ost1 failover+unlink, the file must be gone and its
# space returned, modulo filesystem log overhead.
265 wait_recovery_complete ost1 || error "OST recovery not done"
266 $CHECKSTAT -t file $f && return 2 || true
268 # let the delete happen
269 wait_mds_ost_sync || error "wait_mds_ost_sync failed"
270 wait_delete_completed || error "wait_delete_completed failed"
271 local after=$(kbytesfree)
272 log "before: $before after: $after"
273 (( $before <= $after + $(fs_log_size) )) ||
274 error "$before > $after + logsize $(fs_log_size)"
276 run_test 7 "Fail OST before obd_destroy"
# --- test_8a fragment: a sync write stalled by -EINPROGRESS (DQACQ fail_loc)
# must be redone transparently once the fail_loc is cleared.
279 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.0) ]] ||
280 { skip "Need MDS version at least 2.3.0"; return; }
281 verify=$ROOT/tmp/verify-$$
282 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
283 error "Create verify file failed"
284 #define OBD_FAIL_OST_DQACQ_NET 0x230
# Persistent (not one-shot) fail_loc: every DQACQ gets -EINPROGRESS until
# explicitly cleared below.
285 do_facet ost1 "lctl set_param fail_loc=0x230"
# NOTE(review): $ddpid is captured on a line elided from this chunk.
286 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
288 sleep $TIMEOUT # wait for the io to become redo io
# The dd must still be blocked in redo; finishing early means the fail_loc
# did not hold the write.
289 if ! ps -p $ddpid > /dev/null 2>&1; then
290 error "redo io finished incorrectly"
292 do_facet ost1 "lctl set_param fail_loc=0"
295 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
296 rm -f $verify $TDIR/$tfile
# Redo must be silent: the kernel should not have logged redo errors.
297 message=$(dmesg | grep "redo for recoverable error -115")
298 [ -z "$message" ] || error "redo error messages found in dmesg"
300 run_test 8a "Verify redo io: redo io when get -EINPROGRESS error"
# --- test_8b fragment: like 8a, but an OST failover (elided) happens while
# the io is in redo; the write must still complete after recovery.
303 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.0) ]] ||
304 { skip "Need MDS version at least 2.3.0"; return; }
305 verify=$ROOT/tmp/verify-$$
306 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
307 error "Create verify file failed"
308 #define OBD_FAIL_OST_DQACQ_NET 0x230
309 do_facet ost1 "lctl set_param fail_loc=0x230"
# NOTE(review): $ddpid is captured on a line elided from this chunk.
310 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
312 sleep $TIMEOUT # wait for the io to become redo io
314 do_facet ost1 "lctl set_param fail_loc=0"
315 wait $ddpid || error "dd did not complete"
317 cmp $verify $TDIR/$tfile || error "compare $verify $TDIR/$tfile failed"
318 rm -f $verify $TDIR/$tfile
320 run_test 8b "Verify redo io: redo io should success after recovery"
# --- test_8c fragment: client is evicted (eviction lines elided) while io is
# in redo; the redo must then FAIL, leaving the file different from $verify.
323 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.0) ]] ||
324 { skip "Need MDS version at least 2.3.0"; return; }
325 verify=$ROOT/tmp/verify-$$
326 dd if=/dev/urandom of=$verify bs=4096 count=1280 ||
327 error "Create verify file failed"
328 #define OBD_FAIL_OST_DQACQ_NET 0x230
329 do_facet ost1 "lctl set_param fail_loc=0x230"
330 dd if=$verify of=$TDIR/$tfile bs=4096 count=1280 oflag=sync &
332 sleep $TIMEOUT # wait for the io to become redo io
334 # allow recovery to complete
335 sleep $((TIMEOUT + 2))
336 do_facet ost1 "lctl set_param fail_loc=0"
# Inverted check vs 8a/8b: after eviction the data must NOT match.
339 cmp $verify $TDIR/$tfile && error "compare files should fail"
340 rm -f $verify $TDIR/$tfile
342 run_test 8c "Verify redo io: redo io should fail after eviction"
# --- test_8d fragment: MDS-side -EINPROGRESS (DQACQ fail_loc 0x187) must be
# redone for both the non-intent (mcreate) and intent (open|O_CREAT) paths.
345 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.0) ]] ||
346 { skip "Need MDS version at least 2.3.0"; return; }
347 #define OBD_FAIL_MDS_DQACQ_NET 0x187
348 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
349 # test the non-intent create path
# NOTE(review): $cpid is captured on a line elided from this chunk.
350 mcreate $TDIR/$tfile &
# The create must still be blocked; an early exit means no redo happened.
353 if ! ps -p $cpid > /dev/null 2>&1; then
354 error "mknod finished incorrectly"
356 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
357 wait $cpid || error "mcreate did not complete"
358 stat $TDIR/$tfile || error "mknod failed"
# Second phase: same fail_loc, but via the intent-create (open) path.
362 #define OBD_FAIL_MDS_DQACQ_NET 0x187
363 do_facet $SINGLEMDS "lctl set_param fail_loc=0x187"
364 # test the intent create path
365 openfile -f O_RDWR:O_CREAT $TDIR/$tfile &
368 if ! ps -p $cpid > /dev/null 2>&1; then
369 error "open finished incorrectly"
371 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
372 wait $cpid || error "openfile failed"
373 stat $TDIR/$tfile || error "open failed"
375 run_test 8d "Verify redo creation on -EINPROGRESS"
# --- test_8e fragment: statfs returning -EINPROGRESS must make ptlrpc
# resend (so a backgrounded df — launch line elided — stays blocked).
378 [[ $(lustre_version_code $SINGLEMDS) -ge $(version_code 2.3.0) ]] ||
379 { skip "Need MDS version at least 2.3.0"; return; }
380 sleep 1 # ensure we have a fresh statfs
381 #define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
382 do_facet ost1 "lctl set_param fail_loc=0x231"
# NOTE(review): $dfpid is captured on a line elided from this chunk.
# If df already exited, the request was not resent; clear the fail_loc
# before erroring so the cluster is left usable.
386 if ! ps -p $dfpid > /dev/null 2>&1; then
387 do_facet ost1 "lctl set_param fail_loc=0"
388 error "df shouldn't have completed!"
391 run_test 8e "Verify that ptlrpc resends request on -EINPROGRESS"
# --- test_9 fragment: delay replayed-request handling during recovery
# (fail_loc 0x714) and assert no "past deadline" messages were logged.
394 [ $(lustre_version_code ost1) -ge $(version_code 2.6.54) ] ||
395 { skip "Need OST version at least 2.6.54"; return; }
396 $LFS setstripe -i 0 -c 1 $DIR/$tfile || error "setstripe failed"
398 # LU-1573 - Add duplicate write to generate grants
399 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
400 error "First write failed"
403 dd if=/dev/zero of=$DIR/$tfile count=1 bs=1M > /dev/null ||
404 error "failed to write"
405 # failover, replay and resend replayed waiting request
406 #define OBD_FAIL_TGT_REPLAY_DELAY2 0x714
407 do_facet ost1 $LCTL set_param fail_loc=0x00000714
408 do_facet ost1 $LCTL set_param fail_val=$TIMEOUT
410 do_facet ost1 $LCTL set_param fail_loc=0
# Scan only output after the last recovery marker for the deadline message;
# the failing "error ..." continuation is elided from this chunk.
411 do_facet ost1 "dmesg | tail -n 100" |
412 sed -n '/no req deadline/,$ p' | grep -q 'Already past' &&
416 run_test 9 "Verify that no req deadline happened during recovery"
# --- test_10 fragment: race a delayed PW lock cancel against an ost1
# failover, then inject a grant check (0x32a) on the replayed lock.
421 dd if=/dev/zero of=$TDIR/$tfile count=10 || error "dd failed"
423 #define OBD_FAIL_OSC_DELAY_IO 0x414
# Delay client OSC io by 60s so the lock cancel is still pending at failover.
424 $LCTL set_param fail_val=60 fail_loc=0x414
425 cancel_lru_locks OST0000-osc &
427 facet_failover ost1 || error "failover: $?"
429 #define OBD_FAIL_LDLM_GRANT_CHECK 0x32a
430 $LCTL set_param fail_loc=0x32a
435 run_test 10 "conflicting PW & PR locks on a client"
# --- test_12 fragment: stat files created concurrently with a delayed-commit
# OST failover; ls -lR must not see files whose OST objects are missing.
438 [ $FAILURE_MODE != "HARD" ] &&
439 skip "Test needs FAILURE_MODE HARD" && return 0
440 remote_ost || { skip "need remote OST" && return 0; }
# NOTE(review): $tmp and $dir are assigned on lines elided from this chunk.
447 mkdir -p $tmp || error "can't create $tmp"
448 mkdir -p $dir || error "can't create $dir"
450 $LFS setstripe -c 1 -i 0 $dir
# NOTE(review): backtick command substitution; prefer $(seq 1 10) per
# modern shell style — left unchanged here.
452 for i in `seq 1 10`; do mkdir $dir/d$i; done
454 #define OBD_FAIL_OST_DELAY_TRANS 0x245
455 do_facet ost1 "$LCTL set_param fail_loc=0x245" ||
456 error "can't set fail_loc"
# Fan out background createmany jobs with random names (loop header elided).
460 createmany -o $dir/d$i/$(openssl rand -base64 12) 500 &
463 echo "Waiting createmany pids"
# ls -lR in the background; "?" in its output or ENOENT means a file had no
# backing OST object after failover.
466 ls -lR $dir > $tmp/ls_r_out 2>&1&
471 echo "starting wait for ls -l"
473 grep "?\|No such file or directory" $tmp/ls_r_out &&
474 error "Found file without object on OST"
478 run_test 12 "check stat after OST failover"
# Unmount/clean up the test filesystem (framework teardown).
481 check_and_cleanup_lustre