# This test needs to be run on the client
LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
. $LUSTRE/tests/test-framework.sh
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
CHECK_GRANT=${CHECK_GRANT:-"yes"}
GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
remote_mds_nodsh && log "SKIP: remote MDS with nodsh" && exit 0
ALWAYS_EXCEPT="61d $REPLAY_SINGLE_EXCEPT"
if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
CONFIG_EXCEPTIONS="0b 42 47 61a 61c"
echo -n "Several OST services on one OST node are used with FAILURE_MODE=$FAILURE_MODE. "
echo "Excluding tests: $CONFIG_EXCEPTIONS"
ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
# 63 min 7 min AT AT AT AT"
[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
check_and_setup_lustre
rm -rf $DIR/[df][0-9]*
test_0a() { # was test_0
replay_barrier $SINGLEMDS
run_test 0a "empty replay"
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
# this test attempts to trigger a race in the precreation code,
# and must run before any other objects are created on the filesystem
createmany -o $DIR/$tfile 20 || return 1
unlinkmany $DIR/$tfile 20 || return 2
run_test 0b "ensure object created after recovery exists (3284)"
lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
lctl get_param -n seq.cli-srv-$mds-mdc-*.width
# This test should pass for single-mds and multi-mds configs.
# But for different configurations it tests different things.
# (1) fld_create replay should happen;
# (2) fld_create replay should not return -EEXIST; if it does,
# it means the sequence manager recovery code is buggy and allocated
# the same sequence twice after recovery.
# (1) fld_create replay may not happen, because its home MDS is
# MDS2, which is not involved in recovery;
# (2) as fld_create does not happen on MDS1, it does not make any
local label=`mdsdevlabel 1`
[ -z "$label" ] && echo "No label for mds1" && return 1
replay_barrier $SINGLEMDS
local sw=`seq_get_width $label`
# make the seq manager switch to the next sequence each
# time a new fid is needed.
seq_set_width $label 1
# make sure that fld has created at least one new
touch $DIR/$tfile || return 2
seq_set_width $label $sw
# fail $SINGLEMDS and start recovery, replay RPCs, etc.
# wait for recovery to finish
# flush the fld cache and dentry cache to make the lookup
# find the newly created entry instead of revalidating the existing one
zconf_mount `hostname` $MOUNT
# issue a lookup, which should trigger an fld lookup; this should
# fail if the client did not replay the fld create correctly and
# the server has no fld entry
touch $DIR/$tfile || return 3
rm $DIR/$tfile || return 4
run_test 0c "fld create"
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
run_test 1 "simple create"
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
replay_barrier $SINGLEMDS
o_directory $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 2
run_test 3a "replay failed open(O_DIRECTORY)"
replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
$CHECKSTAT -t file $DIR/$tfile && return 2
run_test 3b "replay failed open -ENOMEM"
replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
$CHECKSTAT -t file $DIR/$tfile && return 2
run_test 3c "replay failed open -ENOMEM"
test_4a() { # was test_4
replay_barrier $SINGLEMDS
for i in `seq 10`; do
echo "tag-$i" > $DIR/$tfile-$i
for i in `seq 10`; do
grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
run_test 4a "|x| 10 open(O_CREAT)s"
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
run_test 4b "|x| rm 10 files"
# The idea is to get past the first block of precreated files on both
# OSTs, and then replay.
replay_barrier $SINGLEMDS
for i in `seq 220`; do
echo "tag-$i" > $DIR/$tfile-$i
for i in `seq 220`; do
grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
# wait for the removal to be committed
run_test 5 "|x| 220 open(O_CREAT)"
test_6a() { # was test_6
replay_barrier $SINGLEMDS
mcreate $DIR/$tdir/$tfile
$CHECKSTAT -t dir $DIR/$tdir || return 1
$CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
# wait for the log processing thread
run_test 6a "mkdir + contained create"
replay_barrier $SINGLEMDS
$CHECKSTAT -t dir $DIR/$tdir && return 1 || true
run_test 6b "|X| rmdir"
replay_barrier $SINGLEMDS
mcreate $DIR/$tdir/$tfile
$CHECKSTAT -t dir $DIR/$tdir || return 1
$CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
run_test 7 "mkdir |X| contained create"
# make sure there is no side effect from the previous test.
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile mo_c || return 4
$CHECKSTAT -t file $DIR/$tfile || return 1
kill -USR1 $MULTIPID || return 2
wait $MULTIPID || return 3
run_test 8 "creat open |X| close"
replay_barrier $SINGLEMDS
local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
echo " old_inum == $old_inum, new_inum == $new_inum"
if [ $old_inum -eq $new_inum ] ;
echo " old_inum and new_inum match"
echo "!!!! old_inum and new_inum do NOT match"
run_test 9 "|X| create (same inum/gen)"
replay_barrier $SINGLEMDS
mv $DIR/$tfile $DIR/$tfile-2
$CHECKSTAT $DIR/$tfile && return 1
$CHECKSTAT $DIR/$tfile-2 || return 2
run_test 10 "create |X| rename unlink"
echo "old" > $DIR/$tfile
mv $DIR/$tfile $DIR/$tfile-2
replay_barrier $SINGLEMDS
echo "new" > $DIR/$tfile
grep old $DIR/$tfile-2
grep new $DIR/$tfile || return 1
grep old $DIR/$tfile-2 || return 2
run_test 11 "create open write rename |X| create-old-name read"
multiop_bg_pause $DIR/$tfile o_tSc || return 3
replay_barrier $SINGLEMDS
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 12 "open, unlink |X| close"
# 1777 - replay open after committed chmod that would make
# a regular open a failure
multiop_bg_pause $DIR/$tfile O_wc || return 3
$CHECKSTAT -p 0 $DIR/$tfile
replay_barrier $SINGLEMDS
wait $pid || return 1
$CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
run_test 13 "open chmod 0 |x| write close"
multiop_bg_pause $DIR/$tfile O_tSc || return 4
replay_barrier $SINGLEMDS
kill -USR1 $pid || return 1
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
run_test 14 "open(O_CREAT), unlink |X| close"
multiop_bg_pause $DIR/$tfile O_tSc || return 5
replay_barrier $SINGLEMDS
touch $DIR/g11 || return 1
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
touch $DIR/h11 || return 4
run_test 15 "open(O_CREAT), unlink |X| touch new, close"
replay_barrier $SINGLEMDS
mcreate $DIR/$tfile-2
[ -e $DIR/$tfile ] && return 1
[ -e $DIR/$tfile-2 ] || return 2
munlink $DIR/$tfile-2 || return 3
run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile O_c || return 4
kill -USR1 $pid || return 1
wait $pid || return 2
$CHECKSTAT -t file $DIR/$tfile || return 3
run_test 17 "|X| open(O_CREAT), |replay| close"
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile O_tSc || return 8
touch $DIR/$tfile-2 || return 1
echo "pid: $pid will close"
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
[ -e $DIR/$tfile-2 ] || return 4
# this touch frequently fails
touch $DIR/$tfile-3 || return 5
munlink $DIR/$tfile-2 || return 6
munlink $DIR/$tfile-3 || return 7
run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
# bug 1855 (a simpler form of test_11 above)
replay_barrier $SINGLEMDS
echo "old" > $DIR/$tfile
mv $DIR/$tfile $DIR/$tfile-2
grep old $DIR/$tfile-2
grep old $DIR/$tfile-2 || return 2
run_test 19 "|X| mcreate, open, write, rename"
test_20a() { # was test_20
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile O_tSc || return 3
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
test_20b() { # bug 10480
BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
while [ ! -e $DIR/$tfile ] ; do
usleep 60 # give dd a chance to start
lfs getstripe $DIR/$tfile || return 1
rm -f $DIR/$tfile || return 2 # make it an orphan
df -P $DIR || df -P $DIR || true # reconnect
fail $SINGLEMDS # start orphan recovery
df -P $DIR || df -P $DIR || true # reconnect
wait_mds_recovery_done || error "MDS recovery not done"
# FIXME just because recovery is done doesn't mean we've finished
# orphan cleanup. Fake it with a sleep for now...
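# A sturdier alternative would be to poll until the used-block count
# stops shrinking instead of guessing a delay. The helper below is an
# illustrative sketch only and is not invoked; nothing here is assumed
# to exist in test-framework.sh:
wait_space_released() {
	local prev=-1
	local cur=$(df -P $DIR | tail -1 | awk '{ print $3 }')
	while [ "$cur" != "$prev" ]; do
		sleep 2
		prev=$cur
		cur=$(df -P $DIR | tail -1 | awk '{ print $3 }')
	done
}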
AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
log "before $BEFOREUSED, after $AFTERUSED"
[ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
error "after $AFTERUSED > before $BEFOREUSED + 20 blocks"
run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)"
test_20c() { # bug 10480
multiop_bg_pause $DIR/$tfile Ow_c || return 1
df -P $DIR || df -P $DIR || true # reconnect
test -s $DIR/$tfile || error "File was truncated"
wait $pid || return 1
run_test 20c "check that client eviction does not affect file content"
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile O_tSc || return 5
touch $DIR/g11 || return 1
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
touch $DIR/h11 || return 4
run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile O_tSc || return 3
replay_barrier $SINGLEMDS
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile O_tSc || return 5
replay_barrier $SINGLEMDS
touch $DIR/g11 || return 1
wait $pid || return 2
[ -e $DIR/$tfile ] && return 3
touch $DIR/h11 || return 4
run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile O_tSc || return 3
replay_barrier $SINGLEMDS
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile O_tSc || return 3
replay_barrier $SINGLEMDS
wait $pid || return 1
[ -e $DIR/$tfile ] && return 2
run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
wait $pid2 || return 1
wait $pid1 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
wait $pid1 || return 1
wait $pid2 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
replay_barrier $SINGLEMDS
wait $pid2 || return 1
wait $pid1 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
replay_barrier $SINGLEMDS
wait $pid1 || return 1
wait $pid2 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
replay_barrier $SINGLEMDS
wait $pid1 || return 1
wait $pid2 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
replay_barrier $SINGLEMDS
wait $pid1 || return 1
wait $pid2 || return 2
[ -e $DIR/$tfile-1 ] && return 3
[ -e $DIR/$tfile-2 ] && return 4
run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
# tests for bug 2104; completion without crashing is success. The close is
# stale, but we always return 0 for close, so the app never sees it.
multiop_bg_pause $DIR/$tfile O_c || return 2
multiop_bg_pause $DIR/$tfile O_c || return 3
df $MOUNT || sleep 1 && df $MOUNT || return 1
wait $pid1 || return 4
wait $pid2 || return 5
run_test 32 "close() notices client eviction; close() after client eviction"
# Abort recovery before the client completes replay
test_33a() { # was test_33
replay_barrier $SINGLEMDS
createmany -o $DIR/$tfile-%d 100
fail_abort $SINGLEMDS
# these files should be gone, because the replay was aborted
$CHECKSTAT -t file $DIR/$tfile-* && return 3
unlinkmany $DIR/$tfile-%d 0 100
run_test 33a "abort recovery before client does replay"
test_33b() { # was test_33a
replay_barrier $SINGLEMDS
createmany -o $DIR/$tfile-%d 10
fail_abort $SINGLEMDS
unlinkmany $DIR/$tfile-%d 0 10
# recreate shouldn't fail
createmany -o $DIR/$tfile-%d 10 || return 3
unlinkmany $DIR/$tfile-%d 0 10
run_test 33b "fid shouldn't be reused after abort recovery"
multiop_bg_pause $DIR/$tfile O_c || return 2
replay_barrier $SINGLEMDS
fail_abort $SINGLEMDS
wait $pid || return 3
[ -e $DIR/$tfile ] && return 1
run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
# give the MDS a chance to remove it
fail_abort $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile && return 1 || true
run_test 35 "test recovery from llog for unlink op"
# b=2432 resent cancel after replay uses wrong cookie,
# so don't resend cancels
replay_barrier $SINGLEMDS
checkstat $DIR/$tfile
facet_failover $SINGLEMDS
if dmesg | grep "unknown lock cookie"; then
echo "cancel after replay failed"
run_test 36 "don't resend cancel"
# directory orphans can't be unlinked from PENDING directory
rmdir $DIR/$tfile 2>/dev/null
multiop_bg_pause $DIR/$tfile dD_c || return 2
replay_barrier $SINGLEMDS
# clear the dmesg buffer so we only see errors from this recovery
fail_abort $SINGLEMDS
dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
wait $pid || return 3
run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
createmany -o $DIR/$tfile-%d 800
unlinkmany $DIR/$tfile-%d 0 400
replay_barrier $SINGLEMDS
unlinkmany $DIR/$tfile-%d 400 400
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
run_test 38 "test recovery from unlink llog (test llog_gen_rec)"
test_39() { # bug 4176
createmany -o $DIR/$tfile-%d 800
replay_barrier $SINGLEMDS
unlinkmany $DIR/$tfile-%d 0 400
unlinkmany $DIR/$tfile-%d 400 400
$CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
run_test 39 "test recovery from unlink llog (test llog_gen_rec)"
lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
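# (count_ost_writes sums the second field of each osc's "ost_write" stats
# line; this assumes the usual "ost_write NNN samples ..." layout, where
# $2 is the cumulative sample count. The exact stats format can vary
# between Lustre versions.)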
$LCTL mark multiop $MOUNT/$tfile OS_c
multiop $MOUNT/$tfile OS_c &
writeme -s $MOUNT/${tfile}-2 &
facet_failover $SINGLEMDS
#define OBD_FAIL_MDS_CONNECT_NET 0x117
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
stat1=`count_ost_writes`
stat2=`count_ost_writes`
echo "$stat1, $stat2"
if [ $stat1 -lt $stat2 ]; then
echo "writes continuing during recovery"
echo "writes not continuing during recovery, bug 2477"
echo "waiting for writeme $WRITE_PID"
echo "waiting for multiop $PID"
wait $PID || return 2
do_facet client munlink $MOUNT/$tfile || return 3
do_facet client munlink $MOUNT/${tfile}-2 || return 3
run_test 40 "cause recovery in ptlrpc, ensure IO continues"
# make sure that a read to one osc doesn't try to double-unlock its page just
# because another osc is invalid. trigger_group_io used to mistakenly return
# an error if any oscs were invalid even after having successfully put rpcs
# on valid oscs. This was fatal if the caller was ll_readpage, which unlocked
# the page, guaranteeing that the unlock from the RPC completion would
# assert on trying to unlock the unlocked page.
[ $OSTCOUNT -lt 2 ] && \
skip "skipping test 41: we don't have a second OST to test with" && \
local f=$MOUNT/$tfile
# make sure the start of the file is on ost1
lfs setstripe $f -s $((128 * 1024)) -i 0
do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
# fail ost2 and read from ost1
local osc2dev=`do_facet $SINGLEMDS "lctl get_param -n devices | grep ${ost2_svc}-osc-MDT0000" | awk '{print $1}'`
[ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
run_test 41 "read from a valid osc while other oscs are invalid"
# test MDS recovery after OST failure
blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
createmany -o $DIR/$tfile-%d 800
unlinkmany $DIR/$tfile-%d 0 400
lctl set_param debug=-1
# osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
#blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
#[ $blocks_after -lt $blocks ] || return 1
echo "wait for MDS to time out and recover"
sleep $((TIMEOUT * 2))
unlinkmany $DIR/$tfile-%d 400 400
$CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
run_test 42 "recovery after ost failure"
# a timeout in the MDS/OST recovery RPC will LBUG the MDS
test_43() { # bug 2530
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
replay_barrier $SINGLEMDS
# OBD_FAIL_OST_CREATE_NET 0x204
do_facet ost1 "lctl set_param fail_loc=0x80000204"
do_facet ost1 "lctl set_param fail_loc=0"
run_test 43 "mds osc import failure during recovery; don't LBUG"
test_44a() { # was test_44
mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
[ "$mdcdev" ] || exit 2
# adaptive timeouts slow this way down
if at_is_valid && at_is_enabled; then
at_max_saved=$(at_max_get mds)
for i in `seq 1 10`; do
echo "$i of 10 ($(date +%s))"
do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
#define OBD_FAIL_TGT_CONN_RACE 0x701
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
$LCTL --device $mdcdev recover
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
[ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
run_test 44a "race in target handle connect"
mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
[ "$mdcdev" ] || exit 2
for i in `seq 1 10`; do
echo "$i of 10 ($(date +%s))"
do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
#define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
$LCTL --device $mdcdev recover
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
run_test 44b "race in target handle connect"
# Handle failed close
mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
[ "$mdcdev" ] || exit 2
$LCTL --device $mdcdev recover
multiop_bg_pause $DIR/$tfile O_c || return 1
# This will cause the CLOSE to fail before even
# allocating a reply buffer
$LCTL --device $mdcdev deactivate || return 4
wait $pid || return 1
$LCTL --device $mdcdev activate || return 5
$CHECKSTAT -t file $DIR/$tfile || return 2
run_test 45 "Handle failed close"
drop_reply "touch $DIR/$tfile"
# ironically, the previous test, 45, will cause a real forced close,
# so just look for one for this test
dmesg | grep -i "force closing client file handle for $tfile" && return 1
run_test 46 "Don't leak file handle after open resend (3325)"
test_47() { # bug 2824
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
# create some files to make sure precreate has been done on all
# OSTs. (just in case this test is run independently)
createmany -o $DIR/$tfile 20 || return 1
# OBD_FAIL_OST_CREATE_NET 0x204
do_facet ost1 "lctl set_param fail_loc=0x80000204"
df $MOUNT || return 2
# let the MDS discover the OST failure, attempt to recover, fail
# and recover again.
sleep $((3 * TIMEOUT))
# Without 2824, this createmany would hang
createmany -o $DIR/$tfile 20 || return 3
unlinkmany $DIR/$tfile 20 || return 4
do_facet ost1 "lctl set_param fail_loc=0"
run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
[ "$OSTCOUNT" -lt "2" ] && skip "$OSTCOUNT < 2 OSTs -- skipping" && return
replay_barrier $SINGLEMDS
createmany -o $DIR/$tfile 20 || return 1
# OBD_FAIL_OST_EROFS 0x216
facet_failover $SINGLEMDS
do_facet ost1 "lctl set_param fail_loc=0x80000216"
df $MOUNT || return 2
createmany -o $DIR/$tfile 20 20 || return 2
unlinkmany $DIR/$tfile 40 || return 3
run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
local oscdev=`do_facet $SINGLEMDS lctl get_param -n devices | grep ${ost1_svc}-osc-MDT0000 | awk '{print $1}'`
[ "$oscdev" ] || return 1
do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2
do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3
# give the mds_lov_sync threads a chance to run
run_test 50 "Double OSC recovery, don't LASSERT (3812)"
# b3764 timed out lock replay
cancel_lru_locks mdc
multiop $DIR/$tfile s || return 1
replay_barrier $SINGLEMDS
#define OBD_FAIL_LDLM_REPLY 0x30c
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
fail $SINGLEMDS || return 2
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
$CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
run_test 52 "time out lock replay (3764)"
# bug 3462 - simultaneous MDC requests
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
# give multiop a chance to open
#define OBD_FAIL_MDS_CLOSE_NET 0x115
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
mcreate $DIR/${tdir}-2/f || return 1
# close should still be here
[ -d /proc/$close_pid ] || return 2
replay_barrier_nodf $SINGLEMDS
wait $close_pid || return 3
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
rm -rf $DIR/${tdir}-*
run_test 53a "|X| close request while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET 0x107
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
mcreate $DIR/${tdir}-2/f &
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
wait $close_pid || return 1
# open should still be here
[ -d /proc/$open_pid ] || return 2
replay_barrier_nodf $SINGLEMDS
wait $open_pid || return 3
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
rm -rf $DIR/${tdir}-*
run_test 53b "|X| open request while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET 0x107
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
mcreate $DIR/${tdir}-2/f &
#define OBD_FAIL_MDS_CLOSE_NET 0x115
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
replay_barrier_nodf $SINGLEMDS
fail_nodf $SINGLEMDS
wait $open_pid || return 1
# close should be gone
[ -d /proc/$close_pid ] && return 2
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
rm -rf $DIR/${tdir}-*
run_test 53c "|X| open request and close request while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
# give multiop a chance to open
#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
mcreate $DIR/${tdir}-2/f || return 1
# close should still be here
[ -d /proc/$close_pid ] || return 2
wait $close_pid || return 3
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
rm -rf $DIR/${tdir}-*
run_test 53d "|X| close reply while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
mcreate $DIR/${tdir}-2/f &
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
wait $close_pid || return 1
# open should still be here
[ -d /proc/$open_pid ] || return 2
replay_barrier_nodf $SINGLEMDS
wait $open_pid || return 3
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
rm -rf $DIR/${tdir}-*
run_test 53e "|X| open reply while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
mcreate $DIR/${tdir}-2/f &
#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
replay_barrier_nodf $SINGLEMDS
fail_nodf $SINGLEMDS
wait $open_pid || return 1
# close should be gone
[ -d /proc/$close_pid ] && return 2
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
rm -rf $DIR/${tdir}-*
run_test 53f "|X| open reply and close reply while two MDC requests in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
mcreate $DIR/${tdir}-2/f &
#define OBD_FAIL_MDS_CLOSE_NET 0x115
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
replay_barrier_nodf $SINGLEMDS
fail_nodf $SINGLEMDS
wait $open_pid || return 1
# close should be gone
[ -d /proc/$close_pid ] && return 2
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
rm -rf $DIR/${tdir}-*
run_test 53g "|X| drop open reply and close request while close and open are both in flight"
mkdir -p $DIR/${tdir}-1
mkdir -p $DIR/${tdir}-2
multiop $DIR/${tdir}-1/f O_c &
#define OBD_FAIL_MDS_REINT_NET 0x107
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
mcreate $DIR/${tdir}-2/f &
#define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
kill -USR1 $close_pid
cancel_lru_locks mdc # force the close
replay_barrier_nodf $SINGLEMDS
fail_nodf $SINGLEMDS
wait $open_pid || return 1
# close should be gone
[ -d /proc/$close_pid ] && return 2
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
$CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
$CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
rm -rf $DIR/${tdir}-*
run_test 53h "|X| open request and close reply while two MDC requests in flight"
#b_cray 54 "|X| open request and close reply while two MDC requests in flight"
#b3761 ASSERTION(hash != 0) failed
# OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
# give touch a chance to run
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
#b3440 ASSERTION(rec->ur_fid2->id) failed
ln -s foo $DIR/$tfile
replay_barrier $SINGLEMDS
#drop_reply "cat $DIR/$tfile"
run_test 56 "don't replay a symlink open request (3440)"
# recover one mds-ost setattr from llog
#define OBD_FAIL_MDS_OST_SETATTR 0x12c
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tfile || return 1
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
run_test 57 "test recovery from llog for setattr op"
# recover many mds-ost setattrs from llog
#define OBD_FAIL_MDS_OST_SETATTR 0x12c
do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
createmany -o $DIR/$tdir/$tfile-%d 2500
replay_barrier $SINGLEMDS
$CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
unlinkmany $DIR/$tdir/$tfile-%d 2500
run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
mount_client $MOUNT2
touch $DIR/$tdir/$tfile
replay_barrier $SINGLEMDS
setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile
VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
[ x$VAL = x"bar" ] || return 1
rm -f $DIR/$tdir/$tfile
zconf_umount `hostname` $MOUNT2
run_test 58b "test replay of setxattr op"
test_58c() { # bug 16570
mount_client $MOUNT2
touch $DIR/$tdir/$tfile
drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \
VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
[ x$VAL = x"bar" ] || return 2
drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \
VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile`
[ x$VAL = x"bar1" ] || return 4
rm -f $DIR/$tdir/$tfile
zconf_umount `hostname` $MOUNT2
run_test 58c "resend/reconstruct setxattr op"
# log_commit_thread vs filter_destroy race used to lead to import use-after-free
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
createmany -o $DIR/$tdir/$tfile-%d 200
unlinkmany $DIR/$tdir/$tfile-%d 200
#define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
do_facet ost1 "lctl set_param fail_loc=0x507"
do_facet ost1 "lctl set_param fail_loc=0x0"
run_test 59 "test log_commit_thread vs filter_destroy race"
createmany -o $DIR/$tdir/$tfile-%d 2000
#define OBD_FAIL_OBD_LOG_CANCEL_REP 0x606
do_facet $SINGLEMDS "lctl set_param fail_loc=0x606"
unlinkmany $DIR/$tdir/$tfile-%d 2000
do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
do_facet $SINGLEMDS $LCTL dk | grep -q "RESENT cancel req" || return 1
run_test 59b "resent handle in llog_origin_handle_cancel"
# race between add unlink llog vs cat log init in post_recovery (only for b1_6)
# bug 12086: this test should trigger no oops and no "No ctxt" errors
createmany -o $DIR/$tdir/$tfile-%d 200
replay_barrier $SINGLEMDS
unlinkmany $DIR/$tdir/$tfile-%d 0 100
unlinkmany $DIR/$tdir/$tfile-%d 100 100
local no_ctxt=`dmesg | grep "No ctxt"`
[ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
run_test 60 "test llog post recovery init vs llog unlink"
# test race: llog recovery thread vs llog cleanup
test_61a() { # was test_61
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
createmany -o $DIR/$tdir/$tfile-%d 800
# OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
unlinkmany $DIR/$tdir/$tfile-%d 800
set_nodes_failloc "$(osts_nodes)" 0x80000221
set_nodes_failloc "$(osts_nodes)" 0x0
$CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1
run_test 61a "test race llog recovery vs llog cleanup"
# test race: mds llog sync vs llog cleanup
# OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x140
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
facet_failover $SINGLEMDS
do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1
run_test 61b "test race mds llog sync vs llog cleanup"
# test race: cancel cookie cb vs llog cleanup
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
# OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
set_nodes_failloc "$(osts_nodes)" 0x80000222
set_nodes_failloc "$(osts_nodes)" 0x0
run_test 61c "test race cancel cookie cb vs llog cleanup"
test_62() { # Bug 15756 - don't mis-drop resent replay
replay_barrier $SINGLEMDS
createmany -o $DIR/$tdir/$tfile- 25
#define OBD_FAIL_TGT_REPLAY_DROP 0x707
do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
facet_failover $SINGLEMDS
df $MOUNT || return 1
do_facet $SINGLEMDS "lctl set_param fail_loc=0"
unlinkmany $DIR/$tdir/$tfile- 25 || return 2
run_test 62 "don't mis-drop resent replay"
# Adaptive Timeouts (bug 3055)
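# (Background for the AT tests below: adaptive timeouts track observed
# per-portal service times and send "early replies" to extend a client's
# RPC deadline when the server expects to be slow. Tests 65-68 use
# fail_locs to delay requests and then check that the advertised
# timeout estimates adjust as expected.)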
# Assume that all OSTs have the same at_max
for facet in mds client ost; do
eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
local at_max_new=600
if ! at_is_valid; then
skip "AT env is invalid"
for facet in mds client ost; do
at_max=$(at_max_get $facet)
if [ $at_max -ne $at_max_new ]; then
echo "AT value on $facet is $at_max; temporarily forcing it to $at_max_new"
at_max_set $at_max_new $facet
if [ -z "$ATOLDBASE" ]; then
local at_history=$(do_facet mds "find /sys/ -name at_history")
[ -z "$at_history" ] && skip "missing /sys/.../at_history" && return 1
ATOLDBASE=$(do_facet mds "cat $at_history")
# speed up the timebase so we can check decreasing AT
do_facet mds "echo 8 >> $at_history"
do_facet ost1 "echo 8 >> $at_history"
# sleep for a while to cool down; this should be > 8s and also allow
# at least one ping to be sent. Simply use TIMEOUT to be safe.
test_65a() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
$LCTL dk > /dev/null
sysctl -w lnet.debug="+other"
# Slow down a request to the current service time; this is critical
# because previous tests may have caused this value to increase.
REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
awk '/portal 12/ {print $5}'`
REQ_DELAY=$((${REQ_DELAY} + 5))
do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000))
#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
do_facet mds sysctl -w lustre.fail_loc=0x8000050a
createmany -o $DIR/$tfile 10 > /dev/null
unlinkmany $DIR/$tfile 10 > /dev/null
# check for log message
$LCTL dk | grep "Early reply #" || error "No early reply"
# client should show REQ_DELAY estimates
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
run_test 65a "AT: verify early replies"
test_65b() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
sysctl -w lnet.debug="other trace"
$LCTL dk > /dev/null
# Slow down a request to the current service time; this is critical
# because previous tests may have caused this value to increase.
REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
awk '/portal 6/ {print $5}'`
REQ_DELAY=$((${REQ_DELAY} + 5))
do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
do_facet ost1 sysctl -w lustre.fail_loc=0x224
lfs setstripe $DIR/$tfile --index=0 --count=1
# force some real bulk transfer
multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
do_facet ost1 sysctl -w lustre.fail_loc=0
# check for log message
$LCTL dk | grep "Early reply #" || error "No early reply"
# client should show REQ_DELAY estimates
lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
run_test 65b "AT: verify early replies on packed reply / bulk"
test_66a() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
# adjust 5s at a time so no early reply is sent (within deadline)
do_facet mds "sysctl -w lustre.fail_val=5000"
#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
createmany -o $DIR/$tfile 20 > /dev/null
unlinkmany $DIR/$tfile 20 > /dev/null
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
do_facet mds "sysctl -w lustre.fail_val=10000"
do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
createmany -o $DIR/$tfile 20 > /dev/null
unlinkmany $DIR/$tfile 20 > /dev/null
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
do_facet mds "sysctl -w lustre.fail_loc=0"
createmany -o $DIR/$tfile 20 > /dev/null
unlinkmany $DIR/$tfile 20 > /dev/null
lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
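# (The awk below assumes the "timeouts" output is roughly
#  "portal 12 : cur <N> worst <M> ...", i.e. the current service-time
#  estimate in field 5 and the worst-case in field 7; the exact layout
#  may differ between Lustre versions.)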
CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
echo "Current MDT timeout $CUR, worst $WORST"
[ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
run_test 66a "AT: verify MDT service time adjusts with no early replies"
test_66b() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
sysctl -w lustre.fail_val=$(($ORIG + 5))
#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
sysctl -w lustre.fail_loc=0x50c
ls $DIR/$tfile > /dev/null 2>&1
sysctl -w lustre.fail_loc=0
CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
[ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
run_test 66b "AT: verify net latency adjusts"
test_67a() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
# sleeping threads may drive values above this
do_facet ost1 "sysctl -w lustre.fail_val=400"
#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
createmany -o $DIR/$tfile 20 > /dev/null
unlinkmany $DIR/$tfile 20 > /dev/null
do_facet ost1 "sysctl -w lustre.fail_loc=0"
CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
ATTEMPTS=$(($CONN2 - $CONN1))
echo "$ATTEMPTS osc reconnect attempts on gradual slow"
[ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect"
run_test 67a "AT: verify slow request processing doesn't induce reconnects"
test_67b() #bug 3055
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
#define OBD_FAIL_OST_PAUSE_CREATE 0x223
do_facet ost1 "sysctl -w lustre.fail_val=20000"
do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
cp /etc/profile $DIR/$tfile || error "cp failed"
do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
ATTEMPTS=$(($CONN2 - $CONN1))
echo "$ATTEMPTS osc reconnect attempts on instant slow"
# do it again; should not time out
do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
cp /etc/profile $DIR/$tfile || error "cp failed"
do_facet ost1 "sysctl -w lustre.fail_loc=0"
do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
ATTEMPTS=$(($CONN3 - $CONN2))
echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
[ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
test_68 () #bug 13813
remote_ost_nodsh && skip "remote OST with nodsh" && return 0
at_start || return 0
local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
[ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
local ENQ_MIN=$(cat $ldlm_enqueue_min)
echo $TIMEOUT >> $ldlm_enqueue_min
rm -f $DIR/${tfile}_[1-2]
lfs setstripe $DIR/$tfile --index=0 --count=1
#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
sysctl -w lustre.fail_loc=0x80000312
cp /etc/profile $DIR/${tfile}_1 || error "1st cp failed $?"
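# (The first cancel above was paused for just under TIMEOUT; the next
# one is paused for 1.5 * TIMEOUT, which should only succeed if AT has
# stretched the effective lock cancel deadline beyond the static
# ldlm_enqueue_min floor set earlier. This is an inference from the
# fail_val settings, not a statement of the exact server-side arithmetic.)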
sysctl -w lustre.fail_val=$((TIMEOUT * 3 / 2))
sysctl -w lustre.fail_loc=0x80000312
cp /etc/profile $DIR/${tfile}_2 || error "2nd cp failed $?"
sysctl -w lustre.fail_loc=0
echo $ENQ_MIN >> $ldlm_enqueue_min
run_test 68 "AT: verify slowing locks"
if [ -n "$ATOLDBASE" ]; then
at_history=$(do_facet mds "find /sys/ -name at_history")
do_facet mds "echo $ATOLDBASE >> $at_history" || true
do_facet ost1 "echo $ATOLDBASE >> $at_history" || true
if [ $AT_MAX_SET -ne 0 ]; then
for facet in mds client ost; do
var=AT_MAX_SAVE_${facet}
echo "restore AT on $facet to saved value ${!var}"
at_max_set ${!var} $facet
AT_NEW=$(at_max_get $facet)
echo "Restored AT value on $facet $AT_NEW"
[ $AT_NEW -ne ${!var} ] && \
error "$facet : AT value was not restored SAVED ${!var} NEW $AT_NEW"
# end of AT tests (includes the restore lines above)
# start multi-client tests
[ -z "$CLIENTS" ] && \
{ skip "Need two or more clients." && return; }
[ $CLIENTCOUNT -lt 2 ] && \
{ skip "Need two or more clients, have $CLIENTCOUNT" && return; }
echo "mount clients $CLIENTS ..."
zconf_mount_clients $CLIENTS $DIR
local clients=${CLIENTS//,/ }
echo "Write/read files on $DIR; clients: $CLIENTS ..."
for CLIENT in $clients; do
do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \
error "dd failed on $CLIENT"
local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
for C in ${CLIENTS//,/ }; do
do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \
error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
run_test 70a "check multi client t-f"
[ -z "$CLIENTS" ] && \
{ skip "Need two or more clients." && return; }
[ $CLIENTCOUNT -lt 2 ] && \
{ skip "Need two or more clients, have $CLIENTCOUNT" && return; }
zconf_mount_clients $CLIENTS $DIR
local duration="-t 60"
local cmd="rundbench 1 $duration"
for CLIENT in ${CLIENTS//,/ }; do
$PDSH $CLIENT "set -x; PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:${DBENCH_LIB} DBENCH_LIB=${DBENCH_LIB} $cmd" &
echo $PID >pid.$CLIENT
echo "Started load PID=`cat pid.$CLIENT`"
replay_barrier $SINGLEMDS
sleep 3 # give clients time to do operations
log "$TESTNAME fail mds 1"
# wait for clients to reconnect to the MDS
for CLIENT in ${CLIENTS//,/ }; do
PID=`cat pid.$CLIENT`
echo "load on ${CLIENT} returned $rc"
run_test 70b "mds recovery; $CLIENTCOUNT clients"
# end multi-client tests
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
$CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
$CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
$CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
$CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
stat $DIR/$tdir/f1002
run_test 81a "CMD: unlink cross-node file (fail mds with name)"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
mkdir $dir || error "mkdir $dir failed"
$CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
mkdir $dir || error "mkdir $dir failed"
$CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
equals_msg `basename $0`: test complete, cleaning up
check_and_cleanup_lustre
[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true