7 # This test needs to be run on the client
10 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
13 . $LUSTRE/tests/test-framework.sh
15 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
16 CHECK_GRANT=${CHECK_GRANT:-"yes"}
17 GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
19 remote_mds_nodsh && log "SKIP: remote MDS with nodsh" && exit 0
22 # bug numbers for excepted tests: 17466 18857 15962
23 ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
25 if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
26 CONFIG_EXCEPTIONS="0b 42 47 61a 61c"
27 echo -n "Several OST services on one OST node are used with FAILURE_MODE=$FAILURE_MODE. "
28 echo "Excepting tests: $CONFIG_EXCEPTIONS"
29 ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
32 # approximate durations for the EXCEPT_SLOW list below: 63 min, 7 min, and the AT tests
33 [ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
37 check_and_setup_lustre
42 rm -rf $DIR/[df][0-9]*
44 test_0a() { # was test_0
47 replay_barrier $SINGLEMDS
51 run_test 0a "empty replay"
54 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
56 # this test attempts to trigger a race in the precreation code,
57 # and must run before any other objects are created on the filesystem
59 createmany -o $DIR/$tfile 20 || return 1
60 unlinkmany $DIR/$tfile 20 || return 2
62 run_test 0b "ensure object created after recovery exists (3284)"
68 lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
74 lctl get_param -n seq.cli-srv-$mds-mdc-*.width
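# note: the seq_set_width/seq_get_width helpers above simply write and read
# the per-MDC sequence width via lctl; test_0c below sets the width to 1 to
# force a new sequence for every fid, then restores the saved value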
77 # This test should pass for single-mds and multi-mds configs.
78 # But for different configurations it tests different things.
82 # (1) fld_create replay should happen;
84 # (2) fld_create replay should not return -EEXIST; if it does,
85 # it means the sequence manager recovery code is buggy and allocated
86 # the same sequence twice after recovery.
90 # (1) fld_create replay may not happen, because its home MDS is
91 # MDS2, which is not involved in the recovery;
93 # (2) as fld_create does not happen on MDS1, it does not make any
96 local label=`mdsdevlabel 1`
97 [ -z "$label" ] && echo "No label for mds1" && return 1
99 replay_barrier $SINGLEMDS
100 local sw=`seq_get_width $label`
102 # make the seq manager switch to the next sequence each
103 # time a new fid is needed.
104 seq_set_width $label 1
106 # make sure that fld has created at least one new
108 touch $DIR/$tfile || return 2
109 seq_set_width $label $sw
111 # fail $SINGLEMDS and start recovery, replay RPCs, etc.
114 # wait for recovery finish
118 # flush the fld cache and dentry cache so the lookup finds the
119 # created entry instead of revalidating the existing one
121 zconf_mount `hostname` $MOUNT
123 # issue a lookup, which should trigger an fld lookup; this
124 # fails if the client did not replay the fld create correctly
125 # and the server has no fld entry
126 touch $DIR/$tfile || return 3
127 rm $DIR/$tfile || return 4
129 start_full_debug_logging
130 run_test 0c "fld create"
131 stop_full_debug_logging
134 replay_barrier $SINGLEMDS
137 $CHECKSTAT -t file $DIR/$tfile || return 1
140 run_test 1 "simple create"
143 replay_barrier $SINGLEMDS
146 $CHECKSTAT -t file $DIR/$tfile || return 1
153 replay_barrier $SINGLEMDS
156 $CHECKSTAT -t file $DIR/$tfile || return 1
162 local file=$DIR/$tfile
163 replay_barrier $SINGLEMDS
165 openfile -f O_DIRECTORY $file
167 $CHECKSTAT -t file $file || return 2
170 run_test 3a "replay failed open(O_DIRECTORY)"
173 replay_barrier $SINGLEMDS
174 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
175 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
177 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
179 $CHECKSTAT -t file $DIR/$tfile && return 2
182 run_test 3b "replay failed open -ENOMEM"
185 replay_barrier $SINGLEMDS
186 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
187 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
189 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
192 $CHECKSTAT -t file $DIR/$tfile && return 2
195 run_test 3c "replay failed open -ENOMEM"
197 test_4a() { # was test_4
198 replay_barrier $SINGLEMDS
199 for i in `seq 10`; do
200 echo "tag-$i" > $DIR/$tfile-$i
203 for i in `seq 10`; do
204 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
207 run_test 4a "|x| 10 open(O_CREAT)s"
210 replay_barrier $SINGLEMDS
213 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
215 run_test 4b "|x| rm 10 files"
217 # The idea is to get past the first block of precreated files on both
218 # OSTs, and then replay.
220 replay_barrier $SINGLEMDS
221 for i in `seq 220`; do
222 echo "tag-$i" > $DIR/$tfile-$i
225 for i in `seq 220`; do
226 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
230 # wait for the removal to be committed
232 run_test 5 "|x| 220 open(O_CREAT)"
235 test_6a() { # was test_6
237 replay_barrier $SINGLEMDS
238 mcreate $DIR/$tdir/$tfile
240 $CHECKSTAT -t dir $DIR/$tdir || return 1
241 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
243 # wait for the log processing thread
245 run_test 6a "mkdir + contained create"
249 replay_barrier $SINGLEMDS
252 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
254 run_test 6b "|X| rmdir"
258 replay_barrier $SINGLEMDS
259 mcreate $DIR/$tdir/$tfile
261 $CHECKSTAT -t dir $DIR/$tdir || return 1
262 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
265 run_test 7 "mkdir |X| contained create"
268 # make sure there is no side-effect from the previous test.
270 replay_barrier $SINGLEMDS
271 multiop_bg_pause $DIR/$tfile mo_c || return 4
275 $CHECKSTAT -t file $DIR/$tfile || return 1
276 kill -USR1 $MULTIPID || return 2
277 wait $MULTIPID || return 3
280 run_test 8 "creat open |X| close"
283 replay_barrier $SINGLEMDS
285 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
287 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
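# after the failover the recreated file should come back with the same inode
# number; a mismatch means the create was not replayed with its original
# fid/generation (hence the test name "create (same inum/gen)")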
289 echo " old_inum == $old_inum, new_inum == $new_inum"
290 if [ $old_inum -eq $new_inum ] ;
292 echo " old_inum and new_inum match"
294 echo "!!!! old_inum and new_inum do NOT match"
299 run_test 9 "|X| create (same inum/gen)"
303 replay_barrier $SINGLEMDS
304 mv $DIR/$tfile $DIR/$tfile-2
307 $CHECKSTAT $DIR/$tfile && return 1
308 $CHECKSTAT $DIR/$tfile-2 || return 2
312 run_test 10 "create |X| rename unlink"
316 echo "old" > $DIR/$tfile
317 mv $DIR/$tfile $DIR/$tfile-2
318 replay_barrier $SINGLEMDS
319 echo "new" > $DIR/$tfile
321 grep old $DIR/$tfile-2
323 grep new $DIR/$tfile || return 1
324 grep old $DIR/$tfile-2 || return 2
326 run_test 11 "create open write rename |X| create-old-name read"
330 multiop_bg_pause $DIR/$tfile o_tSc || return 3
333 replay_barrier $SINGLEMDS
335 wait $pid || return 1
338 [ -e $DIR/$tfile ] && return 2
341 run_test 12 "open, unlink |X| close"
344 # 1777 - replay open after committed chmod that would make
345 # a regular open a failure
348 multiop_bg_pause $DIR/$tfile O_wc || return 3
351 $CHECKSTAT -p 0 $DIR/$tfile
352 replay_barrier $SINGLEMDS
355 wait $pid || return 1
357 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
358 rm $DIR/$tfile || return 4
361 run_test 13 "open chmod 0 |x| write close"
364 multiop_bg_pause $DIR/$tfile O_tSc || return 4
367 replay_barrier $SINGLEMDS
368 kill -USR1 $pid || return 1
369 wait $pid || return 2
372 [ -e $DIR/$tfile ] && return 3
375 run_test 14 "open(O_CREAT), unlink |X| close"
378 multiop_bg_pause $DIR/$tfile O_tSc || return 5
381 replay_barrier $SINGLEMDS
382 touch $DIR/g11 || return 1
384 wait $pid || return 2
387 [ -e $DIR/$tfile ] && return 3
388 touch $DIR/h11 || return 4
391 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
395 replay_barrier $SINGLEMDS
398 mcreate $DIR/$tfile-2
400 [ -e $DIR/$tfile ] && return 1
401 [ -e $DIR/$tfile-2 ] || return 2
402 munlink $DIR/$tfile-2 || return 3
404 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
407 replay_barrier $SINGLEMDS
408 multiop_bg_pause $DIR/$tfile O_c || return 4
411 kill -USR1 $pid || return 1
412 wait $pid || return 2
413 $CHECKSTAT -t file $DIR/$tfile || return 3
416 run_test 17 "|X| open(O_CREAT), |replay| close"
419 replay_barrier $SINGLEMDS
420 multiop_bg_pause $DIR/$tfile O_tSc || return 8
423 touch $DIR/$tfile-2 || return 1
424 echo "pid: $pid will close"
426 wait $pid || return 2
429 [ -e $DIR/$tfile ] && return 3
430 [ -e $DIR/$tfile-2 ] || return 4
431 # this touch frequently fails
432 touch $DIR/$tfile-3 || return 5
433 munlink $DIR/$tfile-2 || return 6
434 munlink $DIR/$tfile-3 || return 7
437 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
439 # bug 1855 (a simpler form of test_11 above)
441 replay_barrier $SINGLEMDS
443 echo "old" > $DIR/$tfile
444 mv $DIR/$tfile $DIR/$tfile-2
445 grep old $DIR/$tfile-2
447 grep old $DIR/$tfile-2 || return 2
449 run_test 19 "|X| mcreate, open, write, rename "
451 test_20a() { # was test_20
452 replay_barrier $SINGLEMDS
453 multiop_bg_pause $DIR/$tfile O_tSc || return 3
459 wait $pid || return 1
460 [ -e $DIR/$tfile ] && return 2
463 run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
465 test_20b() { # bug 10480
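# record the used-space column of "df -P" before creating the orphan; after
# recovery the orphan cleanup should give the space back, which is what the
# BEFOREUSED/AFTERUSED comparison at the end of this test checks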
466 BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
468 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
470 while [ ! -e $DIR/$tfile ] ; do
471 usleep 60 # give dd a chance to start
474 lfs getstripe $DIR/$tfile || return 1
475 rm -f $DIR/$tfile || return 2 # make it an orphan
477 df -P $DIR || df -P $DIR || true # reconnect
479 fail $SINGLEMDS # start orphan recovery
480 df -P $DIR || df -P $DIR || true # reconnect
481 wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
482 wait_mds_ost_sync || return 3
483 AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
484 log "before $BEFOREUSED, after $AFTERUSED"
485 [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
486 error "after $AFTERUSED > before $BEFOREUSED"
489 run_test 20b "write, unlink, eviction, replay (test mds_cleanup_orphans)"
491 test_20c() { # bug 10480
492 multiop_bg_pause $DIR/$tfile Ow_c || return 1
499 df -P $DIR || df -P $DIR || true # reconnect
502 test -s $DIR/$tfile || error "File was truncated"
504 wait $pid || return 1
507 run_test 20c "check that client eviction does not affect file content"
510 replay_barrier $SINGLEMDS
511 multiop_bg_pause $DIR/$tfile O_tSc || return 5
514 touch $DIR/g11 || return 1
518 wait $pid || return 2
519 [ -e $DIR/$tfile ] && return 3
520 touch $DIR/h11 || return 4
523 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
526 multiop_bg_pause $DIR/$tfile O_tSc || return 3
529 replay_barrier $SINGLEMDS
534 wait $pid || return 1
535 [ -e $DIR/$tfile ] && return 2
538 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
541 multiop_bg_pause $DIR/$tfile O_tSc || return 5
544 replay_barrier $SINGLEMDS
546 touch $DIR/g11 || return 1
550 wait $pid || return 2
551 [ -e $DIR/$tfile ] && return 3
552 touch $DIR/h11 || return 4
555 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
558 multiop_bg_pause $DIR/$tfile O_tSc || return 3
561 replay_barrier $SINGLEMDS
565 wait $pid || return 1
566 [ -e $DIR/$tfile ] && return 2
569 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
572 multiop_bg_pause $DIR/$tfile O_tSc || return 3
576 replay_barrier $SINGLEMDS
579 wait $pid || return 1
580 [ -e $DIR/$tfile ] && return 2
583 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
586 replay_barrier $SINGLEMDS
587 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
589 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
594 wait $pid2 || return 1
598 wait $pid1 || return 2
599 [ -e $DIR/$tfile-1 ] && return 3
600 [ -e $DIR/$tfile-2 ] && return 4
603 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
606 replay_barrier $SINGLEMDS
607 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
609 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
616 wait $pid1 || return 1
618 wait $pid2 || return 2
619 [ -e $DIR/$tfile-1 ] && return 3
620 [ -e $DIR/$tfile-2 ] && return 4
623 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
626 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
628 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
630 replay_barrier $SINGLEMDS
634 wait $pid2 || return 1
638 wait $pid1 || return 2
639 [ -e $DIR/$tfile-1 ] && return 3
640 [ -e $DIR/$tfile-2 ] && return 4
643 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
646 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
648 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
650 replay_barrier $SINGLEMDS
656 wait $pid1 || return 1
658 wait $pid2 || return 2
659 [ -e $DIR/$tfile-1 ] && return 3
660 [ -e $DIR/$tfile-2 ] && return 4
663 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
666 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
668 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
673 replay_barrier $SINGLEMDS
676 wait $pid1 || return 1
678 wait $pid2 || return 2
679 [ -e $DIR/$tfile-1 ] && return 3
680 [ -e $DIR/$tfile-2 ] && return 4
683 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
686 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
688 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
692 replay_barrier $SINGLEMDS
696 wait $pid1 || return 1
698 wait $pid2 || return 2
699 [ -e $DIR/$tfile-1 ] && return 3
700 [ -e $DIR/$tfile-2 ] && return 4
703 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
705 # tests for bug 2104; completion without crashing is success. The close is
706 # stale, but we always return 0 for close, so the app never sees it.
708 multiop_bg_pause $DIR/$tfile O_c || return 2
710 multiop_bg_pause $DIR/$tfile O_c || return 3
713 df $MOUNT || { sleep 1 && df $MOUNT; } || return 1
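# the df above retries once so the client can reconnect before we wait on
# the two multiop processes whose close RPCs are now stale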
716 wait $pid1 || return 4
717 wait $pid2 || return 5
720 run_test 32 "close() notices client eviction; close() after client eviction"
722 # Abort recovery before the client completes replay
723 test_33a() { # was test_33
724 replay_barrier $SINGLEMDS
725 createmany -o $DIR/$tfile-%d 100
726 fail_abort $SINGLEMDS
727 # this file should be gone, because the replay was aborted
728 $CHECKSTAT -t file $DIR/$tfile-* && return 3
729 unlinkmany $DIR/$tfile-%d 0 100
732 run_test 33a "abort recovery before client does replay"
734 # Stale FID sequence bug 15962
735 test_33b() { # was test_33a
736 replay_barrier $SINGLEMDS
737 createmany -o $DIR/$tfile-%d 10
738 fail_abort $SINGLEMDS
739 unlinkmany $DIR/$tfile-%d 0 10
740 # recreate shouldn't fail
741 createmany -o $DIR/$tfile-%d 10 || return 3
742 unlinkmany $DIR/$tfile-%d 0 10
745 run_test 33b "fid shouldn't be reused after abort recovery"
748 multiop_bg_pause $DIR/$tfile O_c || return 2
752 replay_barrier $SINGLEMDS
753 fail_abort $SINGLEMDS
755 wait $pid || return 3
756 [ -e $DIR/$tfile ] && return 1
760 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
762 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
766 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
767 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
772 # give the MDS a chance to process the removal
773 fail_abort $SINGLEMDS
774 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
776 run_test 35 "test recovery from llog for unlink op"
778 # b=2432 resent cancel after replay uses wrong cookie,
779 # so don't resend cancels
781 replay_barrier $SINGLEMDS
783 checkstat $DIR/$tfile
784 facet_failover $SINGLEMDS
786 if dmesg | grep "unknown lock cookie"; then
787 echo "cancel after replay failed"
791 run_test 36 "don't resend cancel"
794 # directory orphans can't be unlinked from PENDING directory
796 rmdir $DIR/$tfile 2>/dev/null
797 multiop_bg_pause $DIR/$tfile dD_c || return 2
801 replay_barrier $SINGLEMDS
802 # clear the dmesg buffer so we only see errors from this recovery
804 fail_abort $SINGLEMDS
806 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
807 wait $pid || return 3
811 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
814 createmany -o $DIR/$tfile-%d 800
815 unlinkmany $DIR/$tfile-%d 0 400
816 replay_barrier $SINGLEMDS
818 unlinkmany $DIR/$tfile-%d 400 400
820 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
822 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
824 test_39() { # bug 4176
825 createmany -o $DIR/$tfile-%d 800
826 replay_barrier $SINGLEMDS
827 unlinkmany $DIR/$tfile-%d 0 400
829 unlinkmany $DIR/$tfile-%d 400 400
831 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
833 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
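# count_ost_writes (used by test 40 below) sums the ost_write counts from
# every osc stats file; two samples taken around the MDS failover tell us
# whether writes kept flowing during recovery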
836 lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
841 $LCTL mark multiop $MOUNT/$tfile OS_c
842 multiop $MOUNT/$tfile OS_c &
844 writeme -s $MOUNT/${tfile}-2 &
847 facet_failover $SINGLEMDS
848 #define OBD_FAIL_MDS_CONNECT_NET 0x117
849 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
851 stat1=`count_ost_writes`
853 stat2=`count_ost_writes`
854 echo "$stat1, $stat2"
855 if [ $stat1 -lt $stat2 ]; then
856 echo "writes continuing during recovery"
859 echo "writes not continuing during recovery, bug 2477"
862 echo "waiting for writeme $WRITE_PID"
866 echo "waiting for multiop $PID"
867 wait $PID || return 2
868 do_facet client munlink $MOUNT/$tfile || return 3
869 do_facet client munlink $MOUNT/${tfile}-2 || return 3
872 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
876 # make sure that a read to one osc doesn't try to double-unlock its page just
877 # because another osc is invalid. trigger_group_io used to mistakenly return
878 # an error if any oscs were invalid even after having successfully put rpcs
879 # on valid oscs. This was fatal if the caller was ll_readpage, which unlocked
880 # the page, guaranteeing that the unlock from the RPC completion would
881 # assert on trying to unlock the already-unlocked page.
883 [ $OSTCOUNT -lt 2 ] && \
884 skip_env "skipping test 41: we don't have a second OST to test with" && \
887 local f=$MOUNT/$tfile
888 # make sure the start of the file is ost1
889 lfs setstripe $f -s $((128 * 1024)) -i 0
890 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
892 # fail ost2 and read from ost1
893 local osc2dev=`do_facet $SINGLEMDS "lctl get_param -n devices | grep ${ost2_svc}-osc-MDT0000" | awk '{print $1}'`
894 [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
895 do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
896 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
897 do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
900 run_test 41 "read from a valid osc while other oscs are invalid"
902 # test MDS recovery after ost failure
904 blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
905 createmany -o $DIR/$tfile-%d 800
907 unlinkmany $DIR/$tfile-%d 0 400
909 lctl set_param debug=-1
912 # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
913 #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
914 #[ $blocks_after -lt $blocks ] || return 1
915 echo "wait for MDS to time out and recover"
916 sleep $((TIMEOUT * 2))
918 unlinkmany $DIR/$tfile-%d 400 400
919 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
921 run_test 42 "recovery after ost failure"
923 # timeout in MDS/OST recovery RPC will LBUG MDS
924 test_43() { # bug 2530
925 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
927 replay_barrier $SINGLEMDS
929 # OBD_FAIL_OST_CREATE_NET 0x204
930 do_facet ost1 "lctl set_param fail_loc=0x80000204"
933 do_facet ost1 "lctl set_param fail_loc=0"
937 run_test 43 "mds osc import failure during recovery; don't LBUG"
939 test_44a() { # was test_44
942 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
943 [ "$mdcdev" ] || exit 2
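# $mdcdev is the local MDC device number (first column of "lctl ... devices");
# each "recover" below forces a reconnect while OBD_FAIL_TGT_CONN_RACE is set
# on the MDS, exercising the connect/reconnect race this test targets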
945 # adaptive timeouts slow this test way down
946 if at_is_enabled; then
947 at_max_saved=$(at_max_get mds)
951 for i in `seq 1 10`; do
952 echo "$i of 10 ($(date +%s))"
953 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
954 #define OBD_FAIL_TGT_CONN_RACE 0x701
955 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
956 $LCTL --device $mdcdev recover
959 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
960 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
963 run_test 44a "race in target handle connect"
966 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
967 [ "$mdcdev" ] || exit 2
968 for i in `seq 1 10`; do
969 echo "$i of 10 ($(date +%s))"
970 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
971 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
972 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
973 $LCTL --device $mdcdev recover
976 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
979 run_test 44b "race in target handle connect"
981 # Handle failed close
983 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
984 [ "$mdcdev" ] || exit 2
985 $LCTL --device $mdcdev recover
987 multiop_bg_pause $DIR/$tfile O_c || return 1
990 # This will cause the CLOSE to fail before even
991 # allocating a reply buffer
992 $LCTL --device $mdcdev deactivate || return 4
996 wait $pid || return 1
998 $LCTL --device $mdcdev activate || return 5
1001 $CHECKSTAT -t file $DIR/$tfile || return 2
1004 run_test 45 "Handle failed close"
1008 drop_reply "touch $DIR/$tfile"
1010 # ironically, the previous test, 45, will cause a real forced close,
1011 # so just look for one for this test
1012 dmesg | grep -i "force closing client file handle for $tfile" && return 1
1015 run_test 46 "Don't leak file handle after open resend (3325)"
1017 test_47() { # bug 2824
1018 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1020 # create some files to make sure precreate has been done on all
1021 # OSTs. (just in case this test is run independently)
1022 createmany -o $DIR/$tfile 20 || return 1
1024 # OBD_FAIL_OST_CREATE_NET 0x204
1026 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1027 df $MOUNT || return 2
1029 # let the MDS discover the OST failure, attempt to recover, fail
1030 # and recover again.
1031 sleep $((3 * TIMEOUT))
1033 # Without 2824, this createmany would hang
1034 createmany -o $DIR/$tfile 20 || return 3
1035 unlinkmany $DIR/$tfile 20 || return 4
1037 do_facet ost1 "lctl set_param fail_loc=0"
1040 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
1043 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1044 [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2 OSTs -- skipping" && return
1046 replay_barrier $SINGLEMDS
1047 createmany -o $DIR/$tfile 20 || return 1
1048 # OBD_FAIL_OST_EROFS 0x216
1049 facet_failover $SINGLEMDS
1050 do_facet ost1 "lctl set_param fail_loc=0x80000216"
1051 df $MOUNT || return 2
1053 createmany -o $DIR/$tfile 20 20 || return 2
1054 unlinkmany $DIR/$tfile 40 || return 3
1057 run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
1060 local oscdev=`do_facet $SINGLEMDS lctl get_param -n devices | grep ${ost1_svc}-osc-MDT0000 | awk '{print $1}'`
1061 [ "$oscdev" ] || return 1
1062 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2
1063 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3
1064 # give the mds_lov_sync threads a chance to run
1067 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1069 # b3764 timed out lock replay
1072 cancel_lru_locks mdc
1074 multiop $DIR/$tfile s || return 1
1075 replay_barrier $SINGLEMDS
1076 #define OBD_FAIL_LDLM_REPLY 0x30c
1077 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
1078 fail $SINGLEMDS || return 2
1079 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1081 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1083 run_test 52 "time out lock replay (3764)"
1085 # bug 3462 - simultaneous MDC requests
1087 mkdir -p $DIR/${tdir}-1
1088 mkdir -p $DIR/${tdir}-2
1089 multiop $DIR/${tdir}-1/f O_c &
1091 # give multiop a chance to open
1094 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1095 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1096 kill -USR1 $close_pid
1097 cancel_lru_locks mdc # force the close
1098 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1100 mcreate $DIR/${tdir}-2/f || return 1
1102 # close should still be here
1103 [ -d /proc/$close_pid ] || return 2
1105 replay_barrier_nodf $SINGLEMDS
1107 wait $close_pid || return 3
1109 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1110 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1111 rm -rf $DIR/${tdir}-*
1113 run_test 53a "|X| close request while two MDC requests in flight"
1116 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1118 mkdir -p $DIR/${tdir}-1
1119 mkdir -p $DIR/${tdir}-2
1120 multiop_bg_pause $DIR/${tdir}-1/f O_c || return 6
1123 #define OBD_FAIL_MDS_REINT_NET 0x107
1124 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1125 mcreate $DIR/${tdir}-2/f &
1129 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1130 kill -USR1 $close_pid
1131 cancel_lru_locks mdc # force the close
1132 wait $close_pid || return 1
1133 # open should still be here
1134 [ -d /proc/$open_pid ] || return 2
1136 replay_barrier_nodf $SINGLEMDS
1138 wait $open_pid || return 3
1140 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1141 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1142 rm -rf $DIR/${tdir}-*
1144 run_test 53b "|X| open request while two MDC requests in flight"
1147 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1149 mkdir -p $DIR/${tdir}-1
1150 mkdir -p $DIR/${tdir}-2
1151 multiop $DIR/${tdir}-1/f O_c &
1154 #define OBD_FAIL_MDS_REINT_NET 0x107
1155 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1156 mcreate $DIR/${tdir}-2/f &
1160 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1161 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1162 kill -USR1 $close_pid
1163 cancel_lru_locks mdc # force the close
1165 replay_barrier_nodf $SINGLEMDS
1166 fail_nodf $SINGLEMDS
1167 wait $open_pid || return 1
1169 # close should be gone
1170 [ -d /proc/$close_pid ] && return 2
1171 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1173 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1174 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1175 rm -rf $DIR/${tdir}-*
1177 run_test 53c "|X| open request and close request while two MDC requests in flight"
1180 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1182 mkdir -p $DIR/${tdir}-1
1183 mkdir -p $DIR/${tdir}-2
1184 multiop $DIR/${tdir}-1/f O_c &
1186 # give multiop a chance to open
1189 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1190 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1191 kill -USR1 $close_pid
1192 cancel_lru_locks mdc # force the close
1193 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1194 mcreate $DIR/${tdir}-2/f || return 1
1196 # close should still be here
1197 [ -d /proc/$close_pid ] || return 2
1199 wait $close_pid || return 3
1201 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1202 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1203 rm -rf $DIR/${tdir}-*
1205 run_test 53d "|X| close reply while two MDC requests in flight"
1208 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1210 mkdir -p $DIR/${tdir}-1
1211 mkdir -p $DIR/${tdir}-2
1212 multiop $DIR/${tdir}-1/f O_c &
1215 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1216 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1217 mcreate $DIR/${tdir}-2/f &
1221 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1222 kill -USR1 $close_pid
1223 cancel_lru_locks mdc # force the close
1224 wait $close_pid || return 1
1225 # open should still be here
1226 [ -d /proc/$open_pid ] || return 2
1228 replay_barrier_nodf $SINGLEMDS
1230 wait $open_pid || return 3
1232 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1233 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1234 rm -rf $DIR/${tdir}-*
1236 run_test 53e "|X| open reply while two MDC requests in flight"
1239 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1241 mkdir -p $DIR/${tdir}-1
1242 mkdir -p $DIR/${tdir}-2
1243 multiop $DIR/${tdir}-1/f O_c &
1246 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1247 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1248 mcreate $DIR/${tdir}-2/f &
1252 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1253 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1254 kill -USR1 $close_pid
1255 cancel_lru_locks mdc # force the close
1257 replay_barrier_nodf $SINGLEMDS
1258 fail_nodf $SINGLEMDS
1259 wait $open_pid || return 1
1261 # close should be gone
1262 [ -d /proc/$close_pid ] && return 2
1263 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1265 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1266 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1267 rm -rf $DIR/${tdir}-*
1269 run_test 53f "|X| open reply and close reply while two MDC requests in flight"
1272 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1274 mkdir -p $DIR/${tdir}-1
1275 mkdir -p $DIR/${tdir}-2
1276 multiop $DIR/${tdir}-1/f O_c &
1279 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1280 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1281 mcreate $DIR/${tdir}-2/f &
1285 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1286 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1287 kill -USR1 $close_pid
1288 cancel_lru_locks mdc # force the close
1290 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1291 replay_barrier_nodf $SINGLEMDS
1292 fail_nodf $SINGLEMDS
1293 wait $open_pid || return 1
1295 # close should be gone
1296 [ -d /proc/$close_pid ] && return 2
1298 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1299 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1300 rm -rf $DIR/${tdir}-*
1302 run_test 53g "|X| drop open reply and close request while close and open are both in flight"
1305 rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2
1307 mkdir -p $DIR/${tdir}-1
1308 mkdir -p $DIR/${tdir}-2
1309 multiop $DIR/${tdir}-1/f O_c &
1312 #define OBD_FAIL_MDS_REINT_NET 0x107
1313 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1314 mcreate $DIR/${tdir}-2/f &
1318 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b
1319 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b"
1320 kill -USR1 $close_pid
1321 cancel_lru_locks mdc # force the close
1324 replay_barrier_nodf $SINGLEMDS
1325 fail_nodf $SINGLEMDS
1326 wait $open_pid || return 1
1328 # close should be gone
1329 [ -d /proc/$close_pid ] && return 2
1330 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1332 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1333 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1334 rm -rf $DIR/${tdir}-*
1336 run_test 53h "|X| open request and close reply while two MDC requests in flight"
1338 #b_cray 54 "|X| open request and close reply while two MDC requests in flight"
1340 #b3761 ASSERTION(hash != 0) failed
1342 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1343 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
1345 # give touch a chance to run
1347 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1351 run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
1353 #b3440 ASSERTION(rec->ur_fid2->id) failed
1355 ln -s foo $DIR/$tfile
1356 replay_barrier $SINGLEMDS
1357 #drop_reply "cat $DIR/$tfile"
1361 run_test 56 "don't replay a symlink open request (3440)"
1363 # recovery of one mds-ost setattr from llog
1365 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1366 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1368 replay_barrier $SINGLEMDS
1371 $CHECKSTAT -t file $DIR/$tfile || return 1
1372 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1375 run_test 57 "test recovery from llog for setattr op"
1377 # recovery of many mds-ost setattrs from llog
1380 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1381 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1382 createmany -o $DIR/$tdir/$tfile-%d 2500
1383 replay_barrier $SINGLEMDS
1386 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1
1387 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1388 unlinkmany $DIR/$tdir/$tfile-%d 2500
1391 run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
1394 mount_client $MOUNT2
1396 touch $DIR/$tdir/$tfile
1397 replay_barrier $SINGLEMDS
1398 setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile
1400 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1401 [ x$VAL = x"bar" ] || return 1
1402 rm -f $DIR/$tdir/$tfile
1404 zconf_umount `hostname` $MOUNT2
1406 run_test 58b "test replay of setxattr op"
1408 test_58c() { # bug 16570
1409 mount_client $MOUNT2
1411 touch $DIR/$tdir/$tfile
1412 drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \
1414 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1415 [ x$VAL = x"bar" ] || return 2
1416 drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \
1418 VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile`
1419 [ x$VAL = x"bar1" ] || return 4
1420 rm -f $DIR/$tdir/$tfile
1422 zconf_umount `hostname` $MOUNT2
1424 run_test 58c "resend/reconstruct setxattr op"
1426 # log_commit_thread vs filter_destroy race used to lead to an import use-after-free
1429 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1432 createmany -o $DIR/$tdir/$tfile-%d 200
1434 unlinkmany $DIR/$tdir/$tfile-%d 200
1435 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
1436 do_facet ost1 "lctl set_param fail_loc=0x507"
1439 do_facet ost1 "lctl set_param fail_loc=0x0"
1443 run_test 59 "test log_commit_thread vs filter_destroy race"
1445 # race between add unlink llog vs cat log init in post_recovery (only for b1_6)
1446 # bug 12086: there should be no oops and no "No ctxt" error in this test
1449 createmany -o $DIR/$tdir/$tfile-%d 200
1450 replay_barrier $SINGLEMDS
1451 unlinkmany $DIR/$tdir/$tfile-%d 0 100
1453 unlinkmany $DIR/$tdir/$tfile-%d 100 100
1454 local no_ctxt=`dmesg | grep "No ctxt"`
1455 [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
1457 run_test 60 "test llog post recovery init vs llog unlink"
1459 #test race llog recovery thread vs llog cleanup
1460 test_61a() { # was test_61
1461 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1464 createmany -o $DIR/$tdir/$tfile-%d 800
1466 # OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
1467 unlinkmany $DIR/$tdir/$tfile-%d 800
1468 set_nodes_failloc "$(osts_nodes)" 0x80000221
1473 set_nodes_failloc "$(osts_nodes)" 0x0
1475 $CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1
1478 run_test 61a "test race llog recovery vs llog cleanup"
1480 #test race mds llog sync vs llog cleanup
1482 # OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a
1483 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a"
1484 facet_failover $SINGLEMDS
1487 do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1
1489 run_test 61b "test race mds llog sync vs llog cleanup"
1491 #test race cancel cookie cb vs llog cleanup
1493 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1495 # OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
1497 set_nodes_failloc "$(osts_nodes)" 0x80000222
1501 set_nodes_failloc "$(osts_nodes)" 0x0
1503 run_test 61c "test race cancel cookie cb vs llog cleanup"
1505 test_61d() { # bugs 16002, 17466
1506 shutdown_facet $SINGLEMDS
1507 #define OBD_FAIL_OBD_LLOG_SETUP 0x605
1508 do_facet $SINGLEMDS "lctl set_param fail_loc=0x605"
1509 start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS && error "mds start should have failed"
1510 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1511 start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || error "cannot restart mds"
1513 run_test 61d "error in llog_setup should cleanup the llog context correctly"
1515 test_62() { # Bug 15756 - don't mis-drop resent replay
1517 replay_barrier $SINGLEMDS
1518 createmany -o $DIR/$tdir/$tfile- 25
1519 #define OBD_FAIL_TGT_REPLAY_DROP 0x707
1520 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
1521 facet_failover $SINGLEMDS
1522 df $MOUNT || return 1
1523 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1524 unlinkmany $DIR/$tdir/$tfile- 25 || return 2
1527 run_test 62 "don't mis-drop resent replay"
1529 #Adaptive Timeouts (bug 3055)
1537 echo "Cleaning up AT ..."
1538 if [ -n "$ATOLDBASE" ]; then
1539 local at_history=$($LCTL get_param -n at_history)
1540 do_facet mds "lctl set_param at_history=$at_history" || true
1541 do_facet ost1 "lctl set_param at_history=$at_history" || true
1544 if [ $AT_MAX_SET -ne 0 ]; then
1545 for facet in mds client ost; do
1546 var=AT_MAX_SAVE_${facet}
1547 echo restore AT on $facet to saved value ${!var}
1548 at_max_set ${!var} $facet
1549 at_new=$(at_max_get $facet)
1550 echo Restored AT value on $facet $at_new
1551 [ $at_new -eq ${!var} ] || \
1552 error "$facet : AT value was not restored SAVED ${!var} NEW $at_new"
1559 local at_max_new=600
1561 # Save at_max original values
1563 if [ $AT_MAX_SET -eq 0 ]; then
1564 # Suppose that all osts have the same at_max
1565 for facet in mds client ost; do
1566 eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
1570 for facet in mds client ost; do
1571 at_max=$(at_max_get $facet)
1572 if [ $at_max -ne $at_max_new ]; then
1573 echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
1574 at_max_set $at_max_new $facet
1579 if [ -z "$ATOLDBASE" ]; then
1580 ATOLDBASE=$(do_facet mds "lctl get_param -n at_history")
1581 # speed up the timebase so we can check decreasing AT
1582 do_facet mds "lctl set_param at_history=8" || true
1583 do_facet ost1 "lctl set_param at_history=8" || true
1585 # sleep for a while to cool down; this should be > 8s and also allow
1586 # at least one ping to be sent. Simply use TIMEOUT to be safe.
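# at this point at_max has been forced to 600s on mds/client/ost and
# at_history shrunk to 8s, so the AT tests below can watch the adaptive
# service estimates move quickly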
1591 test_65a() #bug 3055
1593 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1595 at_start || return 0
1596 $LCTL dk > /dev/null
1598 sysctl -w lnet.debug="+other"
1599 # Slow down a request to the current service time, this is critical
1600 # because previous tests may have caused this value to increase.
1601 REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
1602 awk '/portal 12/ {print $5}'`
1603 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
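# pad the measured service time by 25% plus 5s so the injected pause is
# comfortably longer than the server's current estimate (e.g. 20s -> 30s)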
1605 do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000))
1606 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1607 do_facet mds sysctl -w lustre.fail_loc=0x8000050a
1608 createmany -o $DIR/$tfile 10 > /dev/null
1609 unlinkmany $DIR/$tfile 10 > /dev/null
1610 # check for log message
1611 $LCTL dk | grep "Early reply #" || error "No early reply"
1613 # client should show REQ_DELAY estimates
1614 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1616 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1618 run_test 65a "AT: verify early replies"
1620 test_65b() #bug 3055
1622 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1624 at_start || return 0
1627 sysctl -w lnet.debug="other trace"
1628 $LCTL dk > /dev/null
1629 # Slow down a request to the current service time, this is critical
1630 # because previous tests may have caused this value to increase.
1631 REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
1632 awk '/portal 6/ {print $5}'`
1633 REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5))
1635 do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
1636 #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
1637 do_facet ost1 sysctl -w lustre.fail_loc=0x224
1640 lfs setstripe $DIR/$tfile --index=0 --count=1
1641 # force some real bulk transfer
1642 multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
1644 do_facet ost1 sysctl -w lustre.fail_loc=0
1645 # check for log message
1646 $LCTL dk | grep "Early reply #" || error "No early reply"
1648 # client should show REQ_DELAY estimates
1649 lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
1651 run_test 65b "AT: verify early replies on packed reply / bulk"
1653 test_66a() #bug 3055
1655 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1657 at_start || return 0
1658 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1659 # adjust 5s at a time so no early reply is sent (within deadline)
1660 do_facet mds "sysctl -w lustre.fail_val=5000"
1661 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1662 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1663 createmany -o $DIR/$tfile 20 > /dev/null
1664 unlinkmany $DIR/$tfile 20 > /dev/null
1665 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1666 do_facet mds "sysctl -w lustre.fail_val=10000"
1667 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1668 createmany -o $DIR/$tfile 20 > /dev/null
1669 unlinkmany $DIR/$tfile 20 > /dev/null
1670 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1671 do_facet mds "sysctl -w lustre.fail_loc=0"
1673 createmany -o $DIR/$tfile 20 > /dev/null
1674 unlinkmany $DIR/$tfile 20 > /dev/null
1675 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
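# fields 5 and 7 of the "portal 12" timeouts line are read below as the
# current and worst-case MDT service estimates (an assumption about the
# timeouts output layout, matching the awk extraction that follows)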
1676 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
1677 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
1678 echo "Current MDT timeout $CUR, worst $WORST"
1679 [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
1681 run_test 66a "AT: verify MDT service time adjusts with no early replies"
1683 test_66b() #bug 3055
1685 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1687 at_start || return 0
1688 ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
1689 sysctl -w lustre.fail_val=$(($ORIG + 5))
1690 #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
1691 sysctl -w lustre.fail_loc=0x50c
1692 ls $DIR/$tfile > /dev/null 2>&1
1693 sysctl -w lustre.fail_loc=0
1694 CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
1695 WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
1696 echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
1697 [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
1699 run_test 66b "AT: verify net latency adjusts"
1701 test_67a() #bug 3055
1703 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1705 at_start || return 0
1706 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
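# snapshot the total *_connect count over all OSCs now; the delta taken
# after the slow-request window counts how many reconnect attempts the
# slowdown provoked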
1707 # sleeping threads may drive values above this
1708 do_facet ost1 "sysctl -w lustre.fail_val=400"
1709 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1710 do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
1711 createmany -o $DIR/$tfile 20 > /dev/null
1712 unlinkmany $DIR/$tfile 20 > /dev/null
1713 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1714 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1715 ATTEMPTS=$(($CONN2 - $CONN1))
1716 echo "$ATTEMPTS osc reconnect attempts on gradual slow"
1717 [ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect"
1720 run_test 67a "AT: verify slow request processing doesn't induce reconnects"
1722 test_67b() #bug 3055
1724 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1726 at_start || return 0
1727 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1728 #define OBD_FAIL_OST_PAUSE_CREATE 0x223
1729 do_facet ost1 "sysctl -w lustre.fail_val=20000"
1730 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1731 cp /etc/profile $DIR/$tfile || error "cp failed"
1733 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1735 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1736 ATTEMPTS=$(($CONN2 - $CONN1))
1737 echo "$ATTEMPTS osc reconnect attempts on instant slow"
1738 # do it again; should not timeout
1739 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1740 cp /etc/profile $DIR/$tfile || error "cp failed"
1741 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1743 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1744 CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1745 ATTEMPTS=$(($CONN3 - $CONN2))
1746 echo "$ATTEMPTS osc reconnect attempts on 2nd slow"
1747 [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
1750 run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
1752 test_68 () #bug 13813
1754 remote_ost_nodsh && skip "remote OST with nodsh" && return 0
1756 at_start || return 0
1757 local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
1758 [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
1759 local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min")
1760 [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min in the ost1" && return 0
1761 local ENQ_MIN=$(cat $ldlm_enqueue_min)
1762 local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r")
1763 echo $TIMEOUT >> $ldlm_enqueue_min
1764 do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r"
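# with ldlm_enqueue_min raised to $TIMEOUT on both the client and ost1, the
# two cp runs below (cancel delays just under and just over TIMEOUT) are
# both expected to complete without error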
1768 lfs setstripe $DIR/$tdir --index=0 --count=1
1769 #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
1770 sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
1771 sysctl -w lustre.fail_loc=0x80000312
1772 cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?"
1773 sysctl -w lustre.fail_val=$((TIMEOUT * 5 / 4))
1774 sysctl -w lustre.fail_loc=0x80000312
1775 cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?"
1776 sysctl -w lustre.fail_loc=0
1778 echo $ENQ_MIN >> $ldlm_enqueue_min
1779 do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r"
1783 run_test 68 "AT: verify slowing locks"
1786 # end of AT tests includes above lines
1789 # start multi-client tests
1791 [ -z "$CLIENTS" ] && \
1792 { skip "Need two or more clients." && return; }
1793 [ $CLIENTCOUNT -lt 2 ] && \
1794 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
1796 echo "mount clients $CLIENTS ..."
1797 zconf_mount_clients $CLIENTS $DIR
1799 local clients=${CLIENTS//,/ }
1800 echo "Write/read files on $DIR ; clients $CLIENTS ... "
1801 for CLIENT in $clients; do
1802 do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
1803 of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \
1804 error "dd failed on $CLIENT"
1807 local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
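# prev_client is the last hostname in $clients; every client's file is then
# read back from that single node to cross-check the writes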
1808 for C in ${CLIENTS//,/ }; do
1809 do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \
1810 error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
1816 run_test 70a "check multi client t-f"
1819 local clients=${CLIENTS:-$HOSTNAME}
1821 zconf_mount_clients $clients $DIR
1824 [ "$SLOW" = "no" ] && duration=60
1825 local cmd="rundbench 1 -t $duration"
1827 do_nodes --verbose $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
1828 PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \
1829 DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
1832 log "Started rundbench load PID=$PID ..."
1835 START_TS=$(date +%s)
1836 CURRENT_TS=$START_TS
1837 while [ $ELAPSED -lt $duration ]; do
1839 replay_barrier $SINGLEMDS
1840 sleep 1 # give clients time to do operations
1841 # Increment the number of failovers
1842 NUM_FAILOVERS=$((NUM_FAILOVERS+1))
1843 log "$TESTNAME fail mds1 $NUM_FAILOVERS times"
1845 CURRENT_TS=$(date +%s)
1846 ELAPSED=$((CURRENT_TS - START_TS))
1848 wait $PID || error "rundbench load on $CLIENTS failed!"
1850 run_test 70b "mds recovery; $CLIENTCOUNT clients"
1851 # end multi-client tests
1854 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1858 replay_barrier $SINGLEMDS
1859 #define OBD_FAIL_LDLM_ENQUEUE 0x302
1860 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
1863 wait $pid || return 1
1864 [ -e $DIR/$tfile ] && return 2
1867 run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay, close"
1870 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1874 replay_barrier $SINGLEMDS
1875 #define OBD_FAIL_LDLM_REPLY 0x30c
1876 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
1879 wait $pid || return 1
1880 [ -e $DIR/$tfile ] && return 2
1883 run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close"
1886 multiop_bg_pause $DIR/$tfile O_tSc || return 3
1890 replay_barrier $SINGLEMDS
1891 #define OBD_FAIL_TGT_LAST_REPLAY 0x710
1892 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000710"
1895 wait $pid || return 1
1896 [ -e $DIR/$tfile ] && return 2
1899 run_test 73c "open(O_CREAT), unlink, replay, reconnect at last_replay, close"
1903 local clients=${CLIENTS:-$HOSTNAME}
1906 zconf_umount_clients $clients $MOUNT
1907 facet_failover $SINGLEMDS
1908 zconf_mount_clients $clients $MOUNT
1910 touch $DIR/$tfile || return 1
1911 rm $DIR/$tfile || return 2
1912 client_df || error "df failed: $?"
1915 run_test 74 "Ensure applications don't fail waiting for OST recovery"
1918 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1922 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1923 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
1925 stat $DIR/$tdir >/dev/null 2>&1 && error "$DIR/$tdir still exists after recovery!"
1928 run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
1931 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1935 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1936 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
1938 stat $DIR/$tdir >/dev/null 2>&1 && error "$DIR/$tdir still exists after recovery!"
1941 run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
1944 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1947 createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
1949 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
1950 $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
1952 rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
1954 stat $DIR/$tdir/f1002
1956 run_test 81a "CMD: unlink cross-node file (fail mds with name)"
1959 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1963 mkdir $dir || error "mkdir $dir failed"
1967 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
1969 run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
1972 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1976 mkdir $dir || error "mkdir $dir failed"
1980 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
1982 run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
1986 createmany -o $DIR/$tdir/$tfile- 10 || return 1
1987 #define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
1988 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
1989 unlinkmany $DIR/$tdir/$tfile- 10 || return 2
1991 run_test 83a "fail log_add during unlink recovery"
1995 createmany -o $DIR/$tdir/$tfile- 10 || return 1
1996 replay_barrier $SINGLEMDS
1997 unlinkmany $DIR/$tdir/$tfile- 10 || return 2
1998 #define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140
1999 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
2002 run_test 83b "fail log_add during unlink recovery"
2005 #define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x143
2006 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000143"
2007 createmany -o $DIR/$tfile- 1 &
2011 df -P $DIR || df -P $DIR || true # reconnect
2013 run_test 84a "stale open during export disconnect"
2015 equals_msg `basename $0`: test complete, cleaning up
2016 check_and_cleanup_lustre
2017 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true