# Lustre recovery/replay test suite (replay-single.sh) -- setup preamble.
# NOTE(review): this file is a non-contiguous sample; each line carries a
# stray leading number (the original file's line number) left over from
# extraction, and many lines between the numbered ones are missing.
7 # This test needs to be run on the client
10 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
# Pull in the shared test harness and the per-configuration settings.
13 . $LUSTRE/tests/test-framework.sh
15 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
16 CHECK_GRANT=${CHECK_GRANT:-"yes"}
17 GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
# Tests excluded from this run; extended below when several OST services
# share one node under FAILURE_MODE=HARD.
22 ALWAYS_EXCEPT="$REPLAY_SINGLE_EXCEPT"
24 if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
25 CONFIG_EXCEPTIONS="0b 42 47 61a 61c"
26 echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. "
27 echo "Except the tests: $CONFIG_EXCEPTIONS"
28 ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
31 # 63 min 7 min AT AT AT AT"
# Skip long-running tests unless SLOW=yes.
32 [ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
36 cleanup_and_setup_lustre
# Remove leftover per-test files/dirs from previous runs.
41 rm -rf $DIR/[df][0-9]*
43 test_0a() { # was test_0
46 replay_barrier $SINGLEMDS
50 run_test 0a "empty replay"
53 # this test attempts to trigger a race in the precreation code,
54 # and must run before any other objects are created on the filesystem
56 createmany -o $DIR/$tfile 20 || return 1
57 unlinkmany $DIR/$tfile 20 || return 2
59 run_test 0b "ensure object created after recover exists. (3284)"
65 lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width
71 lctl get_param -n seq.cli-srv-$mds-mdc-*.width
74 # This test should pass for single-mds and multi-mds configs.
75 # But for different configurations it tests different things.
79 # (1) fld_create replay should happen;
81 # (2) fld_create replay should not return -EEXISTS, if it does
82 # this means sequence manager recovery code is buggy and allocated
83 # same sequence two times after recovery.
87 # (1) fld_create replay may not happen, because its home MDS is
88 # MDS2 which is not involved in recovery;
90 # (2) as fld_create does not happen on MDS1, it does not make any
93 local label=`mdsdevlabel 1`
94 [ -z "$label" ] && echo "No label for mds1" && return 1
96 replay_barrier $SINGLEMDS
97 local sw=`seq_get_width $label`
99 # make seq manager switch to next sequence each
100 # time as new fid is needed.
101 seq_set_width $label 1
103 # make sure that fld has created at least one new
105 touch $DIR/$tfile || return 2
106 seq_set_width $label $sw
108 # fail $SINGLEMDS and start recovery, replay RPCs, etc.
111 # wait for recovery finish
115 # flush fld cache and dentry cache to make it lookup
116 # created entry instead of revalidating existent one
118 zconf_mount `hostname` $MOUNT
120 # issue lookup which should call fld lookup which
121 # should fail if client did not replay fld create
122 # correctly and server has no fld entry
123 touch $DIR/$tfile || return 3
124 rm $DIR/$tfile || return 4
126 run_test 0c "fld create"
129 replay_barrier $SINGLEMDS
132 $CHECKSTAT -t file $DIR/$tfile || return 1
135 run_test 1 "simple create"
138 replay_barrier $SINGLEMDS
141 $CHECKSTAT -t file $DIR/$tfile || return 1
148 replay_barrier $SINGLEMDS
151 $CHECKSTAT -t file $DIR/$tfile || return 1
157 replay_barrier $SINGLEMDS
159 o_directory $DIR/$tfile
161 $CHECKSTAT -t file $DIR/$tfile || return 2
164 run_test 3a "replay failed open(O_DIRECTORY)"
167 replay_barrier $SINGLEMDS
168 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
169 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114"
171 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
173 $CHECKSTAT -t file $DIR/$tfile && return 2
176 run_test 3b "replay failed open -ENOMEM"
179 replay_barrier $SINGLEMDS
180 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
181 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128"
183 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
186 $CHECKSTAT -t file $DIR/$tfile && return 2
189 run_test 3c "replay failed open -ENOMEM"
191 test_4a() { # was test_4
192 replay_barrier $SINGLEMDS
193 for i in `seq 10`; do
194 echo "tag-$i" > $DIR/$tfile-$i
197 for i in `seq 10`; do
198 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
201 run_test 4a "|x| 10 open(O_CREAT)s"
204 replay_barrier $SINGLEMDS
207 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
209 run_test 4b "|x| rm 10 files"
211 # The idea is to get past the first block of precreated files on both
212 # osts, and then replay.
214 replay_barrier $SINGLEMDS
215 for i in `seq 220`; do
216 echo "tag-$i" > $DIR/$tfile-$i
219 for i in `seq 220`; do
220 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
224 # waiting for commitment of removal
226 run_test 5 "|x| 220 open(O_CREAT)"
229 test_6a() { # was test_6
231 replay_barrier $SINGLEMDS
232 mcreate $DIR/$tdir/$tfile
234 $CHECKSTAT -t dir $DIR/$tdir || return 1
235 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
237 # waiting for log process thread
239 run_test 6a "mkdir + contained create"
243 replay_barrier $SINGLEMDS
246 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
248 run_test 6b "|X| rmdir"
252 replay_barrier $SINGLEMDS
253 mcreate $DIR/$tdir/$tfile
255 $CHECKSTAT -t dir $DIR/$tdir || return 1
256 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
259 run_test 7 "mkdir |X| contained create"
262 # make sure no side-effect from previous test.
264 replay_barrier $SINGLEMDS
265 multiop_bg_pause $DIR/$tfile mo_c || return 4
269 $CHECKSTAT -t file $DIR/$tfile || return 1
270 kill -USR1 $MULTIPID || return 2
271 wait $MULTIPID || return 3
274 run_test 8 "creat open |X| close"
277 replay_barrier $SINGLEMDS
279 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
281 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
283 echo " old_inum == $old_inum, new_inum == $new_inum"
284 if [ $old_inum -eq $new_inum ] ;
286 echo " old_inum and new_inum match"
288 echo "!!!! old_inum and new_inum NOT match"
293 run_test 9 "|X| create (same inum/gen)"
297 replay_barrier $SINGLEMDS
298 mv $DIR/$tfile $DIR/$tfile-2
301 $CHECKSTAT $DIR/$tfile && return 1
302 $CHECKSTAT $DIR/$tfile-2 ||return 2
306 run_test 10 "create |X| rename unlink"
310 echo "old" > $DIR/$tfile
311 mv $DIR/$tfile $DIR/$tfile-2
312 replay_barrier $SINGLEMDS
313 echo "new" > $DIR/$tfile
315 grep old $DIR/$tfile-2
317 grep new $DIR/$tfile || return 1
318 grep old $DIR/$tfile-2 || return 2
320 run_test 11 "create open write rename |X| create-old-name read"
324 multiop_bg_pause $DIR/$tfile o_tSc || return 3
327 replay_barrier $SINGLEMDS
329 wait $pid || return 1
332 [ -e $DIR/$tfile ] && return 2
335 run_test 12 "open, unlink |X| close"
338 # 1777 - replay open after committed chmod that would make
339 # a regular open a failure
342 multiop_bg_pause $DIR/$tfile O_wc || return 3
345 $CHECKSTAT -p 0 $DIR/$tfile
346 replay_barrier $SINGLEMDS
349 wait $pid || return 1
351 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
354 run_test 13 "open chmod 0 |x| write close"
357 multiop_bg_pause $DIR/$tfile O_tSc || return 4
360 replay_barrier $SINGLEMDS
361 kill -USR1 $pid || return 1
362 wait $pid || return 2
365 [ -e $DIR/$tfile ] && return 3
368 run_test 14 "open(O_CREAT), unlink |X| close"
371 multiop_bg_pause $DIR/$tfile O_tSc || return 5
374 replay_barrier $SINGLEMDS
375 touch $DIR/g11 || return 1
377 wait $pid || return 2
380 [ -e $DIR/$tfile ] && return 3
381 touch $DIR/h11 || return 4
384 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
388 replay_barrier $SINGLEMDS
391 mcreate $DIR/$tfile-2
393 [ -e $DIR/$tfile ] && return 1
394 [ -e $DIR/$tfile-2 ] || return 2
395 munlink $DIR/$tfile-2 || return 3
397 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
400 replay_barrier $SINGLEMDS
401 multiop_bg_pause $DIR/$tfile O_c || return 4
404 kill -USR1 $pid || return 1
405 wait $pid || return 2
406 $CHECKSTAT -t file $DIR/$tfile || return 3
409 run_test 17 "|X| open(O_CREAT), |replay| close"
412 replay_barrier $SINGLEMDS
413 multiop_bg_pause $DIR/$tfile O_tSc || return 8
416 touch $DIR/$tfile-2 || return 1
417 echo "pid: $pid will close"
419 wait $pid || return 2
422 [ -e $DIR/$tfile ] && return 3
423 [ -e $DIR/$tfile-2 ] || return 4
424 # this touch frequently fails
425 touch $DIR/$tfile-3 || return 5
426 munlink $DIR/$tfile-2 || return 6
427 munlink $DIR/$tfile-3 || return 7
430 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
432 # bug 1855 (a simpler form of test_11 above)
434 replay_barrier $SINGLEMDS
436 echo "old" > $DIR/$tfile
437 mv $DIR/$tfile $DIR/$tfile-2
438 grep old $DIR/$tfile-2
440 grep old $DIR/$tfile-2 || return 2
442 run_test 19 "|X| mcreate, open, write, rename "
444 test_20a() { # was test_20
445 replay_barrier $SINGLEMDS
446 multiop_bg_pause $DIR/$tfile O_tSc || return 3
452 wait $pid || return 1
453 [ -e $DIR/$tfile ] && return 2
456 run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
# test_20b (bug 10480): create an in-use orphan by unlinking a file while
# dd is still writing to it, fail the MDS, and verify orphan cleanup
# eventually frees the space again.
458 test_20b() { # bug 10480
459 BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
461 dd if=/dev/zero of=$DIR/$tfile bs=4k count=10000 &
463 while [ ! -e $DIR/$tfile ] ; do
464 usleep 60 # give dd a chance to start
467 lfs getstripe $DIR/$tfile || return 1
468 rm -f $DIR/$tfile || return 2 # make it an orphan
470 df -P $DIR || df -P $DIR || true # reconnect
472 fail $SINGLEMDS # start orphan recovery
473 df -P $DIR || df -P $DIR || true # reconnect
474 wait_mds_recovery_done || error "MDS recovery not done"
476 # FIXME just because recovery is done doesn't mean we've finished
477 # orphan cleanup. Fake it with a sleep for now...
# Space usage after cleanup should be back near the starting value
# (20 blocks of slack allowed).
479 AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
480 log "before $BEFOREUSED, after $AFTERUSED"
481 [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
482 error "after $AFTERUSED > before $BEFOREUSED"
485 run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)"
487 test_20c() { # bug 10480
488 multiop_bg_pause $DIR/$tfile Ow_c || return 1
495 df -P $DIR || df -P $DIR || true # reconnect
498 test -s $DIR/$tfile || error "File was truncated"
500 wait $pid || return 1
503 run_test 20c "check that client eviction does not affect file content"
506 replay_barrier $SINGLEMDS
507 multiop_bg_pause $DIR/$tfile O_tSc || return 5
510 touch $DIR/g11 || return 1
514 wait $pid || return 2
515 [ -e $DIR/$tfile ] && return 3
516 touch $DIR/h11 || return 4
519 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
522 multiop_bg_pause $DIR/$tfile O_tSc || return 3
525 replay_barrier $SINGLEMDS
530 wait $pid || return 1
531 [ -e $DIR/$tfile ] && return 2
534 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
537 multiop_bg_pause $DIR/$tfile O_tSc || return 5
540 replay_barrier $SINGLEMDS
542 touch $DIR/g11 || return 1
546 wait $pid || return 2
547 [ -e $DIR/$tfile ] && return 3
548 touch $DIR/h11 || return 4
551 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
554 multiop_bg_pause $DIR/$tfile O_tSc || return 3
557 replay_barrier $SINGLEMDS
561 wait $pid || return 1
562 [ -e $DIR/$tfile ] && return 2
565 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
568 multiop_bg_pause $DIR/$tfile O_tSc || return 3
572 replay_barrier $SINGLEMDS
575 wait $pid || return 1
576 [ -e $DIR/$tfile ] && return 2
579 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
582 replay_barrier $SINGLEMDS
583 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
585 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
590 wait $pid2 || return 1
594 wait $pid1 || return 2
595 [ -e $DIR/$tfile-1 ] && return 3
596 [ -e $DIR/$tfile-2 ] && return 4
599 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
602 replay_barrier $SINGLEMDS
603 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
605 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
612 wait $pid1 || return 1
614 wait $pid2 || return 2
615 [ -e $DIR/$tfile-1 ] && return 3
616 [ -e $DIR/$tfile-2 ] && return 4
619 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
622 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
624 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
626 replay_barrier $SINGLEMDS
630 wait $pid2 || return 1
634 wait $pid1 || return 2
635 [ -e $DIR/$tfile-1 ] && return 3
636 [ -e $DIR/$tfile-2 ] && return 4
639 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
642 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
644 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
646 replay_barrier $SINGLEMDS
652 wait $pid1 || return 1
654 wait $pid2 || return 2
655 [ -e $DIR/$tfile-1 ] && return 3
656 [ -e $DIR/$tfile-2 ] && return 4
659 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
662 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
664 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
669 replay_barrier $SINGLEMDS
672 wait $pid1 || return 1
674 wait $pid2 || return 2
675 [ -e $DIR/$tfile-1 ] && return 3
676 [ -e $DIR/$tfile-2 ] && return 4
679 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
682 multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5
684 multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6
688 replay_barrier $SINGLEMDS
692 wait $pid1 || return 1
694 wait $pid2 || return 2
695 [ -e $DIR/$tfile-1 ] && return 3
696 [ -e $DIR/$tfile-2 ] && return 4
699 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
701 # tests for bug 2104; completion without crashing is success. The close is
702 # stale, but we always return 0 for close, so the app never sees it.
704 multiop_bg_pause $DIR/$tfile O_c || return 2
706 multiop_bg_pause $DIR/$tfile O_c || return 3
709 df $MOUNT || sleep 1 && df $MOUNT || return 1
712 wait $pid1 || return 4
713 wait $pid2 || return 5
716 run_test 32 "close() notices client eviction; close() after client eviction"
718 # Abort recovery before client complete
# test_33a: files created after the replay barrier must be gone once
# recovery is aborted (their replay is discarded).
719 test_33a() { # was test_33
720 replay_barrier $SINGLEMDS
721 createmany -o $DIR/$tfile-%d 100
722 fail_abort $SINGLEMDS
723 # this file should be gone, because the replay was aborted
724 $CHECKSTAT -t file $DIR/$tfile-* && return 3
# Best-effort cleanup of any survivors (exit status intentionally ignored).
725 unlinkmany $DIR/$tfile-%d 0 100
728 run_test 33a "abort recovery before client does replay"
# test_33b: after an aborted recovery, recreating files under the same
# names must succeed (a stale FID must not be handed out again).
731 test_33b() { # was test_33a
732 replay_barrier $SINGLEMDS
733 createmany -o $DIR/$tfile-%d 10
734 fail_abort $SINGLEMDS
735 unlinkmany $DIR/$tfile-%d 0 10
736 # recreate shouldn't fail
737 createmany -o $DIR/$tfile-%d 10 || return 3
738 unlinkmany $DIR/$tfile-%d 0 10
741 run_test 33b "fid shouldn't be reused after abort recovery"
744 multiop_bg_pause $DIR/$tfile O_c || return 2
748 replay_barrier $SINGLEMDS
749 fail_abort $SINGLEMDS
751 wait $pid || return 3
752 [ -e $DIR/$tfile ] && return 1
756 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
758 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
762 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
763 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119"
768 # give a chance to remove from MDS
769 fail_abort $SINGLEMDS
770 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
772 run_test 35 "test recovery from llog for unlink op"
774 # b=2432 resent cancel after replay uses wrong cookie,
775 # so don't resend cancels
777 replay_barrier $SINGLEMDS
779 checkstat $DIR/$tfile
780 facet_failover $SINGLEMDS
782 if dmesg | grep "unknown lock cookie"; then
783 echo "cancel after replay failed"
787 run_test 36 "don't resend cancel"
790 # directory orphans can't be unlinked from PENDING directory
792 rmdir $DIR/$tfile 2>/dev/null
793 multiop_bg_pause $DIR/$tfile dD_c || return 2
797 replay_barrier $SINGLEMDS
798 # clear the dmesg buffer so we only see errors from this recovery
800 fail_abort $SINGLEMDS
802 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
803 wait $pid || return 3
807 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
810 createmany -o $DIR/$tfile-%d 800
811 unlinkmany $DIR/$tfile-%d 0 400
812 replay_barrier $SINGLEMDS
814 unlinkmany $DIR/$tfile-%d 400 400
816 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
818 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
820 test_39() { # bug 4176
821 createmany -o $DIR/$tfile-%d 800
822 replay_barrier $SINGLEMDS
823 unlinkmany $DIR/$tfile-%d 0 400
825 unlinkmany $DIR/$tfile-%d 400 400
827 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
829 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
832 lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
837 $LCTL mark multiop $MOUNT/$tfile OS_c
838 multiop $MOUNT/$tfile OS_c &
840 writeme -s $MOUNT/${tfile}-2 &
843 facet_failover $SINGLEMDS
844 #define OBD_FAIL_MDS_CONNECT_NET 0x117
845 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
847 stat1=`count_ost_writes`
849 stat2=`count_ost_writes`
850 echo "$stat1, $stat2"
851 if [ $stat1 -lt $stat2 ]; then
852 echo "writes continuing during recovery"
855 echo "writes not continuing during recovery, bug 2477"
858 echo "waiting for writeme $WRITE_PID"
862 echo "waiting for multiop $PID"
863 wait $PID || return 2
864 do_facet client munlink $MOUNT/$tfile || return 3
865 do_facet client munlink $MOUNT/${tfile}-2 || return 3
868 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
# test_41: regression test for a double page-unlock in the read path when
# one OSC is invalid while another remains usable (details below).
872 # make sure that a read to one osc doesn't try to double-unlock its page just
873 # because another osc is invalid. trigger_group_io used to mistakenly return
874 # an error if any oscs were invalid even after having successfully put rpcs
875 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
876 # the page, guaranteeing that the unlock from the RPC completion would
877 # assert on trying to unlock the unlocked page.
879 [ $OSTCOUNT -lt 2 ] && \
880 skip "skipping test 41: we don't have a second OST to test with" && \
883 local f=$MOUNT/$tfile
884 # make sure the start of the file is ost1
885 lfs setstripe $f -s $((128 * 1024)) -i 0
886 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
888 # fail ost2 and read from ost1
# Look up the MDS-side OSC device number for ost2 so it can be
# deactivated around the read and reactivated afterwards.
889 local osc2dev=`do_facet $SINGLEMDS "lctl get_param -n devices | grep ${ost2_svc}-osc-MDT0000" | awk '{print $1}'`
890 [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4
891 do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1
892 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
893 do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2
896 run_test 41 "read from a valid osc while other oscs are invalid"
898 # test MDS recovery after ost failure
900 blocks=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
901 createmany -o $DIR/$tfile-%d 800
903 unlinkmany $DIR/$tfile-%d 0 400
905 lctl set_param debug=-1
908 # osc is evicted, fs is smaller (but only with failout OSTs, bug 7287)
909 #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'`
910 #[ $blocks_after -lt $blocks ] || return 1
911 echo wait for MDS to timeout and recover
912 sleep $((TIMEOUT * 2))
914 unlinkmany $DIR/$tfile-%d 400 400
915 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
917 run_test 42 "recovery after ost failure"
919 # timeout in MDS/OST recovery RPC will LBUG MDS
920 test_43() { # bug 2530
921 replay_barrier $SINGLEMDS
923 # OBD_FAIL_OST_CREATE_NET 0x204
924 do_facet ost1 "lctl set_param fail_loc=0x80000204"
927 do_facet ost1 "lctl set_param fail_loc=0"
931 run_test 43 "mds osc import failure during recovery; don't LBUG"
# test_44a: repeatedly race a client reconnect against the target's
# connection handling (OBD_FAIL_TGT_CONN_RACE) and check that the MDC
# device recovers each iteration.
933 test_44a() { # was test_44
936 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
937 [ "$mdcdev" ] || exit 2
939 # adaptive timeouts slow this way down
940 if at_is_valid && at_is_enabled; then
941 at_max_saved=$(at_max_get mds)
945 for i in `seq 1 10`; do
946 echo "$i of 10 ($(date +%s))"
947 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
948 #define OBD_FAIL_TGT_CONN_RACE 0x701
949 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
950 $LCTL --device $mdcdev recover
# Clear the fail_loc and restore the saved at_max once the loop is done.
953 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
954 [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
957 run_test 44a "race in target handle connect"
960 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
961 [ "$mdcdev" ] || exit 2
962 for i in `seq 1 10`; do
963 echo "$i of 10 ($(date +%s))"
964 do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
965 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
966 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
967 $LCTL --device $mdcdev recover
970 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
973 run_test 44b "race in target handle connect"
975 # Handle failed close
977 mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
978 [ "$mdcdev" ] || exit 2
979 $LCTL --device $mdcdev recover
981 multiop_bg_pause $DIR/$tfile O_c || return 1
984 # This will cause the CLOSE to fail before even
985 # allocating a reply buffer
986 $LCTL --device $mdcdev deactivate || return 4
990 wait $pid || return 1
992 $LCTL --device $mdcdev activate || return 5
995 $CHECKSTAT -t file $DIR/$tfile || return 2
998 run_test 45 "Handle failed close"
1002 drop_reply "touch $DIR/$tfile"
1004 # ironically, the previous test, 45, will cause a real forced close,
1005 # so just look for one for this test
1006 dmesg | grep -i "force closing client file handle for $tfile" && return 1
1009 run_test 46 "Don't leak file handle after open resend (3325)"
1011 test_47() { # bug 2824
1012 # create some files to make sure precreate has been done on all
1013 # OSTs. (just in case this test is run independently)
1014 createmany -o $DIR/$tfile 20 || return 1
1016 # OBD_FAIL_OST_CREATE_NET 0x204
1018 do_facet ost1 "lctl set_param fail_loc=0x80000204"
1019 df $MOUNT || return 2
1021 # let the MDS discover the OST failure, attempt to recover, fail
1022 # and recover again.
1023 sleep $((3 * TIMEOUT))
1025 # Without 2824, this createmany would hang
1026 createmany -o $DIR/$tfile 20 || return 3
1027 unlinkmany $DIR/$tfile 20 || return 4
1029 do_facet ost1 "lctl set_param fail_loc=0"
1032 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
1035 replay_barrier $SINGLEMDS
1036 createmany -o $DIR/$tfile 20 || return 1
1037 # OBD_FAIL_OST_EROFS 0x216
1039 do_facet ost1 "lctl set_param fail_loc=0x80000216"
1040 df $MOUNT || return 2
1042 createmany -o $DIR/$tfile 20 20 || return 2
1043 unlinkmany $DIR/$tfile 40 || return 3
1045 do_facet ost1 "lctl set_param fail_loc=0"
1048 run_test 48 "MDS->OSC failure during precreate cleanup (2824)"
1051 local oscdev=`do_facet $SINGLEMDS lctl get_param -n devices | grep ${ost1_svc}-osc-MDT0000 | awk '{print $1}'`
1052 [ "$oscdev" ] || return 1
1053 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2
1054 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3
1055 # give the mds_lov_sync threads a chance to run
1058 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1060 # b3764 timed out lock replay
1063 cancel_lru_locks mdc
1065 multiop $DIR/$tfile s || return 1
1066 replay_barrier $SINGLEMDS
1067 #define OBD_FAIL_LDLM_REPLY 0x30c
1068 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c"
1069 fail $SINGLEMDS || return 2
1070 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1072 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1074 run_test 52 "time out lock replay (3764)"
1076 # bug 3462 - simultaneous MDC requests
1078 mkdir -p $DIR/${tdir}-1
1079 mkdir -p $DIR/${tdir}-2
1080 multiop $DIR/${tdir}-1/f O_c &
1082 # give multiop a chance to open
1085 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1086 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1087 kill -USR1 $close_pid
1088 cancel_lru_locks mdc # force the close
1089 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1091 mcreate $DIR/${tdir}-2/f || return 1
1093 # close should still be here
1094 [ -d /proc/$close_pid ] || return 2
1096 replay_barrier_nodf $SINGLEMDS
1098 wait $close_pid || return 3
1100 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1101 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1102 rm -rf $DIR/${tdir}-*
1104 run_test 53a "|X| close request while two MDC requests in flight"
1107 mkdir -p $DIR/${tdir}-1
1108 mkdir -p $DIR/${tdir}-2
1109 multiop $DIR/${tdir}-1/f O_c &
1112 #define OBD_FAIL_MDS_REINT_NET 0x107
1113 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1114 mcreate $DIR/${tdir}-2/f &
1118 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1119 kill -USR1 $close_pid
1120 cancel_lru_locks mdc # force the close
1121 wait $close_pid || return 1
1122 # open should still be here
1123 [ -d /proc/$open_pid ] || return 2
1125 replay_barrier_nodf $SINGLEMDS
1127 wait $open_pid || return 3
1129 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1130 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1131 rm -rf $DIR/${tdir}-*
1133 run_test 53b "|X| open request while two MDC requests in flight"
1136 mkdir -p $DIR/${tdir}-1
1137 mkdir -p $DIR/${tdir}-2
1138 multiop $DIR/${tdir}-1/f O_c &
1141 #define OBD_FAIL_MDS_REINT_NET 0x107
1142 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1143 mcreate $DIR/${tdir}-2/f &
1147 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1148 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1149 kill -USR1 $close_pid
1150 cancel_lru_locks mdc # force the close
1152 replay_barrier_nodf $SINGLEMDS
1153 fail_nodf $SINGLEMDS
1154 wait $open_pid || return 1
1156 # close should be gone
1157 [ -d /proc/$close_pid ] && return 2
1158 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1160 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1161 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1162 rm -rf $DIR/${tdir}-*
1164 run_test 53c "|X| open request and close request while two MDC requests in flight"
1167 mkdir -p $DIR/${tdir}-1
1168 mkdir -p $DIR/${tdir}-2
1169 multiop $DIR/${tdir}-1/f O_c &
1171 # give multiop a chance to open
1174 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
1175 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
1176 kill -USR1 $close_pid
1177 cancel_lru_locks mdc # force the close
1178 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1179 mcreate $DIR/${tdir}-2/f || return 1
1181 # close should still be here
1182 [ -d /proc/$close_pid ] || return 2
1184 wait $close_pid || return 3
1186 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1187 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1188 rm -rf $DIR/${tdir}-*
1190 run_test 53d "|X| close reply while two MDC requests in flight"
1193 mkdir -p $DIR/${tdir}-1
1194 mkdir -p $DIR/${tdir}-2
1195 multiop $DIR/${tdir}-1/f O_c &
1198 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1199 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1200 mcreate $DIR/${tdir}-2/f &
1204 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1205 kill -USR1 $close_pid
1206 cancel_lru_locks mdc # force the close
1207 wait $close_pid || return 1
1208 # open should still be here
1209 [ -d /proc/$open_pid ] || return 2
1211 replay_barrier_nodf $SINGLEMDS
1213 wait $open_pid || return 3
1215 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4
1216 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5
1217 rm -rf $DIR/${tdir}-*
1219 run_test 53e "|X| open reply while two MDC requests in flight"
1222 mkdir -p $DIR/${tdir}-1
1223 mkdir -p $DIR/${tdir}-2
1224 multiop $DIR/${tdir}-1/f O_c &
1227 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1228 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1229 mcreate $DIR/${tdir}-2/f &
1233 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
1234 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
1235 kill -USR1 $close_pid
1236 cancel_lru_locks mdc # force the close
1238 replay_barrier_nodf $SINGLEMDS
1239 fail_nodf $SINGLEMDS
1240 wait $open_pid || return 1
1242 # close should be gone
1243 [ -d /proc/$close_pid ] && return 2
1244 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1246 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1247 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1248 rm -rf $DIR/${tdir}-*
1250 run_test 53f "|X| open reply and close reply while two MDC requests in flight"
1253 mkdir -p $DIR/${tdir}-1
1254 mkdir -p $DIR/${tdir}-2
1255 multiop $DIR/${tdir}-1/f O_c &
1258 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
1259 do_facet $SINGLEMDS "lctl set_param fail_loc=0x119"
1260 mcreate $DIR/${tdir}-2/f &
1264 #define OBD_FAIL_MDS_CLOSE_NET 0x115
1265 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115"
1266 kill -USR1 $close_pid
1267 cancel_lru_locks mdc # force the close
1269 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1270 replay_barrier_nodf $SINGLEMDS
1271 fail_nodf $SINGLEMDS
1272 wait $open_pid || return 1
1274 # close should be gone
1275 [ -d /proc/$close_pid ] && return 2
1277 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1278 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1279 rm -rf $DIR/${tdir}-*
1281 run_test 53g "|X| drop open reply and close request while close and open are both in flight"
1284 mkdir -p $DIR/${tdir}-1
1285 mkdir -p $DIR/${tdir}-2
1286 multiop $DIR/${tdir}-1/f O_c &
1289 #define OBD_FAIL_MDS_REINT_NET 0x107
1290 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107"
1291 mcreate $DIR/${tdir}-2/f &
1295 #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13f
1296 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013f"
1297 kill -USR1 $close_pid
1298 cancel_lru_locks mdc # force the close
1301 replay_barrier_nodf $SINGLEMDS
1302 fail_nodf $SINGLEMDS
1303 wait $open_pid || return 1
1305 # close should be gone
1306 [ -d /proc/$close_pid ] && return 2
1307 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
1309 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1310 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4
1311 rm -rf $DIR/${tdir}-*
1313 run_test 53h "|X| open request and close reply while two MDC requests in flight"
1315 #b_cray 54 "|X| open request and close reply while two MDC requests in flight"
1317 #b3761 ASSERTION(hash != 0) failed
1319 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1320 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b"
1322 # give touch a chance to run
1324 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1328 run_test 55 "let MDS_CHECK_RESENT return the original return code instead of 0"
1330 #b3440 ASSERTION(rec->ur_fid2->id) failed
1332 ln -s foo $DIR/$tfile
1333 replay_barrier $SINGLEMDS
1334 #drop_reply "cat $DIR/$tfile"
1338 run_test 56 "don't replay a symlink open request (3440)"
1340 #recovery one mds-ost setattr from llog
1342 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1343 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1345 replay_barrier $SINGLEMDS
1348 $CHECKSTAT -t file $DIR/$tfile || return 1
1349 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1352 run_test 57 "test recovery from llog for setattr op"
1354 #recovery many mds-ost setattr from llog
1357 #define OBD_FAIL_MDS_OST_SETATTR 0x12c
1358 do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c"
1359 createmany -o $DIR/$tdir/$tfile-%d 2500
1360 replay_barrier $SINGLEMDS
1363 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1
1364 do_facet $SINGLEMDS "lctl set_param fail_loc=0x0"
1365 unlinkmany $DIR/$tdir/$tfile-%d 2500
1368 run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)"
1371 mount_client $MOUNT2
1373 touch $DIR/$tdir/$tfile
1374 replay_barrier $SINGLEMDS
1375 setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile
1377 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1378 [ x$VAL = x"bar" ] || return 1
1379 rm -f $DIR/$tdir/$tfile
1381 zconf_umount `hostname` $MOUNT2
1383 run_test 58b "test replay of setxattr op"
# test_58c (bug 16570): drop a setxattr request and a setxattr reply, then
# verify via a second mount that the resent/reconstructed operations still
# landed the expected xattr values.
1385 test_58c() { # bug 16570
1386 mount_client $MOUNT2
1388 touch $DIR/$tdir/$tfile
1389 drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \
1391 VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile`
1392 [ x$VAL = x"bar" ] || return 2
1393 drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \
1395 VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile`
1396 [ x$VAL = x"bar1" ] || return 4
# Clean up the test file and unmount the second client.
1397 rm -f $DIR/$tdir/$tfile
1399 zconf_umount `hostname` $MOUNT2
1401 run_test 58c "resend/reconstruct setxattr op"
# test_59 (partial): reproduce the log_commit_thread vs filter_destroy race
# that used to cause an import use-after-free.
1403 # log_commit_thread vs filter_destroy race used to lead to import use after free
1407 createmany -o $DIR/$tdir/$tfile-%d 200
1409 unlinkmany $DIR/$tdir/$tfile-%d 200
1410 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507
# Delay recovery on ost1 to widen the race window, then disarm after the
# (elided) failover step.
1411 do_facet ost1 "lctl set_param fail_loc=0x507"
1414 do_facet ost1 "lctl set_param fail_loc=0x0"
1418 run_test 59 "test log_commit_thread vs filter_destroy race"
# test_60 (bug 12086, partial): race between adding unlink llog records and
# catalog init in post_recovery; must cause no oops and no "No ctxt" error.
1420 # race between add unlink llog vs cat log init in post_recovery (only for b1_6)
1421 # bug 12086: should no oops and No ctxt error for this test
1424 createmany -o $DIR/$tdir/$tfile-%d 200
1425 replay_barrier $SINGLEMDS
# Unlink the first half before and the second half after the (elided) failover.
1426 unlinkmany $DIR/$tdir/$tfile-%d 0 100
1428 unlinkmany $DIR/$tdir/$tfile-%d 100 100
# Scan the kernel log for the known failure signature.
1429 local no_ctxt=`dmesg | grep "No ctxt"`
1430 [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery"
1432 run_test 60 "test llog post recovery init vs llog unlink"
# test_61a (partial): race between the llog recovery thread and llog cleanup
# on the OSTs.
1434 #test race llog recovery thread vs llog cleanup
1435 test_61a() { # was test_61
1437 createmany -o $DIR/$tdir/$tfile-%d 800
1439 # OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221
1440 unlinkmany $DIR/$tdir/$tfile-%d 800
# Arm the failpoint on every OST node; disarm after the (elided) failover.
1441 set_nodes_failloc "$(osts_nodes)" 0x80000221
1446 set_nodes_failloc "$(osts_nodes)" 0x0
# The unlinked files must stay gone after recovery (CHECKSTAT success = bug).
1448 $CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1
1451 run_test 61a "test race llog recovery vs llog cleanup"
# test_61b (partial): race between MDS llog sync and llog cleanup across a
# failover; client I/O must still succeed afterwards.
1453 #test race mds llog sync vs llog cleanup
1455 # OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x140
1456 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140"
1457 facet_failover $SINGLEMDS
1460 do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1
1462 run_test 61b "test race mds llog sync vs llog cleanup"
# test_61c (partial): race between the cancel-cookie callback and llog cleanup
# on the OSTs.
# NOTE(review): the run_test description below says "mds llog sync" — it looks
# copy-pasted from 61b; the failpoint is an OST cancel-cookie timeout.
1464 #test race cancel cookie cb vs llog cleanup
1466 # OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222
1468 set_nodes_failloc "$(osts_nodes)" 0x80000222
1472 set_nodes_failloc "$(osts_nodes)" 0x0
1474 run_test 61c "test race mds llog sync vs llog cleanup"
# test_62 (bug 15756, partial): ensure a resent replay request is not
# mis-dropped by the target during recovery.
1476 test_62() { # Bug 15756 - don't mis-drop resent replay
1478 replay_barrier $SINGLEMDS
1479 createmany -o $DIR/$tdir/$tfile- 25
# NOTE(review): comment corrected from 0x706 — the fail_loc actually set below
# is 0x80000707, i.e. OBD_FAIL_TGT_REPLAY_DROP (0x707); confirm in obd_support.h.
1480 #define OBD_FAIL_TGT_REPLAY_DROP 0x707
1481 do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707"
1482 facet_failover $SINGLEMDS
# df doubles as a "client reconnected and FS usable" check.
1483 df $MOUNT || return 1
1484 do_facet $SINGLEMDS "lctl set_param fail_loc=0"
# All 25 replayed creates must be unlinkable, proving the replays landed.
1485 unlinkmany $DIR/$tdir/$tfile- 25 || return 2
1488 run_test 62 "don't mis-drop resent replay"
# ---- Adaptive Timeouts (AT) section setup (bug 3055), partial ----
1490 #Adaptive Timeouts (bug 3055)
# Save the current at_max of each facet so the epilogue can restore it.
1492 # Suppose that all osts have the same at_max
1493 for facet in mds client ost; do
1494 eval AT_MAX_SAVE_${facet}=$(at_max_get $facet)
# at_start fragment: skip if the AT environment is invalid, otherwise force
# at_max to a known value (600) on every facet that differs.
1499 local at_max_new=600
1500 if ! at_is_valid; then
1501 skip "AT env is invalid"
1507 for facet in mds client ost; do
1508 at_max=$(at_max_get $facet)
1509 if [ $at_max -ne $at_max_new ]; then
1510 echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new"
1511 at_max_set $at_max_new $facet
# First run only: remember the original at_history value so it can be restored,
# then shorten the history window to speed up AT adaptation.
1516 if [ -z "$ATOLDBASE" ]; then
1517 local at_history=$(do_facet mds "find /sys/ -name at_history")
1518 [ -z "$at_history" ] && skip "missing /sys/.../at_history " && return 1
1519 ATOLDBASE=$(do_facet mds "cat $at_history")
1520 # speed up the timebase so we can check decreasing AT
1521 do_facet mds "echo 8 >> $at_history"
1522 do_facet ost1 "echo 8 >> $at_history"
1524 # sleep for a while to cool down, should be > 8s and also allow
1525 # at least one ping to be sent. simply use TIMEOUT to be safe.
# test_65a (partial): verify the server sends early replies when a request is
# deliberately slowed beyond the client's current service-time estimate.
1530 test_65a() #bug 3055
1532 at_start || return 0
# Flush the debug log so the later grep only sees this test's messages.
1533 $LCTL dk > /dev/null
1535 sysctl -w lnet.debug="+other"
1536 # Slow down a request to the current service time, this is critical
1537 # because previous tests may have caused this value to increase.
1538 REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts |
1539 awk '/portal 12/ {print $5}'`
1540 REQ_DELAY=$((${REQ_DELAY} + 5))
# NOTE(review): value is scaled by 1000 here (milliseconds?) while test_65b
# passes the raw value — units appear failpoint-specific; confirm upstream.
1542 do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000))
1543 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1544 do_facet mds sysctl -w lustre.fail_loc=0x8000050a
1545 createmany -o $DIR/$tfile 10 > /dev/null
1546 unlinkmany $DIR/$tfile 10 > /dev/null
1547 # check for log message
1548 $LCTL dk | grep "Early reply #" || error "No early reply"
1550 # client should show REQ_DELAY estimates
1551 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1553 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal
1555 run_test 65a "AT: verify early replies"
# test_65b (partial): same early-reply check as 65a but on the OST bulk write
# path (packed reply), using a forced single-stripe file on OST0000.
1557 test_65b() #bug 3055
1559 at_start || return 0
1562 sysctl -w lnet.debug="other trace"
# Flush the debug log so the later grep only sees this test's messages.
1563 $LCTL dk > /dev/null
1564 # Slow down a request to the current service time, this is critical
1565 # because previous tests may have caused this value to increase.
1566 REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts |
1567 awk '/portal 6/ {print $5}'`
1568 REQ_DELAY=$((${REQ_DELAY} + 5))
1570 do_facet ost1 lctl set_param fail_val=${REQ_DELAY}
1571 #define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224
1572 do_facet ost1 sysctl -w lustre.fail_loc=0x224
# Single stripe on index 0 so the failpoint armed on ost1 is actually hit.
1575 lfs setstripe $DIR/$tfile --index=0 --count=1
1576 # force some real bulk transfer
1577 multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c
1579 do_facet ost1 sysctl -w lustre.fail_loc=0
1580 # check for log message
1581 $LCTL dk | grep "Early reply #" || error "No early reply"
1583 # client should show REQ_DELAY estimates
1584 lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal
1586 run_test 65b "AT: verify early replies on packed reply / bulk"
# test_66a (partial): verify the MDT service-time estimate grows under induced
# delay and then decays once the delay is removed, without early replies.
1588 test_66a() #bug 3055
1590 at_start || return 0
1591 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
1592 # adjust 5s at a time so no early reply is sent (within deadline)
1593 do_facet mds "sysctl -w lustre.fail_val=5000"
1594 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1595 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1596 createmany -o $DIR/$tfile 20 > /dev/null
1597 unlinkmany $DIR/$tfile 20 > /dev/null
1598 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
# Second round with a larger delay to push the estimate higher.
1599 do_facet mds "sysctl -w lustre.fail_val=10000"
1600 do_facet mds "sysctl -w lustre.fail_loc=0x8000050a"
1601 createmany -o $DIR/$tfile 20 > /dev/null
1602 unlinkmany $DIR/$tfile 20 > /dev/null
1603 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
# Disarm and run normal-speed requests so the estimate can decay again.
1604 do_facet mds "sysctl -w lustre.fail_loc=0"
1606 createmany -o $DIR/$tfile 20 > /dev/null
1607 unlinkmany $DIR/$tfile 20 > /dev/null
1608 lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12"
# Field 5 = current estimate, field 7 = worst seen, per the awk extraction.
1609 CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}')
1610 WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}')
1611 echo "Current MDT timeout $CUR, worst $WORST"
# The current estimate must have dropped back below the recorded worst case.
1612 [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST"
1614 run_test 66a "AT: verify MDT service time adjusts with no early replies"
# test_66b (partial): verify the AT network-latency estimate adapts when a
# reply is delayed past the current estimate.
1616 test_66b() #bug 3055
1618 at_start || return 0
1619 ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
# Delay the reply by slightly more than the current network-latency estimate.
1620 sysctl -w lustre.fail_val=$(($ORIG + 5))
1621 #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
1622 sysctl -w lustre.fail_loc=0x50c
# Any metadata RPC will do; the result of ls is irrelevant.
1623 ls $DIR/$tfile > /dev/null 2>&1
1624 sysctl -w lustre.fail_loc=0
1625 CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
1626 WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
1627 echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
# The worst-seen latency must have grown past the original estimate.
1628 [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
1630 run_test 66b "AT: verify net latency adjusts"
# test_67a (partial): gradually slowed request processing must not trigger
# client reconnects; measured as the delta of osc connect-counter sums.
1632 test_67a() #bug 3055
1634 at_start || return 0
1635 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1636 # sleeping threads may drive values above this
1637 do_facet ost1 "sysctl -w lustre.fail_val=400"
1638 #define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a
1639 do_facet ost1 "sysctl -w lustre.fail_loc=0x50a"
1640 createmany -o $DIR/$tfile 20 > /dev/null
1641 unlinkmany $DIR/$tfile 20 > /dev/null
1642 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1643 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1644 ATTEMPTS=$(($CONN2 - $CONN1))
1645 echo "$ATTEMPTS osc reconnect attemps on gradual slow"
# Known issue 13721: reconnects here are reported but do not fail the run.
1646 [ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect"
1649 run_test 67a "AT: verify slow request processing doesn't induce reconnects"
# test_67b (partial): an instant slowdown (paused OST object create) must not
# cause reconnects, and a second pass with the same failpoint must not time out.
1651 test_67b() #bug 3055
1653 at_start || return 0
1654 CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1655 #define OBD_FAIL_OST_PAUSE_CREATE 0x223
1656 do_facet ost1 "sysctl -w lustre.fail_val=20000"
1657 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1658 cp /etc/profile $DIR/$tfile || error "cp failed"
1660 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1662 CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1663 ATTEMPTS=$(($CONN2 - $CONN1))
1664 echo "$ATTEMPTS osc reconnect attemps on instant slow"
1665 # do it again; should not timeout
1666 do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223"
1667 cp /etc/profile $DIR/$tfile || error "cp failed"
1668 do_facet ost1 "sysctl -w lustre.fail_loc=0"
1670 do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts"
1671 CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}')
1672 ATTEMPTS=$(($CONN3 - $CONN2))
1673 echo "$ATTEMPTS osc reconnect attemps on 2nd slow"
# Unlike 67a, any reconnect on the second pass is a hard failure.
1674 [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect"
1677 run_test 67b "AT: verify instant slowdown doesn't induce reconnects"
# test_68 (bug 13813, partial): verify AT copes with slowed lock cancels.
# Temporarily raises ldlm_enqueue_min to TIMEOUT and restores it at the end.
1679 test_68 () #bug 13813
1681 at_start || return 0
1682 local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min)
1683 [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0
# Save the original value for restoration below.
1684 local ENQ_MIN=$(cat $ldlm_enqueue_min)
1685 echo $TIMEOUT >> $ldlm_enqueue_min
1686 rm -f $DIR/${tfile}_[1-2]
# Single stripe on index 0 pins the I/O to one OST.
1687 lfs setstripe $DIR/$tfile --index=0 --count=1
1688 #define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312
# First pass: lock-cancel delay just under TIMEOUT.
1689 sysctl -w lustre.fail_val=$(($TIMEOUT - 1))
1690 sysctl -w lustre.fail_loc=0x80000312
1691 cp /etc/profile $DIR/${tfile}_1 || error "1st cp failed $?"
# Second pass: cancel delay 1.5x TIMEOUT; must still succeed with AT active.
1692 sysctl -w lustre.fail_val=$((TIMEOUT * 3 / 2))
1693 sysctl -w lustre.fail_loc=0x80000312
1694 cp /etc/profile $DIR/${tfile}_2 || error "2nd cp failed $?"
1695 sysctl -w lustre.fail_loc=0
# Restore the saved ldlm_enqueue_min.
1696 echo $ENQ_MIN >> $ldlm_enqueue_min
1699 run_test 68 "AT: verify slowing locks"
# AT section epilogue (partial): restore the at_history timebase and the
# per-facet at_max values saved before the AT tests ran.
1701 if [ -n "$ATOLDBASE" ]; then
1702 at_history=$(do_facet mds "find /sys/ -name at_history")
# Best-effort restore (|| true): a failure here must not fail the suite.
1703 do_facet mds "echo $ATOLDBASE >> $at_history" || true
1704 do_facet ost1 "echo $ATOLDBASE >> $at_history" || true
1707 if [ $AT_MAX_SET -ne 0 ]; then
1708 for facet in mds client ost; do
1709 var=AT_MAX_SAVE_${facet}
# ${!var} is bash indirect expansion: the value of AT_MAX_SAVE_<facet>.
1710 echo restore AT on $facet to saved value ${!var}
1711 at_max_set ${!var} $facet
1712 AT_NEW=$(at_max_get $facet)
1713 echo Restored AT value on $facet $AT_NEW
# Read back and verify the restore actually took effect.
1714 [ $AT_NEW -ne ${!var} ] && \
1715 error "$facet : AT value was not restored SAVED ${!var} NEW $AT_NEW"
1719 # end of AT tests includes above lines
# test_70a (partial): multi-client sanity — every client writes a 10MB file,
# then one client reads back all of them. Skips without >= 2 clients.
1722 # start multi-client tests
1724 [ -z "$CLIENTS" ] && \
1725 { skip "Need two or more clients." && return; }
1726 [ $CLIENTCOUNT -lt 2 ] && \
1727 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
1729 echo "mount clients $CLIENTS ..."
1730 zconf_mount_clients $CLIENTS $DIR
# Convert the comma-separated client list into a space-separated one.
1732 local clients=${CLIENTS//,/ }
1733 echo "Write/read files on $DIR ; clients $CLIENTS ... "
1734 for CLIENT in $clients; do
1735 do_node $CLIENT dd bs=1M count=10 if=/dev/zero \
1736 of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \
1737 error "dd failed on $CLIENT"
# Cross-check: the last client in the list reads every file written above.
1740 local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/')
1741 for C in ${CLIENTS//,/ }; do
1742 do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \
1743 error "dd if=$DIR/${tfile}_${C} failed on $prev_client"
1749 run_test 70a "check multi client t-f"
# test_70b (partial): run dbench on every client, fail the MDS mid-run, and
# check each client's load after recovery. Skips without >= 2 clients.
1752 [ -z "$CLIENTS" ] && \
1753 { skip "Need two or more clients." && return; }
1754 [ $CLIENTCOUNT -lt 2 ] && \
1755 { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
1757 zconf_mount_clients $CLIENTS $DIR
1759 local duration="-t 60"
1760 local cmd="rundbench 1 $duration "
# Start dbench in the background on each client and record its PID in a file.
# NOTE(review): the $PID assignment line is elided here — presumably PID=$!
# right after the backgrounded $PDSH; confirm against the full source.
1762 for CLIENT in ${CLIENTS//,/ }; do
1763 $PDSH $CLIENT "set -x; PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:${DBENCH_LIB} DBENCH_LIB=${DBENCH_LIB} $cmd" &
1765 echo $PID >pid.$CLIENT
1766 echo "Started load PID=`cat pid.$CLIENT`"
1769 replay_barrier $SINGLEMDS
1770 sleep 3 # give clients a time to do operations
1772 log "$TESTNAME fail mds 1"
1775 # wait for client to reconnect to MDS
# Collect each background load's status (the wait call itself is elided).
1778 for CLIENT in ${CLIENTS//,/ }; do
1779 PID=`cat pid.$CLIENT`
1782 echo "load on ${CLIENT} returned $rc"
1786 run_test 70b "mds recovery; $CLIENTCOUNT clients"
1790 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1794 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1795 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
1799 run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
1802 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1806 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
1807 rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
1811 run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
1814 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1817 createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
1819 $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
1820 $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
1822 rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
1824 stat $DIR/$tdir/f1002
1826 run_test 81a "CMD: unlink cross-node file (fail mds with name)"
1829 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1833 mkdir $dir || error "mkdir $dir failed"
1837 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
1839 run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
1842 [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
1846 mkdir $dir || error "mkdir $dir failed"
1850 $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
1852 run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
1854 equals_msg `basename $0`: test complete, cleaning up
1855 check_and_cleanup_lustre
1856 [ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true