6 # This test needs to be run on the client
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
14 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
21 # 46 - The MDS will always have to force close the cached opens
24 if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
25 ALWAYS_EXCEPT="0c $ALWAYS_EXCEPT"
32 if [ "$MDSCOUNT" -gt 1 ]; then
34 for mds in `mds_list`; do
35 MDSDEV=$TMP/${mds}-`hostname`
36 add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
38 add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
39 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
42 add_mds $SINGLEMDS --dev $MDSDEV --size $MDSSIZE
43 if [ ! -z "$$SINGLEMDSfailover_HOST" ]; then
44 add_mdsfailover $SINGLEMDS --dev $MDSDEV --size $MDSSIZE
46 add_lov lov1 $SINGLEMDS --stripe_sz $STRIPE_BYTES \
47 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
51 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
52 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
53 add_client client $MDS --lov lov1 --path $MOUNT
59 # make sure we are using the primary MDS, so the config log will
60 # be able to clean up properly.
61 activemds=`facet_active $SINGLEMDS`
62 if [ $activemds != "$SINGLEMDS" ]; then
65 zconf_umount `hostname` $MOUNT
66 for mds in `mds_list`; do
67 stop $mds ${FORCE} $MDSLCONFARGS
69 stop ost2 ${FORCE} --dump cleanup.log
70 stop ost ${FORCE} --dump cleanup.log
75 if [ "$ONLY" == "cleanup" ]; then
76 sysctl -w portals.debug=0 || true
81 SETUP=${SETUP:-"setup"}
82 CLEANUP=${CLEANUP:-"cleanup"}
87 start_krb5_kdc || exit 1
88 start_lsvcgssd || exit 2
90 start ost --reformat $OSTLCONFARGS
91 start ost2 --reformat $OSTLCONFARGS
92 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
93 for mds in `mds_list`; do
94 start $mds --reformat $MDSLCONFARGS
96 grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
101 if [ "$ONLY" == "setup" ]; then
108 replay_barrier $SINGLEMDS
111 run_test 0 "empty replay"
114 # this test attempts to trigger a race in the precreation code,
115 # and must run before any other objects are created on the filesystem
117 createmany -o $DIR/$tfile 20 || return 1
118 unlinkmany $DIR/$tfile 20 || return 2
120 run_test 0b "ensure object created after recover exists. (3284)"
123 if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
124 echo "Skip 0c in non-gss mode"
127 # drop gss error notification
128 replay_barrier $SINGLEMDS
129 fail_drop $SINGLEMDS 0x760
131 # drop gss init request
132 replay_barrier $SINGLEMDS
133 fail_drop $SINGLEMDS 0x780
135 run_test 0c "empty replay with gss init failures"
138 replay_barrier $SINGLEMDS
141 $CHECKSTAT -t file $DIR/$tfile || return 1
144 run_test 1 "simple create"
147 replay_barrier $SINGLEMDS
150 $CHECKSTAT -t file $DIR/$tfile || return 1
156 ./mcreate $DIR/$tfile
157 replay_barrier $SINGLEMDS
160 $CHECKSTAT -t file $DIR/$tfile || return 1
166 replay_barrier $SINGLEMDS
168 o_directory $DIR/$tfile
170 $CHECKSTAT -t file $DIR/$tfile || return 2
173 run_test 3a "replay failed open(O_DIRECTORY)"
176 replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
# Target $SINGLEMDS (not the hard-coded facet "mds") so the fail_loc is
# set on the active MDS facet, matching the rest of this script.
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000114"
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
182 $CHECKSTAT -t file $DIR/$tfile && return 2
185 run_test 3b "replay failed open -ENOMEM"
188 replay_barrier $SINGLEMDS
#define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
# Target $SINGLEMDS rather than the literal "mds" facet so this also
# works on multi-MDS configurations.
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128"
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
195 $CHECKSTAT -t file $DIR/$tfile && return 2
198 run_test 3c "replay failed open -ENOMEM"
201 replay_barrier $SINGLEMDS
202 for i in `seq 10`; do
203 echo "tag-$i" > $DIR/$tfile-$i
206 for i in `seq 10`; do
207 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
210 run_test 4 "|x| 10 open(O_CREAT)s"
213 replay_barrier $SINGLEMDS
216 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
218 run_test 4b "|x| rm 10 files"
220 # The idea is to get past the first block of precreated files on both
221 # osts, and then replay.
223 replay_barrier $SINGLEMDS
224 for i in `seq 220`; do
225 echo "tag-$i" > $DIR/$tfile-$i
228 for i in `seq 220`; do
229 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
233 # waiting for commitment of removal
235 run_test 5 "|x| 220 open(O_CREAT)"
239 replay_barrier $SINGLEMDS
241 mcreate $DIR/$tdir/$tfile
243 $CHECKSTAT -t dir $DIR/$tdir || return 1
244 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
246 # waiting for log process thread
248 run_test 6 "mkdir + contained create"
251 replay_barrier $SINGLEMDS
254 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
256 run_test 6b "|X| rmdir"
260 replay_barrier $SINGLEMDS
261 mcreate $DIR/$tdir/$tfile
263 $CHECKSTAT -t dir $DIR/$tdir || return 1
264 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
267 run_test 7 "mkdir |X| contained create"
270 replay_barrier $SINGLEMDS
271 multiop $DIR/$tfile mo_c &
276 $CHECKSTAT -t file $DIR/$tfile || return 1
277 kill -USR1 $MULTIPID || return 2
278 wait $MULTIPID || return 3
281 run_test 8 "creat open |X| close"
284 replay_barrier $SINGLEMDS
286 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
288 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
290 echo " old_inum == $old_inum, new_inum == $new_inum"
291 if [ $old_inum -eq $new_inum ] ;
293 echo " old_inum and new_inum match"
295 echo "!!!! old_inum and new_inum NOT match"
300 run_test 9 "|X| create (same inum/gen)"
304 replay_barrier $SINGLEMDS
305 mv $DIR/$tfile $DIR/$tfile-2
309 $CHECKSTAT $DIR/$tfile && return 1
310 $CHECKSTAT $DIR/$tfile-2 || return 2
314 run_test 10 "create |X| rename unlink"
318 echo "old" > $DIR/$tfile
319 mv $DIR/$tfile $DIR/$tfile-2
320 replay_barrier $SINGLEMDS
321 echo "new" > $DIR/$tfile
323 grep old $DIR/$tfile-2
325 grep new $DIR/$tfile || return 1
326 grep old $DIR/$tfile-2 || return 2
328 run_test 11 "create open write rename |X| create-old-name read"
332 multiop $DIR/$tfile o_tSc &
334 # give multiop a chance to open
337 replay_barrier $SINGLEMDS
339 wait $pid || return 1
342 [ -e $DIR/$tfile ] && return 2
345 run_test 12 "open, unlink |X| close"
348 # 1777 - replay open after committed chmod that would make
349 # a regular open a failure
352 multiop $DIR/$tfile O_wc &
354 # give multiop a chance to open
357 $CHECKSTAT -p 0 $DIR/$tfile
358 replay_barrier $SINGLEMDS
361 wait $pid || return 1
363 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
366 run_test 13 "open chmod 0 |x| write close"
369 multiop $DIR/$tfile O_tSc &
371 # give multiop a chance to open
374 replay_barrier $SINGLEMDS
375 kill -USR1 $pid || return 1
376 wait $pid || return 2
379 [ -e $DIR/$tfile ] && return 3
382 run_test 14 "open(O_CREAT), unlink |X| close"
385 multiop $DIR/$tfile O_tSc &
387 # give multiop a chance to open
390 replay_barrier $SINGLEMDS
391 touch $DIR/g11 || return 1
393 wait $pid || return 2
396 [ -e $DIR/$tfile ] && return 3
397 touch $DIR/h11 || return 4
400 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
404 replay_barrier $SINGLEMDS
407 mcreate $DIR/$tfile-2
409 [ -e $DIR/$tfile ] && return 1
410 [ -e $DIR/$tfile-2 ] || return 2
411 munlink $DIR/$tfile-2 || return 3
413 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
416 replay_barrier $SINGLEMDS
417 multiop $DIR/$tfile O_c &
419 # give multiop a chance to open
422 kill -USR1 $pid || return 1
423 wait $pid || return 2
424 $CHECKSTAT -t file $DIR/$tfile || return 3
427 run_test 17 "|X| open(O_CREAT), |replay| close"
430 replay_barrier $SINGLEMDS
431 multiop $DIR/$tfile O_tSc &
433 # give multiop a chance to open
436 touch $DIR/$tfile-2 || return 1
437 echo "pid: $pid will close"
439 wait $pid || return 2
442 [ -e $DIR/$tfile ] && return 3
443 [ -e $DIR/$tfile-2 ] || return 4
444 # this touch frequently fails
445 touch $DIR/$tfile-3 || return 5
446 munlink $DIR/$tfile-2 || return 6
447 munlink $DIR/$tfile-3 || return 7
450 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
452 # bug 1855 (a simpler form of test_11 above)
454 replay_barrier $SINGLEMDS
456 echo "old" > $DIR/$tfile
457 mv $DIR/$tfile $DIR/$tfile-2
458 grep old $DIR/$tfile-2
460 grep old $DIR/$tfile-2 || return 2
462 run_test 19 "|X| mcreate, open, write, rename "
465 replay_barrier $SINGLEMDS
466 multiop $DIR/$tfile O_tSc &
468 # give multiop a chance to open
474 wait $pid || return 1
475 [ -e $DIR/$tfile ] && return 2
478 run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
481 replay_barrier $SINGLEMDS
482 multiop $DIR/$tfile O_tSc &
484 # give multiop a chance to open
487 touch $DIR/g11 || return 1
491 wait $pid || return 2
492 [ -e $DIR/$tfile ] && return 3
493 touch $DIR/h11 || return 4
496 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
499 multiop $DIR/$tfile O_tSc &
501 # give multiop a chance to open
504 replay_barrier $SINGLEMDS
509 wait $pid || return 1
510 [ -e $DIR/$tfile ] && return 2
513 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
516 multiop $DIR/$tfile O_tSc &
518 # give multiop a chance to open
521 replay_barrier $SINGLEMDS
523 touch $DIR/g11 || return 1
527 wait $pid || return 2
528 [ -e $DIR/$tfile ] && return 3
529 touch $DIR/h11 || return 4
532 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
535 multiop $DIR/$tfile O_tSc &
537 # give multiop a chance to open
540 replay_barrier $SINGLEMDS
544 wait $pid || return 1
545 [ -e $DIR/$tfile ] && return 2
548 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
551 multiop $DIR/$tfile O_tSc &
553 # give multiop a chance to open
557 replay_barrier $SINGLEMDS
560 wait $pid || return 1
561 [ -e $DIR/$tfile ] && return 2
564 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
567 replay_barrier $SINGLEMDS
568 multiop $DIR/$tfile-1 O_tSc &
570 multiop $DIR/$tfile-2 O_tSc &
572 # give multiop a chance to open
577 wait $pid2 || return 1
581 wait $pid1 || return 2
582 [ -e $DIR/$tfile-1 ] && return 3
583 [ -e $DIR/$tfile-2 ] && return 4
586 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
589 replay_barrier $SINGLEMDS
590 multiop $DIR/$tfile-1 O_tSc &
592 multiop $DIR/$tfile-2 O_tSc &
594 # give multiop a chance to open
601 wait $pid1 || return 1
603 wait $pid2 || return 2
604 [ -e $DIR/$tfile-1 ] && return 3
605 [ -e $DIR/$tfile-2 ] && return 4
608 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
611 multiop $DIR/$tfile-1 O_tSc &
613 multiop $DIR/$tfile-2 O_tSc &
615 # give multiop a chance to open
617 replay_barrier $SINGLEMDS
621 wait $pid2 || return 1
625 wait $pid1 || return 2
626 [ -e $DIR/$tfile-1 ] && return 3
627 [ -e $DIR/$tfile-2 ] && return 4
630 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
633 multiop $DIR/$tfile-1 O_tSc &
635 multiop $DIR/$tfile-2 O_tSc &
637 # give multiop a chance to open
639 replay_barrier $SINGLEMDS
645 wait $pid1 || return 1
647 wait $pid2 || return 2
648 [ -e $DIR/$tfile-1 ] && return 3
649 [ -e $DIR/$tfile-2 ] && return 4
652 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
655 multiop $DIR/$tfile-1 O_tSc &
657 multiop $DIR/$tfile-2 O_tSc &
659 # give multiop a chance to open
664 replay_barrier $SINGLEMDS
667 wait $pid1 || return 1
669 wait $pid2 || return 2
670 [ -e $DIR/$tfile-1 ] && return 3
671 [ -e $DIR/$tfile-2 ] && return 4
674 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
677 multiop $DIR/$tfile-1 O_tSc &
679 multiop $DIR/$tfile-2 O_tSc &
681 # give multiop a chance to open
685 replay_barrier $SINGLEMDS
689 wait $pid1 || return 1
691 wait $pid2 || return 2
692 [ -e $DIR/$tfile-1 ] && return 3
693 [ -e $DIR/$tfile-2 ] && return 4
696 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
698 # tests for bug 2104; completion without crashing is success. The close is
699 # stale, but we always return 0 for close, so the app never sees it.
701 multiop $DIR/$tfile O_c &
703 multiop $DIR/$tfile O_c &
705 # give multiop a chance to open.
706 # 1 second is not enough, I increased it to 5, however in ideal word
707 # I should have to wait for open finish in more smart manner. --umka
# Retry df once after a short pause.  The old one-liner
#   df $MOUNT || sleep 1 && df $MOUNT || return 1
# parsed as ((df || sleep) && df) || return due to left-associative
# && / ||, so df ran a second time even when the first df succeeded.
if ! df $MOUNT; then
	sleep 1
	df $MOUNT || return 1
fi
716 run_test 32 "close() notices client eviction; close() after client eviction"
718 # Abort recovery before client complete
720 replay_barrier $SINGLEMDS
722 fail_abort $SINGLEMDS
723 # this file should be gone, because the replay was aborted
724 $CHECKSTAT -t file $DIR/$tfile && return 1
727 run_test 33 "abort recovery before client does replay"
730 multiop $DIR/$tfile O_c &
732 # give multiop a chance to open
736 replay_barrier $SINGLEMDS
737 fail_abort $SINGLEMDS
739 [ -e $DIR/$tfile ] && return 1
743 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
745 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
#define OBD_FAIL_MDS_REINT_NET_REP 0x119
# Use $SINGLEMDS so the fail_loc lands on the active MDS facet.
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
755 # give a chance to remove from MDS
756 fail_abort $SINGLEMDS
757 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
759 run_test 35 "test recovery from llog for unlink op"
761 # b=2432 resent cancel after replay uses wrong cookie,
762 # so don't resend cancels
764 replay_barrier $SINGLEMDS
766 checkstat $DIR/$tfile
767 facet_failover $SINGLEMDS
769 if dmesg | grep "unknown lock cookie"; then
770 echo "cancel after replay failed"
774 run_test 36 "don't resend cancel"
777 # directory orphans can't be unlinked from PENDING directory
779 rmdir $DIR/$tfile 2>/dev/null
780 multiop $DIR/$tfile dD_c &
782 # give multiop a chance to open
786 replay_barrier $SINGLEMDS
787 # clear the dmesg buffer so we only see errors from this recovery
789 fail_abort $SINGLEMDS
791 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
795 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
798 createmany -o $DIR/$tfile-%d 800
799 unlinkmany $DIR/$tfile-%d 0 400
800 replay_barrier $SINGLEMDS
802 unlinkmany $DIR/$tfile-%d 400 400
804 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
806 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
809 createmany -o $DIR/$tfile-%d 800
810 replay_barrier $SINGLEMDS
811 unlinkmany $DIR/$tfile-%d 0 400
813 unlinkmany $DIR/$tfile-%d 400 400
815 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
817 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
# Sum the ost_write operation counts across all OSC stats files.
# awk reads the files directly (no useless "cat | awk" pipeline);
# "writes + 0" prints 0 when no ost_write lines are present, matching
# the old "-vwrites=0" initialization.
awk '/ost_write/ { writes += $2 } END { print writes + 0 }' \
	/proc/fs/lustre/osc/*/stats
826 $LCTL mark multiop $MOUNT/$tfile OS_c
827 multiop $MOUNT/$tfile OS_c &
829 writeme -s $MOUNT/${tfile}-2 &
832 facet_failover $SINGLEMDS
#define OBD_FAIL_MDS_CONNECT_NET 0x117
# Set the fail_loc on the active MDS facet ($SINGLEMDS), not the
# hard-coded "mds" facet, for consistency with the other tests.
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000117"
836 stat1=`count_ost_writes`
838 stat2=`count_ost_writes`
839 echo "$stat1, $stat2"
840 if [ $stat1 -lt $stat2 ]; then
841 echo "writes continuing during recovery"
844 echo "writes not continuing during recovery, bug 2477"
847 echo "waiting for writeme $WRITE_PID"
851 echo "waiting for multiop $PID"
852 wait $PID || return 2
853 do_facet client munlink $MOUNT/$tfile || return 3
854 do_facet client munlink $MOUNT/${tfile}-2 || return 3
857 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
861 # make sure that a read to one osc doesn't try to double-unlock its page just
862 # because another osc is invalid. trigger_group_io used to mistakenly return
863 # an error if any oscs were invalid even after having successfully put rpcs
864 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
865 # the page, guarnateeing that the unlock from the RPC completion would
866 # assert on trying to unlock the unlocked page.
868 local f=$MOUNT/$tfile
869 # make sure the start of the file is ost1
870 lfs setstripe $f $((128 * 1024)) 0 0
871 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
873 # fail ost2 and read from ost1
874 local osc2_dev=`$LCTL device_list | \
875 awk '(/ost2.*client_facet/){print $4}' `
876 $LCTL --device %$osc2_dev deactivate
877 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
878 $LCTL --device %$osc2_dev activate
881 run_test 41 "read from a valid osc while other oscs are invalid"
883 # test MDS recovery after ost failure
885 blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
886 createmany -o $DIR/$tfile-%d 800
888 unlinkmany $DIR/$tfile-%d 0 400
891 # osc is evicted, fs is smaller
892 blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
893 [ $blocks_after -lt $blocks ] || return 1
894 echo wait for MDS to timeout and recover
895 sleep $((TIMEOUT * 2))
896 unlinkmany $DIR/$tfile-%d 400 400
897 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
899 run_test 42 "recovery after ost failure"
902 # timeout in MDS/OST recovery RPC will LBUG MDS
904 replay_barrier $SINGLEMDS
906 # OBD_FAIL_OST_CREATE_NET 0x204
907 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
908 facet_failover $SINGLEMDS
909 df $MOUNT || return 1
911 do_facet ost "sysctl -w lustre.fail_loc=0"
915 run_test 43 "mds osc import failure during recovery; don't LBUG"
918 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
# Set/clear the fail_loc on the active MDS facet ($SINGLEMDS), matching
# the convention used by the other tests in this script.
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000701"
$LCTL --device $mdcdev recover
do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
925 run_test 44 "race in target handle connect"
927 # Handle failed close
929 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
930 $LCTL --device $mdcdev recover
932 multiop $DIR/$tfile O_c &
936 # This will cause the CLOSE to fail before even
937 # allocating a reply buffer
938 $LCTL --device $mdcdev deactivate
942 wait $pid || return 1
944 $LCTL --device $mdcdev activate
947 $CHECKSTAT -t file $DIR/$tfile || return 2
950 run_test 45 "Handle failed close"
954 drop_reply "touch $DIR/$tfile"
956 # ironically, the previous test, 45, will cause a real forced close,
957 # so just look for one for this test
958 dmesg | grep -i "force closing client file handle for $tfile" && return 1
961 run_test 46 "Don't leak file handle after open resend (3325)"
966 # create some files to make sure precreate has been done on all
967 # OSTs. (just in case this test is run independently)
968 createmany -o $DIR/$tfile 20 || return 1
970 # OBD_FAIL_OST_CREATE_NET 0x204
972 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
973 df $MOUNT || return 2
975 # let the MDS discover the OST failure, attempt to recover, fail
977 sleep $((3 * TIMEOUT))
979 # Without 2824, this createmany would hang
980 createmany -o $DIR/$tfile 20 || return 3
981 unlinkmany $DIR/$tfile 20 || return 4
983 do_facet ost "sysctl -w lustre.fail_loc=0"
986 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
990 createmany -o $DIR/${tfile}- 100
991 $CHECKSTAT $DIR/${tfile}-99 || return 1
993 df $MOUNT || echo "first df failed"
995 df $MOUNT || return 2
997 $CHECKSTAT $DIR/${tfile}-99 || return 3
1000 replay_barrier $SINGLEMDS
1002 unlinkmany $DIR/${tfile}- 100 || return 4
1003 if dmesg | grep "back in time"; then
1004 echo "server went back in time!"
1009 run_test 48 "Don't lose transno when client is evicted (2525)"
1011 # b=3550 - replay of unlink
1013 replay_barrier $SINGLEMDS
1014 createmany -o $DIR/$tfile-%d 400 || return 1
1015 unlinkmany $DIR/$tfile-%d 0 400 || return 2
1017 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1019 run_test 49 "re-write records to llog as written during fail"
# Interpolate $SINGLEMDS into the awk pattern: inside single quotes the
# shell never expanded it, so the old pattern matched nothing and
# osc_dev was always empty.  Declaration is split from the assignment
# so a pipeline failure is not masked by "local"'s exit status.
local osc_dev
osc_dev=$($LCTL device_list |
	awk "(/ost_svc_${SINGLEMDS}_svc/){print \$4}")
1024 $LCTL --device %$osc_dev recover && $LCTL --device %$osc_dev recover
1025 # give the mds_lov_sync threads a chance to run
1028 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1030 # bug 3462 - simultaneous MDC requests
1032 replay_barrier_nodf $SINGLEMDS
1033 mkdir -p $DIR/${tdir}-1
1034 mkdir -p $DIR/${tdir}-2
1035 touch $DIR/${tdir}-2/f
1036 multiop $DIR/${tdir}-1/f O_c &
1038 # give multiop a chance to open
1041 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1043 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1044 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 1
1048 wait $pid || return 2
1049 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1050 rm -rf $DIR/${tdir}-*
1052 run_test 51a "|X| close request while two MDC requests in flight"
1055 replay_barrier_nodf $SINGLEMDS
1056 mkdir -p $DIR/$tdir-1
1057 mkdir -p $DIR/$tdir-2
1058 multiop $DIR/$tdir-1/f O_c &
1060 # give multiop a chance to open
1063 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1064 touch $DIR/${tdir}-2/f &
1066 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1069 wait $pid || return 1
1073 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1074 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1075 rm -rf $DIR/${tdir}-*
1077 run_test 51b "|X| open request while two MDC requests in flight"
1080 replay_barrier_nodf $SINGLEMDS
1081 mkdir -p $DIR/${tdir}-1
1082 mkdir -p $DIR/${tdir}-2
1083 multiop $DIR/${tdir}-1/f O_c &
1085 # give multiop a chance to open
1088 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1089 touch $DIR/${tdir}-2/f &
1090 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1092 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1094 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1098 wait $pid || return 1
1099 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1100 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1101 rm -rf $DIR/${tdir}-*
1103 run_test 51c "|X| open request and close request while two MDC requests in flight"
1106 replay_barrier_nodf $SINGLEMDS
1107 mkdir -p $DIR/${tdir}-1
1108 mkdir -p $DIR/${tdir}-2
1109 touch $DIR/${tdir}-2/f
1110 multiop $DIR/${tdir}-1/f O_c &
1112 # give multiop a chance to open
1115 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1117 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1118 #$CHECKSTAT -t file $DIR/${tdir}-2/f || return 1
1122 wait $pid || return 2
1123 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1124 rm -rf $DIR/${tdir}-*
1126 run_test 51d "|X| close reply while two MDC requests in flight"
1129 replay_barrier_nodf $SINGLEMDS
1130 mkdir -p $DIR/$tdir-1
1131 mkdir -p $DIR/$tdir-2
1132 multiop $DIR/$tdir-1/f O_c &
1134 # give multiop a chance to open
1137 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1138 touch $DIR/${tdir}-2/f &
1140 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1143 wait $pid || return 1
1147 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1148 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1149 rm -rf $DIR/${tdir}-*
1151 run_test 51e "|X| open reply while two MDC requests in flight"
1154 replay_barrier_nodf $SINGLEMDS
1155 mkdir -p $DIR/${tdir}-1
1156 mkdir -p $DIR/${tdir}-2
1157 multiop $DIR/${tdir}-1/f O_c &
1159 # give multiop a chance to open
1162 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1163 touch $DIR/${tdir}-2/f &
1164 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1166 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1168 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1172 wait $pid || return 1
1173 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1174 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1175 rm -rf $DIR/${tdir}-*
1177 run_test 51f "|X| open reply and close reply while two MDC requests in flight"
1180 replay_barrier_nodf $SINGLEMDS
1181 mkdir -p $DIR/${tdir}-1
1182 mkdir -p $DIR/${tdir}-2
1183 multiop $DIR/${tdir}-1/f O_c &
1185 # give multiop a chance to open
1188 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1189 touch $DIR/${tdir}-2/f &
1190 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1192 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1194 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1198 wait $pid || return 1
1199 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1200 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1201 rm -rf $DIR/${tdir}-*
1203 run_test 51g "|X| open reply and close request while two MDC requests in flight"
1206 replay_barrier_nodf $SINGLEMDS
1207 mkdir -p $DIR/${tdir}-1
1208 mkdir -p $DIR/${tdir}-2
1209 multiop $DIR/${tdir}-1/f O_c &
# give multiop a chance to open
1214 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1215 touch $DIR/${tdir}-2/f &
1216 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1218 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1220 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1224 wait $pid || return 1
1225 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1226 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1227 rm -rf $DIR/${tdir}-*
1229 run_test 51h "|X| open request and close reply while two MDC requests in flight"
1231 # b3764 timed out lock replay
1234 cancel_lru_locks MDC
1236 multiop $DIR/$tfile s
1237 replay_barrier $SINGLEMDS
1238 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000030c"
1240 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
1242 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1244 run_test 52 "time out lock replay (3764)"
1247 replay_barrier_nodf $SINGLEMDS
1254 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1256 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1261 run_test 53 "|X| open request and close reply while two MDC requests in flight"
1264 replay_barrier $SINGLEMDS
1265 createmany -o $DIR/$tfile 20
1266 unlinkmany $DIR/$tfile 20
1269 run_test 54 "|X| open request and close reply while two MDC requests in flight"
1271 #b3440 ASSERTION(rec->ur_fid2->id) failed
1273 sysctl -w portals.debug=-1 portals.debug_mb=25
1274 ln -s foo $DIR/$tfile
1275 replay_barrier $SINGLEMDS
1276 #drop_reply "cat $DIR/$tfile"
1280 run_test 55 "don't replay a symlink open request (3440)"
1282 #b3761 ASSERTION(hash != 0) failed
1284 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1285 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012b"
1288 # give a chance for touch to run
1290 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
1291 wait $pid || return 1
1295 run_test 56 "let MDS_CHECK_RESENT return the original return code instead of 0"
1297 equals_msg test complete, cleaning up