6 # This test needs to be run on the client
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
14 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
21 # 46 - The MDS will always have to force close the cached opens
24 if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
25 ALWAYS_EXCEPT="0c $ALWAYS_EXCEPT"
32 if [ "$MDSCOUNT" -gt 1 ]; then
34 for mds in `mds_list`; do
35 MDSDEV=$TMP/${mds}-`hostname`
36 add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
38 add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
39 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
42 add_mds $SINGLEMDS --dev $MDSDEV --size $MDSSIZE
43 if [ ! -z "$$SINGLEMDSfailover_HOST" ]; then
44 add_mdsfailover $SINGLEMDS --dev $MDSDEV --size $MDSSIZE
46 add_lov lov1 $SINGLEMDS --stripe_sz $STRIPE_BYTES \
47 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
51 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
52 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
53 add_client client $MDS --lov lov1 --path $MOUNT
59 # make sure we are using the primary MDS, so the config log will
60 # be able to clean up properly.
61 activemds=`facet_active $SINGLEMDS`
62 if [ $activemds != "$SINGLEMDS" ]; then
65 zconf_umount `hostname` $MOUNT
66 for mds in `mds_list`; do
67 stop $mds ${FORCE} $MDSLCONFARGS
71 stop ost2 ${FORCE} --dump cleanup.log
72 stop ost ${FORCE} --dump cleanup.log
75 if [ "$ONLY" == "cleanup" ]; then
76 sysctl -w portals.debug=0 || true
81 SETUP=${SETUP:-"setup"}
82 CLEANUP=${CLEANUP:-"cleanup"}
87 start_krb5_kdc || exit 1
88 start ost --reformat $OSTLCONFARGS
89 start ost2 --reformat $OSTLCONFARGS
90 start_lsvcgssd || exit 2
92 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
93 for mds in `mds_list`; do
94 start $mds --reformat $MDSLCONFARGS
96 grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
101 if [ "$ONLY" == "setup" ]; then
108 replay_barrier $SINGLEMDS
111 run_test 0 "empty replay"
114 # this test attempts to trigger a race in the precreation code,
115 # and must run before any other objects are created on the filesystem
117 createmany -o $DIR/$tfile 20 || return 1
118 unlinkmany $DIR/$tfile 20 || return 2
120 run_test 0b "ensure object created after recover exists. (3284)"
123 if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
124 echo "Skip 0c in non-gss mode"
127 # drop gss error notification
128 replay_barrier $SINGLEMDS
129 fail_drop $SINGLEMDS 0x760
131 # drop gss init request
132 replay_barrier $SINGLEMDS
133 fail_drop $SINGLEMDS 0x780
135 run_test 0c "empty replay with gss init failures"
138 replay_barrier $SINGLEMDS
141 $CHECKSTAT -t file $DIR/$tfile || return 1
144 run_test 1 "simple create"
147 replay_barrier $SINGLEMDS
150 $CHECKSTAT -t file $DIR/$tfile || return 1
156 ./mcreate $DIR/$tfile
157 replay_barrier $SINGLEMDS
160 $CHECKSTAT -t file $DIR/$tfile || return 1
166 replay_barrier $SINGLEMDS
168 o_directory $DIR/$tfile
170 $CHECKSTAT -t file $DIR/$tfile || return 2
173 run_test 3a "replay failed open(O_DIRECTORY)"
176 replay_barrier $SINGLEMDS
177 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
178 do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
180 do_facet mds "sysctl -w lustre.fail_loc=0"
182 $CHECKSTAT -t file $DIR/$tfile && return 2
185 run_test 3b "replay failed open -ENOMEM"
188 replay_barrier $SINGLEMDS
189 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
190 do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
192 do_facet mds "sysctl -w lustre.fail_loc=0"
195 $CHECKSTAT -t file $DIR/$tfile && return 2
198 run_test 3c "replay failed open -ENOMEM"
201 replay_barrier $SINGLEMDS
202 for i in `seq 10`; do
203 echo "tag-$i" > $DIR/$tfile-$i
206 for i in `seq 10`; do
207 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
210 run_test 4 "|x| 10 open(O_CREAT)s"
213 replay_barrier $SINGLEMDS
216 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
218 run_test 4b "|x| rm 10 files"
220 # The idea is to get past the first block of precreated files on both
221 # osts, and then replay.
223 replay_barrier $SINGLEMDS
224 for i in `seq 220`; do
225 echo "tag-$i" > $DIR/$tfile-$i
228 for i in `seq 220`; do
229 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
233 # waiting for commitment of removal
235 run_test 5 "|x| 220 open(O_CREAT)"
239 replay_barrier $SINGLEMDS
241 mcreate $DIR/$tdir/$tfile
243 $CHECKSTAT -t dir $DIR/$tdir || return 1
244 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
246 # waiting for log process thread
248 run_test 6 "mkdir + contained create"
251 replay_barrier $SINGLEMDS
254 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
256 run_test 6b "|X| rmdir"
260 replay_barrier $SINGLEMDS
261 mcreate $DIR/$tdir/$tfile
263 $CHECKSTAT -t dir $DIR/$tdir || return 1
264 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
267 run_test 7 "mkdir |X| contained create"
270 replay_barrier $SINGLEMDS
271 multiop $DIR/$tfile mo_c &
276 $CHECKSTAT -t file $DIR/$tfile || return 1
277 kill -USR1 $MULTIPID || return 2
278 wait $MULTIPID || return 3
281 run_test 8 "creat open |X| close"
284 replay_barrier $SINGLEMDS
286 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
288 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
290 echo " old_inum == $old_inum, new_inum == $new_inum"
291 if [ $old_inum -eq $new_inum ] ;
293 echo " old_inum and new_inum match"
295 echo "!!!! old_inum and new_inum NOT match"
300 run_test 9 "|X| create (same inum/gen)"
304 replay_barrier $SINGLEMDS
305 mv $DIR/$tfile $DIR/$tfile-2
309 $CHECKSTAT $DIR/$tfile && return 1
310 $CHECKSTAT $DIR/$tfile-2 || return 2
314 run_test 10 "create |X| rename unlink"
318 echo "old" > $DIR/$tfile
319 mv $DIR/$tfile $DIR/$tfile-2
320 replay_barrier $SINGLEMDS
321 echo "new" > $DIR/$tfile
323 grep old $DIR/$tfile-2
325 grep new $DIR/$tfile || return 1
326 grep old $DIR/$tfile-2 || return 2
328 run_test 11 "create open write rename |X| create-old-name read"
332 multiop $DIR/$tfile o_tSc &
334 # give multiop a chance to open
337 replay_barrier $SINGLEMDS
339 wait $pid || return 1
342 [ -e $DIR/$tfile ] && return 2
345 run_test 12 "open, unlink |X| close"
348 # 1777 - replay open after committed chmod that would make
349 # a regular open a failure
352 multiop $DIR/$tfile O_wc &
354 # give multiop a chance to open
357 $CHECKSTAT -p 0 $DIR/$tfile
358 replay_barrier $SINGLEMDS
361 wait $pid || return 1
363 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
366 run_test 13 "open chmod 0 |x| write close"
369 multiop $DIR/$tfile O_tSc &
371 # give multiop a chance to open
374 replay_barrier $SINGLEMDS
375 kill -USR1 $pid || return 1
376 wait $pid || return 2
379 [ -e $DIR/$tfile ] && return 3
382 run_test 14 "open(O_CREAT), unlink |X| close"
385 multiop $DIR/$tfile O_tSc &
387 # give multiop a chance to open
390 replay_barrier $SINGLEMDS
391 touch $DIR/g11 || return 1
393 wait $pid || return 2
396 [ -e $DIR/$tfile ] && return 3
397 touch $DIR/h11 || return 4
400 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
404 replay_barrier $SINGLEMDS
407 mcreate $DIR/$tfile-2
409 [ -e $DIR/$tfile ] && return 1
410 [ -e $DIR/$tfile-2 ] || return 2
411 munlink $DIR/$tfile-2 || return 3
413 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
416 replay_barrier $SINGLEMDS
417 multiop $DIR/$tfile O_c &
419 # give multiop a chance to open
422 kill -USR1 $pid || return 1
423 wait $pid || return 2
424 $CHECKSTAT -t file $DIR/$tfile || return 3
427 run_test 17 "|X| open(O_CREAT), |replay| close"
430 replay_barrier $SINGLEMDS
431 multiop $DIR/$tfile O_tSc &
433 # give multiop a chance to open
436 touch $DIR/$tfile-2 || return 1
437 echo "pid: $pid will close"
439 wait $pid || return 2
442 [ -e $DIR/$tfile ] && return 3
443 [ -e $DIR/$tfile-2 ] || return 4
444 # this touch frequently fails
445 touch $DIR/$tfile-3 || return 5
446 munlink $DIR/$tfile-2 || return 6
447 munlink $DIR/$tfile-3 || return 7
450 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
452 # bug 1855 (a simpler form of test_11 above)
454 replay_barrier $SINGLEMDS
456 echo "old" > $DIR/$tfile
457 mv $DIR/$tfile $DIR/$tfile-2
458 grep old $DIR/$tfile-2
460 grep old $DIR/$tfile-2 || return 2
462 run_test 19 "|X| mcreate, open, write, rename "
465 replay_barrier $SINGLEMDS
466 multiop $DIR/$tfile O_tSc &
468 # give multiop a chance to open
474 wait $pid || return 1
475 [ -e $DIR/$tfile ] && return 2
478 run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
481 replay_barrier $SINGLEMDS
482 multiop $DIR/$tfile O_tSc &
484 # give multiop a chance to open
487 touch $DIR/g11 || return 1
491 wait $pid || return 2
492 [ -e $DIR/$tfile ] && return 3
493 touch $DIR/h11 || return 4
496 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
499 multiop $DIR/$tfile O_tSc &
501 # give multiop a chance to open
504 replay_barrier $SINGLEMDS
509 wait $pid || return 1
510 [ -e $DIR/$tfile ] && return 2
513 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
516 multiop $DIR/$tfile O_tSc &
518 # give multiop a chance to open
521 replay_barrier $SINGLEMDS
523 touch $DIR/g11 || return 1
527 wait $pid || return 2
528 [ -e $DIR/$tfile ] && return 3
529 touch $DIR/h11 || return 4
532 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
535 multiop $DIR/$tfile O_tSc &
537 # give multiop a chance to open
540 replay_barrier $SINGLEMDS
544 wait $pid || return 1
545 [ -e $DIR/$tfile ] && return 2
548 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
551 multiop $DIR/$tfile O_tSc &
553 # give multiop a chance to open
557 replay_barrier $SINGLEMDS
560 wait $pid || return 1
561 [ -e $DIR/$tfile ] && return 2
564 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
567 replay_barrier $SINGLEMDS
568 multiop $DIR/$tfile-1 O_tSc &
570 multiop $DIR/$tfile-2 O_tSc &
572 # give multiop a chance to open
577 wait $pid2 || return 1
581 wait $pid1 || return 2
582 [ -e $DIR/$tfile-1 ] && return 3
583 [ -e $DIR/$tfile-2 ] && return 4
586 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
589 replay_barrier $SINGLEMDS
590 multiop $DIR/$tfile-1 O_tSc &
592 multiop $DIR/$tfile-2 O_tSc &
594 # give multiop a chance to open
601 wait $pid1 || return 1
603 wait $pid2 || return 2
604 [ -e $DIR/$tfile-1 ] && return 3
605 [ -e $DIR/$tfile-2 ] && return 4
608 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
611 multiop $DIR/$tfile-1 O_tSc &
613 multiop $DIR/$tfile-2 O_tSc &
615 # give multiop a chance to open
617 replay_barrier $SINGLEMDS
621 wait $pid2 || return 1
625 wait $pid1 || return 2
626 [ -e $DIR/$tfile-1 ] && return 3
627 [ -e $DIR/$tfile-2 ] && return 4
630 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
633 multiop $DIR/$tfile-1 O_tSc &
635 multiop $DIR/$tfile-2 O_tSc &
637 # give multiop a chance to open
639 replay_barrier $SINGLEMDS
645 wait $pid1 || return 1
647 wait $pid2 || return 2
648 [ -e $DIR/$tfile-1 ] && return 3
649 [ -e $DIR/$tfile-2 ] && return 4
652 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
655 multiop $DIR/$tfile-1 O_tSc &
657 multiop $DIR/$tfile-2 O_tSc &
659 # give multiop a chance to open
664 replay_barrier $SINGLEMDS
667 wait $pid1 || return 1
669 wait $pid2 || return 2
670 [ -e $DIR/$tfile-1 ] && return 3
671 [ -e $DIR/$tfile-2 ] && return 4
674 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
677 multiop $DIR/$tfile-1 O_tSc &
679 multiop $DIR/$tfile-2 O_tSc &
681 # give multiop a chance to open
685 replay_barrier $SINGLEMDS
689 wait $pid1 || return 1
691 wait $pid2 || return 2
692 [ -e $DIR/$tfile-1 ] && return 3
693 [ -e $DIR/$tfile-2 ] && return 4
696 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
698 # tests for bug 2104; completion without crashing is success. The close is
699 # stale, but we always return 0 for close, so the app never sees it.
701 multiop $DIR/$tfile O_c &
703 multiop $DIR/$tfile O_c &
705 # give multiop a chance to open
708 df $MOUNT || sleep 1 && df $MOUNT || return 1
714 run_test 32 "close() notices client eviction; close() after client eviction"
716 # Abort recovery before client complete
718 replay_barrier $SINGLEMDS
720 fail_abort $SINGLEMDS
721 # this file should be gone, because the replay was aborted
722 $CHECKSTAT -t file $DIR/$tfile && return 1
725 run_test 33 "abort recovery before client does replay"
728 multiop $DIR/$tfile O_c &
730 # give multiop a chance to open
734 replay_barrier $SINGLEMDS
735 fail_abort $SINGLEMDS
737 [ -e $DIR/$tfile ] && return 1
741 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
743 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
747 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
748 do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
753 # give a chance to remove from MDS
754 fail_abort $SINGLEMDS
755 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
757 run_test 35 "test recovery from llog for unlink op"
759 # b=2432 resent cancel after replay uses wrong cookie,
760 # so don't resend cancels
762 replay_barrier $SINGLEMDS
764 checkstat $DIR/$tfile
765 facet_failover $SINGLEMDS
767 if dmesg | grep "unknown lock cookie"; then
768 echo "cancel after replay failed"
772 run_test 36 "don't resend cancel"
775 # directory orphans can't be unlinked from PENDING directory
777 rmdir $DIR/$tfile 2>/dev/null
778 multiop $DIR/$tfile dD_c &
780 # give multiop a chance to open
784 replay_barrier $SINGLEMDS
785 # clear the dmesg buffer so we only see errors from this recovery
787 fail_abort $SINGLEMDS
789 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
793 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
796 createmany -o $DIR/$tfile-%d 800
797 unlinkmany $DIR/$tfile-%d 0 400
798 replay_barrier $SINGLEMDS
800 unlinkmany $DIR/$tfile-%d 400 400
802 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
804 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
807 createmany -o $DIR/$tfile-%d 800
808 replay_barrier $SINGLEMDS
809 unlinkmany $DIR/$tfile-%d 0 400
811 unlinkmany $DIR/$tfile-%d 400 400
813 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
815 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
818 cat /proc/fs/lustre/osc/*/stats |
819 awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
824 $LCTL mark multiop $MOUNT/$tfile OS_c
825 multiop $MOUNT/$tfile OS_c &
827 writeme -s $MOUNT/${tfile}-2 &
830 facet_failover $SINGLEMDS
831 #define OBD_FAIL_MDS_CONNECT_NET 0x117
832 do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
834 stat1=`count_ost_writes`
836 stat2=`count_ost_writes`
837 echo "$stat1, $stat2"
838 if [ $stat1 -lt $stat2 ]; then
839 echo "writes continuing during recovery"
842 echo "writes not continuing during recovery, bug 2477"
845 echo "waiting for writeme $WRITE_PID"
849 echo "waiting for multiop $PID"
850 wait $PID || return 2
851 do_facet client munlink $MOUNT/$tfile || return 3
852 do_facet client munlink $MOUNT/${tfile}-2 || return 3
855 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
859 # make sure that a read to one osc doesn't try to double-unlock its page just
860 # because another osc is invalid. trigger_group_io used to mistakenly return
861 # an error if any oscs were invalid even after having successfully put rpcs
862 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
863 # the page, guaranteeing that the unlock from the RPC completion would
864 # assert on trying to unlock the unlocked page.
866 local f=$MOUNT/$tfile
867 # make sure the start of the file is ost1
868 lfs setstripe $f $((128 * 1024)) 0 0
869 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
871 # fail ost2 and read from ost1
872 local osc2_dev=`$LCTL device_list | \
873 awk '(/ost2.*client_facet/){print $4}' `
874 $LCTL --device %$osc2_dev deactivate
875 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
876 $LCTL --device %$osc2_dev activate
879 run_test 41 "read from a valid osc while other oscs are invalid"
881 # test MDS recovery after ost failure
883 blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
884 createmany -o $DIR/$tfile-%d 800
886 unlinkmany $DIR/$tfile-%d 0 400
889 # osc is evicted, fs is smaller
890 blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
891 [ $blocks_after -lt $blocks ] || return 1
892 echo wait for MDS to timeout and recover
893 sleep $((TIMEOUT * 2))
894 unlinkmany $DIR/$tfile-%d 400 400
895 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
897 run_test 42 "recovery after ost failure"
900 # timeout in MDS/OST recovery RPC will LBUG MDS
902 replay_barrier $SINGLEMDS
904 # OBD_FAIL_OST_CREATE_NET 0x204
905 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
906 facet_failover $SINGLEMDS
907 df $MOUNT || return 1
909 do_facet ost "sysctl -w lustre.fail_loc=0"
913 run_test 43 "mds osc import failure during recovery; don't LBUG"
916 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
917 do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
918 $LCTL --device $mdcdev recover
920 do_facet mds "sysctl -w lustre.fail_loc=0"
923 run_test 44 "race in target handle connect"
925 # Handle failed close
927 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
928 $LCTL --device $mdcdev recover
930 multiop $DIR/$tfile O_c &
934 # This will cause the CLOSE to fail before even
935 # allocating a reply buffer
936 $LCTL --device $mdcdev deactivate
940 wait $pid || return 1
942 $LCTL --device $mdcdev activate
945 $CHECKSTAT -t file $DIR/$tfile || return 2
948 run_test 45 "Handle failed close"
952 drop_reply "touch $DIR/$tfile"
954 # ironically, the previous test, 45, will cause a real forced close,
955 # so just look for one for this test
956 dmesg | grep -i "force closing client file handle for $tfile" && return 1
959 run_test 46 "Don't leak file handle after open resend (3325)"
964 # create some files to make sure precreate has been done on all
965 # OSTs. (just in case this test is run independently)
966 createmany -o $DIR/$tfile 20 || return 1
968 # OBD_FAIL_OST_CREATE_NET 0x204
970 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
971 df $MOUNT || return 2
973 # let the MDS discover the OST failure, attempt to recover, fail
975 sleep $((3 * TIMEOUT))
977 # Without 2824, this createmany would hang
978 createmany -o $DIR/$tfile 20 || return 3
979 unlinkmany $DIR/$tfile 20 || return 4
981 do_facet ost "sysctl -w lustre.fail_loc=0"
984 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
988 createmany -o $DIR/${tfile}- 100
989 $CHECKSTAT $DIR/${tfile}-99 || return 1
991 df $MOUNT || echo "first df failed"
993 df $MOUNT || return 2
995 $CHECKSTAT $DIR/${tfile}-99 || return 3
998 replay_barrier $SINGLEMDS
1000 unlinkmany $DIR/${tfile}- 100 || return 4
1001 if dmesg | grep "back in time"; then
1002 echo "server went back in time!"
1007 run_test 48 "Don't lose transno when client is evicted (2525)"
1009 # b=3550 - replay of unlink
1011 replay_barrier $SINGLEMDS
1012 createmany -o $DIR/$tfile-%d 400 || return 1
1013 unlinkmany $DIR/$tfile-%d 0 400 || return 2
1015 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1017 run_test 49 "re-write records to llog as written during fail"
1020 local osc_dev=`$LCTL device_list | \
1021 awk '(/ost_svc_$SINGLEMDS_svc/){print $4}' `
1022 $LCTL --device %$osc_dev recover && $LCTL --device %$osc_dev recover
1023 # give the mds_lov_sync threads a chance to run
1026 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1028 # bug 3462 - simultaneous MDC requests
1030 replay_barrier_nodf $SINGLEMDS
1031 mkdir -p $DIR/${tdir}-1
1032 mkdir -p $DIR/${tdir}-2
1033 touch $DIR/${tdir}-2/f
1034 multiop $DIR/${tdir}-1/f O_c &
1036 # give multiop a chance to open
1039 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1041 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1042 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 1
1046 wait $pid || return 2
1047 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1048 rm -rf $DIR/${tdir}-*
1050 run_test 51a "|X| close request while two MDC requests in flight"
1053 replay_barrier_nodf $SINGLEMDS
1054 mkdir -p $DIR/$tdir-1
1055 mkdir -p $DIR/$tdir-2
1056 multiop $DIR/$tdir-1/f O_c &
1058 # give multiop a chance to open
1061 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1062 touch $DIR/${tdir}-2/f &
1064 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1067 wait $pid || return 1
1071 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1072 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1073 rm -rf $DIR/${tdir}-*
1075 run_test 51b "|X| open request while two MDC requests in flight"
1078 replay_barrier_nodf $SINGLEMDS
1079 mkdir -p $DIR/${tdir}-1
1080 mkdir -p $DIR/${tdir}-2
1081 multiop $DIR/${tdir}-1/f O_c &
1083 # give multiop a chance to open
1086 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1087 touch $DIR/${tdir}-2/f &
1088 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1090 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1092 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1096 wait $pid || return 1
1097 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1098 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1099 rm -rf $DIR/${tdir}-*
1101 run_test 51c "|X| open request and close request while two MDC requests in flight"
1104 replay_barrier_nodf $SINGLEMDS
1105 mkdir -p $DIR/${tdir}-1
1106 mkdir -p $DIR/${tdir}-2
1107 touch $DIR/${tdir}-2/f
1108 multiop $DIR/${tdir}-1/f O_c &
1110 # give multiop a chance to open
1113 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1115 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1116 #$CHECKSTAT -t file $DIR/${tdir}-2/f || return 1
1120 wait $pid || return 2
1121 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3
1122 rm -rf $DIR/${tdir}-*
1124 run_test 51d "|X| close reply while two MDC requests in flight"
1127 replay_barrier_nodf $SINGLEMDS
1128 mkdir -p $DIR/$tdir-1
1129 mkdir -p $DIR/$tdir-2
1130 multiop $DIR/$tdir-1/f O_c &
1132 # give multiop a chance to open
1135 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1136 touch $DIR/${tdir}-2/f &
1138 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1141 wait $pid || return 1
1145 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1146 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1147 rm -rf $DIR/${tdir}-*
1149 run_test 51e "|X| open reply while two MDC requests in flight"
1152 replay_barrier_nodf $SINGLEMDS
1153 mkdir -p $DIR/${tdir}-1
1154 mkdir -p $DIR/${tdir}-2
1155 multiop $DIR/${tdir}-1/f O_c &
1157 # give multiop a chance to open
1160 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1161 touch $DIR/${tdir}-2/f &
1162 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1164 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1166 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1170 wait $pid || return 1
1171 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1172 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1173 rm -rf $DIR/${tdir}-*
1175 run_test 51f "|X| open reply and close reply while two MDC requests in flight"
1178 replay_barrier_nodf $SINGLEMDS
1179 mkdir -p $DIR/${tdir}-1
1180 mkdir -p $DIR/${tdir}-2
1181 multiop $DIR/${tdir}-1/f O_c &
1183 # give multiop a chance to open
1186 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119"
1187 touch $DIR/${tdir}-2/f &
1188 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1190 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115"
1192 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1196 wait $pid || return 1
1197 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1198 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1199 rm -rf $DIR/${tdir}-*
1201 run_test 51g "|X| open reply and close request while two MDC requests in flight"
1204 replay_barrier_nodf $SINGLEMDS
1205 mkdir -p $DIR/${tdir}-1
1206 mkdir -p $DIR/${tdir}-2
1207 multiop $DIR/${tdir}-1/f O_c &
1209 # give multiop a chance to open
1212 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1213 touch $DIR/${tdir}-2/f &
1214 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1216 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
1218 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1222 wait $pid || return 1
1223 $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
1224 $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
1225 rm -rf $DIR/${tdir}-*
1227 run_test 51h "|X| open request and close reply while two MDC requests in flight"
1229 # b3764 timed out lock replay
1232 cancel_lru_locks MDC
1234 multiop $DIR/$tfile s
1235 replay_barrier $SINGLEMDS
1236 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000030c"
1238 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
1240 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1242 run_test 52 "time out lock replay (3764)"
1245 replay_barrier_nodf $SINGLEMDS
1252 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
1254 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
1259 run_test 53 "|X| open request and close reply while two MDC requests in flight"
1262 replay_barrier $SINGLEMDS
1263 createmany -o $DIR/$tfile 20
1264 unlinkmany $DIR/$tfile 20
1267 run_test 54 "|X| open request and close reply while two MDC requests in flight"
1269 #b3440 ASSERTION(rec->ur_fid2->id) failed
1271 sysctl -w portals.debug=-1 portals.debug_mb=25
1272 ln -s foo $DIR/$tfile
1273 replay_barrier $SINGLEMDS
1274 #drop_reply "cat $DIR/$tfile"
1277 lctl dk /r/tmp/debug
1279 run_test 55 "don't replay a symlink open request (3440)"
1281 #b3761 ASSERTION(hash != 0) failed
1283 # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
1284 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012b"
1287 # give a chance for touch to run
1289 do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
1290 wait $pid || return 1
1294 run_test 56 "let MDS_CHECK_RESENT return the original return code instead of 0"
1296 equals_msg test complete, cleaning up