# NOTE(review): This SOURCE is a subsampled excerpt of a Lustre recovery/replay
# test script (built on test-framework.sh).  Each line below carries its
# original file line number as an embedded prefix, and the gaps in that
# numbering show that most surrounding lines (test_NN() function headers,
# closing braces, replay/failover calls) are NOT visible here.  Because no
# definition boundary is visible, the code is left byte-identical; only
# review comments have been added.  Do not run this excerpt as-is.

# --- preamble: locate the test framework and the (LMV) config file ---
6 # This test needs to be run on the client
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
14 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
# skip gss test 0c when kerberos security is not in use
23 if [ `using_krb5_sec $SECURITY` == 'n' ] ; then
24 ALWAYS_EXCEPT="0c $ALWAYS_EXCEPT"
# --- facet configuration fragment: clustered (LMV) vs. single MDS, then
# --- LOV striping, two OSTs, and the client mount ---
31 if [ "$MDSCOUNT" -gt 1 ]; then
33 for mds in `mds_list`; do
34 MDSDEV=$TMP/${mds}-`hostname`
35 add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
37 add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
38 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
41 add_mds mds1 --dev $MDSDEV --size $MDSSIZE
42 if [ ! -z "$mds1failover_HOST" ]; then
43 add_mdsfailover mds1 --dev $MDSDEV --size $MDSSIZE
45 add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
46 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
50 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
51 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
52 add_client client $MDS --lov lov1 --path $MOUNT
# --- cleanup fragment: unmount the client, stop MDS facets and both OSTs ---
58 # make sure we are using the primary MDS, so the config log will
59 # be able to clean up properly.
60 activemds=`facet_active mds1`
61 if [ $activemds != "mds1" ]; then
64 zconf_umount `hostname` $MOUNT
65 for mds in `mds_list`; do
66 stop $mds ${FORCE} $MDSLCONFARGS
70 stop ost2 ${FORCE} --dump cleanup.log
71 stop ost ${FORCE} --dump cleanup.log
74 if [ "$ONLY" == "cleanup" ]; then
75 sysctl -w portals.debug=0 || true
# --- setup fragment: start KDC/gss daemons, reformat+start OSTs and MDSes,
# --- then mount the client if not already mounted ---
80 SETUP=${SETUP:-"setup"}
81 CLEANUP=${CLEANUP:-"cleanup"}
86 start_krb5_kdc || exit 1
87 start ost --reformat $OSTLCONFARGS
88 start ost2 --reformat $OSTLCONFARGS
89 start_lsvcgssd || exit 2
91 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
92 for mds in `mds_list`; do
93 start $mds --reformat $MDSLCONFARGS
95 grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
100 if [ "$ONLY" == "setup" ]; then
# --- test bodies below are fragments; each "run_test N ..." registers the
# --- (mostly invisible) test_N function with the framework.  "|X|" / "|x|"
# --- in descriptions marks where the MDS failover/replay happens. ---
110 run_test 0 "empty replay"
113 # this test attempts to trigger a race in the precreation code,
114 # and must run before any other objects are created on the filesystem
116 createmany -o $DIR/$tfile 20 || return 1
117 unlinkmany $DIR/$tfile 20 || return 2
119 run_test 0b "ensure object created after recover exists. (3284)"
# gss failure injection during an otherwise empty replay
122 # drop gss error notification
126 # drop gss init request
130 run_test 0c "empty replay with gss init failures"
136 $CHECKSTAT -t file $DIR/$tfile || return 1
139 run_test 1 "simple create"
145 $CHECKSTAT -t file $DIR/$tfile || return 1
151 ./mcreate $DIR/$tfile
155 $CHECKSTAT -t file $DIR/$tfile || return 1
163 o_directory $DIR/$tfile
165 $CHECKSTAT -t file $DIR/$tfile || return 2
168 run_test 3a "replay failed open(O_DIRECTORY)"
# tests 3b/3c inject MDS-side allocation failures via lustre.fail_loc
# (0x80000000 is the OBD_FAIL_ONCE flag OR'd onto the fail code)
172 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
173 do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
175 do_facet mds "sysctl -w lustre.fail_loc=0"
177 $CHECKSTAT -t file $DIR/$tfile && return 2
180 run_test 3b "replay failed open -ENOMEM"
184 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
185 do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
187 do_facet mds "sysctl -w lustre.fail_loc=0"
190 $CHECKSTAT -t file $DIR/$tfile && return 2
193 run_test 3c "replay failed open -ENOMEM"
# create N tagged files before failover, verify contents after replay
197 for i in `seq 10`; do
198 echo "tag-$i" > $DIR/$tfile-$i
201 for i in `seq 10`; do
202 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
205 run_test 4 "|x| 10 open(O_CREAT)s"
211 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
213 run_test 4b "|x| rm 10 files"
215 # The idea is to get past the first block of precreated files on both
216 # osts, and then replay.
219 for i in `seq 220`; do
220 echo "tag-$i" > $DIR/$tfile-$i
223 for i in `seq 220`; do
224 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
228 # waiting for commitment of removal
230 run_test 5 "|x| 220 open(O_CREAT)"
236 mcreate $DIR/$tdir/$tfile
238 $CHECKSTAT -t dir $DIR/$tdir || return 1
239 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
241 # waiting for log process thread
243 run_test 6 "mkdir + contained create"
249 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
251 run_test 6b "|X| rmdir"
256 mcreate $DIR/$tdir/$tfile
258 $CHECKSTAT -t dir $DIR/$tdir || return 1
259 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
262 run_test 7 "mkdir |X| contained create"
# multiop holds the file open across the failover; USR1 tells it to close
266 multiop $DIR/$tfile mo_c &
271 $CHECKSTAT -t file $DIR/$tfile || return 1
272 kill -USR1 $MULTIPID || return 2
273 wait $MULTIPID || return 3
276 run_test 8 "creat open |X| close"
281 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
283 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
285 echo " old_inum == $old_inum, new_inum == $new_inum"
286 if [ $old_inum -eq $new_inum ] ;
288 echo " old_inum and new_inum match"
290 echo "!!!! old_inum and new_inum NOT match"
295 run_test 9 "|X| create (same inum/gen)"
300 mv $DIR/$tfile $DIR/$tfile-2
304 $CHECKSTAT $DIR/$tfile && return 1
305 $CHECKSTAT $DIR/$tfile-2 || return 2
309 run_test 10 "create |X| rename unlink"
313 echo "old" > $DIR/$tfile
314 mv $DIR/$tfile $DIR/$tfile-2
316 echo "new" > $DIR/$tfile
318 grep old $DIR/$tfile-2
320 grep new $DIR/$tfile || return 1
321 grep old $DIR/$tfile-2 || return 2
323 run_test 11 "create open write rename |X| create-old-name read"
327 multiop $DIR/$tfile o_tSc &
329 # give multiop a chance to open
334 wait $pid || return 1
337 [ -e $DIR/$tfile ] && return 2
340 run_test 12 "open, unlink |X| close"
343 # 1777 - replay open after committed chmod that would make
344 # a regular open a failure
347 multiop $DIR/$tfile O_wc &
349 # give multiop a chance to open
352 $CHECKSTAT -p 0 $DIR/$tfile
356 wait $pid || return 1
358 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
361 run_test 13 "open chmod 0 |x| write close"
# tests 14-31 all exercise mds_cleanup_orphans: open a file (or two),
# unlink while still open, fail over at various points, then close and
# verify the orphan is gone.  Bodies are fragments; the kill/replay
# lines between these statements are not visible in this excerpt.
364 multiop $DIR/$tfile O_tSc &
366 # give multiop a chance to open
370 kill -USR1 $pid || return 1
371 wait $pid || return 2
374 [ -e $DIR/$tfile ] && return 3
377 run_test 14 "open(O_CREAT), unlink |X| close"
380 multiop $DIR/$tfile O_tSc &
382 # give multiop a chance to open
386 touch $DIR/g11 || return 1
388 wait $pid || return 2
391 [ -e $DIR/$tfile ] && return 3
392 touch $DIR/h11 || return 4
395 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
402 mcreate $DIR/$tfile-2
404 [ -e $DIR/$tfile ] && return 1
405 [ -e $DIR/$tfile-2 ] || return 2
406 munlink $DIR/$tfile-2 || return 3
408 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
412 multiop $DIR/$tfile O_c &
414 # give multiop a chance to open
417 kill -USR1 $pid || return 1
418 wait $pid || return 2
419 $CHECKSTAT -t file $DIR/$tfile || return 3
422 run_test 17 "|X| open(O_CREAT), |replay| close"
426 multiop $DIR/$tfile O_tSc &
428 # give multiop a chance to open
431 touch $DIR/$tfile-2 || return 1
432 echo "pid: $pid will close"
434 wait $pid || return 2
437 [ -e $DIR/$tfile ] && return 3
438 [ -e $DIR/$tfile-2 ] || return 4
439 # this touch frequently fails
440 touch $DIR/$tfile-3 || return 5
441 munlink $DIR/$tfile-2 || return 6
442 munlink $DIR/$tfile-3 || return 7
445 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
447 # bug 1855 (a simpler form of test_11 above)
451 echo "old" > $DIR/$tfile
452 mv $DIR/$tfile $DIR/$tfile-2
453 grep old $DIR/$tfile-2
455 grep old $DIR/$tfile-2 || return 2
457 run_test 19 "|X| mcreate, open, write, rename "
461 multiop $DIR/$tfile O_tSc &
463 # give multiop a chance to open
469 wait $pid || return 1
470 [ -e $DIR/$tfile ] && return 2
473 run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
477 multiop $DIR/$tfile O_tSc &
479 # give multiop a chance to open
482 touch $DIR/g11 || return 1
486 wait $pid || return 2
487 [ -e $DIR/$tfile ] && return 3
488 touch $DIR/h11 || return 4
491 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
494 multiop $DIR/$tfile O_tSc &
496 # give multiop a chance to open
504 wait $pid || return 1
505 [ -e $DIR/$tfile ] && return 2
508 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
511 multiop $DIR/$tfile O_tSc &
513 # give multiop a chance to open
518 touch $DIR/g11 || return 1
522 wait $pid || return 2
523 [ -e $DIR/$tfile ] && return 3
524 touch $DIR/h11 || return 4
527 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
530 multiop $DIR/$tfile O_tSc &
532 # give multiop a chance to open
539 wait $pid || return 1
540 [ -e $DIR/$tfile ] && return 2
543 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
546 multiop $DIR/$tfile O_tSc &
548 # give multiop a chance to open
555 wait $pid || return 1
556 [ -e $DIR/$tfile ] && return 2
559 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
# two-file variants: pid1/pid2 are the two background multiop processes
563 multiop $DIR/$tfile-1 O_tSc &
565 multiop $DIR/$tfile-2 O_tSc &
567 # give multiop a chance to open
572 wait $pid2 || return 1
576 wait $pid1 || return 2
577 [ -e $DIR/$tfile-1 ] && return 3
578 [ -e $DIR/$tfile-2 ] && return 4
581 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
585 multiop $DIR/$tfile-1 O_tSc &
587 multiop $DIR/$tfile-2 O_tSc &
589 # give multiop a chance to open
596 wait $pid1 || return 1
598 wait $pid2 || return 2
599 [ -e $DIR/$tfile-1 ] && return 3
600 [ -e $DIR/$tfile-2 ] && return 4
603 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
606 multiop $DIR/$tfile-1 O_tSc &
608 multiop $DIR/$tfile-2 O_tSc &
610 # give multiop a chance to open
616 wait $pid2 || return 1
620 wait $pid1 || return 2
621 [ -e $DIR/$tfile-1 ] && return 3
622 [ -e $DIR/$tfile-2 ] && return 4
625 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
628 multiop $DIR/$tfile-1 O_tSc &
630 multiop $DIR/$tfile-2 O_tSc &
632 # give multiop a chance to open
640 wait $pid1 || return 1
642 wait $pid2 || return 2
643 [ -e $DIR/$tfile-1 ] && return 3
644 [ -e $DIR/$tfile-2 ] && return 4
647 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
650 multiop $DIR/$tfile-1 O_tSc &
652 multiop $DIR/$tfile-2 O_tSc &
654 # give multiop a chance to open
662 wait $pid1 || return 1
664 wait $pid2 || return 2
665 [ -e $DIR/$tfile-1 ] && return 3
666 [ -e $DIR/$tfile-2 ] && return 4
669 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
672 multiop $DIR/$tfile-1 O_tSc &
674 multiop $DIR/$tfile-2 O_tSc &
676 # give multiop a chance to open
684 wait $pid1 || return 1
686 wait $pid2 || return 2
687 [ -e $DIR/$tfile-1 ] && return 3
688 [ -e $DIR/$tfile-2 ] && return 4
691 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
693 # tests for bug 2104; completion without crashing is success. The close is
694 # stale, but we always return 0 for close, so the app never sees it.
696 multiop $DIR/$tfile O_c &
698 multiop $DIR/$tfile O_c &
700 # give multiop a chance to open
# NOTE(review): `a || b && c || d` groups as `((a || b) && c) || d` in shell,
# so the `return 1` here only fires if the second df fails — presumably
# intended, but worth confirming against the full original.
703 df $MOUNT || sleep 1 && df $MOUNT || return 1
709 run_test 32 "close() notices client eviction; close() after client eviction"
711 # Abort recovery before client complete
716 # this file should be gone, because the replay was aborted
717 $CHECKSTAT -t file $DIR/$tfile && return 1
720 run_test 33 "abort recovery before client does replay"
723 multiop $DIR/$tfile O_c &
725 # give multiop a chance to open
732 [ -e $DIR/$tfile ] && return 1
736 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
738 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
742 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
743 do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
748 # give a chance to remove from MDS
750 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
752 run_test 35 "test recovery from llog for unlink op"
754 # b=2432 resent cancel after replay uses wrong cookie,
755 # so don't resend cancels
759 checkstat $DIR/$tfile
762 if dmesg | grep "unknown lock cookie"; then
763 echo "cancel after replay failed"
767 run_test 36 "don't resend cancel"
770 # directory orphans can't be unlinked from PENDING directory
772 rmdir $DIR/$tfile 2>/dev/null
773 multiop $DIR/$tfile dD_c &
775 # give multiop a chance to open
780 # clear the dmesg buffer so we only see errors from this recovery
784 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
788 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
# tests 38/39: half the files are unlinked before the event, half after;
# after recovery none may remain
791 createmany -o $DIR/$tfile-%d 800
792 unlinkmany $DIR/$tfile-%d 0 400
795 unlinkmany $DIR/$tfile-%d 400 400
797 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
799 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
802 createmany -o $DIR/$tfile-%d 800
804 unlinkmany $DIR/$tfile-%d 0 400
806 unlinkmany $DIR/$tfile-%d 400 400
808 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
810 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
# helper fragment: sum ost_write counts across all OSC stats files
813 cat /proc/fs/lustre/osc/*/stats |
814 awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
819 $LCTL mark multiop $MOUNT/$tfile OS_c
820 multiop $MOUNT/$tfile OS_c &
822 writeme -s $MOUNT/${tfile}-2 &
826 #define OBD_FAIL_MDS_CONNECT_NET 0x117
827 do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
# compare OST write counts sampled before/after the induced recovery:
# writes must keep flowing while the MDS is recovering
829 stat1=`count_ost_writes`
831 stat2=`count_ost_writes`
832 echo "$stat1, $stat2"
833 if [ $stat1 -lt $stat2 ]; then
834 echo "writes continuing during recovery"
837 echo "writes not continuing during recovery, bug 2477"
840 echo "waiting for writeme $WRITE_PID"
844 echo "waiting for multiop $PID"
845 wait $PID || return 2
846 do_facet client munlink $MOUNT/$tfile || return 3
847 do_facet client munlink $MOUNT/${tfile}-2 || return 3
850 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
854 # make sure that a read to one osc doesn't try to double-unlock its page just
855 # because another osc is invalid. trigger_group_io used to mistakenly return
856 # an error if any oscs were invalid even after having successfully put rpcs
857 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
858 # the page, guaranteeing that the unlock from the RPC completion would
859 # assert on trying to unlock the unlocked page.
861 local f=$MOUNT/$tfile
862 # make sure the start of the file is ost1
863 lfs setstripe $f $((128 * 1024)) 0 0
864 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
866 # fail ost2 and read from ost1
867 local osc2_dev=`$LCTL device_list | \
868 awk '(/ost2.*client_facet/){print $4}' `
869 $LCTL --device %$osc2_dev deactivate
870 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
871 $LCTL --device %$osc2_dev activate
874 run_test 41 "read from a valid osc while other oscs are invalid"
876 # test MDS recovery after ost failure
878 blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
879 createmany -o $DIR/$tfile-%d 800
881 unlinkmany $DIR/$tfile-%d 0 400
884 # osc is evicted, fs is smaller
885 blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
886 [ $blocks_after -lt $blocks ] || return 1
887 echo wait for MDS to timeout and recover
888 sleep $((TIMEOUT * 2))
889 unlinkmany $DIR/$tfile-%d 400 400
890 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
892 run_test 42 "recovery after ost failure"
895 # timeout in MDS/OST recovery RPC will LBUG MDS
899 # OBD_FAIL_OST_CREATE_NET 0x204
900 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
902 df $MOUNT || return 1
904 do_facet ost "sysctl -w lustre.fail_loc=0"
908 run_test 43 "mds osc import failure during recovery; don't LBUG"
# find the MDC device number from the lustre device list for lctl recover
911 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
912 do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
913 $LCTL --device $mdcdev recover
915 do_facet mds "sysctl -w lustre.fail_loc=0"
918 run_test 44 "race in target handle connect"
920 # Handle failed close
922 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
923 $LCTL --device $mdcdev recover
925 multiop $DIR/$tfile O_c &
929 # This will cause the CLOSE to fail before even
930 # allocating a reply buffer
931 $LCTL --device $mdcdev deactivate
935 wait $pid || return 1
937 $LCTL --device $mdcdev activate
939 $CHECKSTAT -t file $DIR/$tfile || return 2
942 run_test 45 "Handle failed close"
946 drop_reply "touch $DIR/$tfile"
948 # ironically, the previous test, 45, will cause a real forced close,
949 # so just look for one for this test
950 dmesg | grep -i "force closing client file handle for $tfile" && return 1
953 run_test 46 "Don't leak file handle after open resend (3325)"
958 # create some files to make sure precreate has been done on all
959 # OSTs. (just in case this test is run independently)
960 createmany -o $DIR/$tfile 20 || return 1
962 # OBD_FAIL_OST_CREATE_NET 0x204
964 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
965 df $MOUNT || return 2
967 # let the MDS discover the OST failure, attempt to recover, fail
969 sleep $((3 * TIMEOUT))
971 # Without 2824, this createmany would hang
972 createmany -o $DIR/$tfile 20 || return 3
973 unlinkmany $DIR/$tfile 20 || return 4
975 do_facet ost "sysctl -w lustre.fail_loc=0"
978 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
982 createmany -o $DIR/${tfile}- 100
983 $CHECKSTAT $DIR/${tfile}-99 || return 1
985 df $MOUNT || df $MOUNT || return 2
987 $CHECKSTAT $DIR/${tfile}-99 || return 3
992 unlinkmany $DIR/${tfile}- 100 || return 4
993 if dmesg | grep "back in time"; then
994 echo "server went back in time!"
999 run_test 48 "Don't lose transno when client is evicted (2525)"
1001 # b=3550 - replay of unlink
1004 createmany -o $DIR/$tfile-%d 400 || return 1
1005 unlinkmany $DIR/$tfile-%d 0 400 || return 2
1007 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1009 run_test 49 "re-write records to llog as written during fail"
1012 local osc_dev=`$LCTL device_list | \
1013 awk '(/ost_svc_mds1_svc/){print $4}' `
1014 $LCTL --device %$osc_dev recover && $LCTL --device %$osc_dev recover
1015 # give the mds_lov_sync threads a chance to run
1018 run_test 50 "Double OSC recovery, don't LASSERT (3812)"
1020 # b3764 timed out lock replay
1023 cancel_lru_locks MDC
1025 multiop $DIR/$tfile s
# fail_loc 0x8000030c: lock-replay fault injection on mds1 (once), then clear
1027 do_facet mds1 "sysctl -w lustre.fail_loc=0x8000030c"
1029 do_facet mds1 "sysctl -w lustre.fail_loc=0x0"
# NOTE(review): this checks $tfile-* but the test only stats $tfile above;
# likely copied from tests 38/39/49 — verify against the full original.
1031 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
1033 run_test 52 "time out lock replay (3764)"
1035 equals_msg test complete, cleaning up