6 # This test needs to be run on the client
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
14 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
26 if [ "$MDSCOUNT" -gt 1 ]; then
28 for mds in `mds_list`; do
29 MDSDEV=$TMP/${mds}-`hostname`
30 add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1
33 add_lov_to_lmv lov1 lmv1 --stripe_sz $STRIPE_BYTES \
34 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
36 add_mds mds1 --dev $MDSDEV --size $MDSSIZE
37 if [ ! -z "$mds1failover_HOST" ]; then
38 add_mdsfailover mds1 --dev $MDSDEV --size $MDSSIZE
40 add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
41 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
45 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
46 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
47 add_client client --mds $MDS --lov lov1 --path $MOUNT
54 # make sure we are using the primary MDS, so the config log will
55 # be able to clean up properly.
56 activemds=`facet_active mds1`
57 if [ $activemds != "mds1" ]; then
60 zconf_umount `hostname` $MOUNT
61 for mds in `mds_list`; do
62 stop $mds ${FORCE} $MDSLCONFARGS
64 stop ost2 ${FORCE} --dump cleanup.log
65 stop ost ${FORCE} --dump cleanup.log
68 if [ "$ONLY" == "cleanup" ]; then
69 sysctl -w portals.debug=0 || true
74 SETUP=${SETUP:-"setup"}
75 CLEANUP=${CLEANUP:-"cleanup"}
80 start ost --reformat $OSTLCONFARGS
81 start ost2 --reformat $OSTLCONFARGS
82 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
83 for mds in `mds_list`; do
84 start $mds --reformat $MDSLCONFARGS
86 grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
91 if [ "$ONLY" == "setup" ]; then
101 run_test 0 "empty replay"
104 # this test attempts to trigger a race in the precreation code,
105 # and must run before any other objects are created on the filesystem
107 createmany -o $DIR/$tfile 20 || return 1
108 unlinkmany $DIR/$tfile 20 || return 2
110 run_test 0b "ensure object created after recover exists. (3284)"
116 $CHECKSTAT -t file $DIR/$tfile || return 1
119 run_test 1 "simple create"
125 $CHECKSTAT -t file $DIR/$tfile || return 1
131 ./mcreate $DIR/$tfile
135 $CHECKSTAT -t file $DIR/$tfile || return 1
143 o_directory $DIR/$tfile
145 $CHECKSTAT -t file $DIR/$tfile || return 2
148 run_test 3a "replay failed open(O_DIRECTORY)"
152 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
153 do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
155 do_facet mds "sysctl -w lustre.fail_loc=0"
157 $CHECKSTAT -t file $DIR/$tfile && return 2
160 run_test 3b "replay failed open -ENOMEM"
164 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
165 do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
167 do_facet mds "sysctl -w lustre.fail_loc=0"
170 $CHECKSTAT -t file $DIR/$tfile && return 2
173 run_test 3c "replay failed open -ENOMEM"
177 for i in `seq 10`; do
178 echo "tag-$i" > $DIR/$tfile-$i
181 for i in `seq 10`; do
182 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
185 run_test 4 "|x| 10 open(O_CREAT)s"
191 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
193 run_test 4b "|x| rm 10 files"
195 # The idea is to get past the first block of precreated files on both
196 # osts, and then replay.
199 for i in `seq 220`; do
200 echo "tag-$i" > $DIR/$tfile-$i
203 for i in `seq 220`; do
204 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
208 # waiting for commitment of removal
210 run_test 5 "|x| 220 open(O_CREAT)"
216 mcreate $DIR/$tdir/$tfile
218 $CHECKSTAT -t dir $DIR/$tdir || return 1
219 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
221 # waiting for log process thread
223 run_test 6 "mkdir + contained create"
229 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
231 run_test 6b "|X| rmdir"
236 mcreate $DIR/$tdir/$tfile
238 $CHECKSTAT -t dir $DIR/$tdir || return 1
239 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
242 run_test 7 "mkdir |X| contained create"
246 multiop $DIR/$tfile mo_c &
251 $CHECKSTAT -t file $DIR/$tfile || return 1
252 kill -USR1 $MULTIPID || return 2
253 wait $MULTIPID || return 3
256 run_test 8 "creat open |X| close"
261 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
263 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
265 echo " old_inum == $old_inum, new_inum == $new_inum"
266 if [ $old_inum -eq $new_inum ] ;
268 echo " old_inum and new_inum match"
270 echo "!!!! old_inum and new_inum NOT match"
275 run_test 9 "|X| create (same inum/gen)"
280 mv $DIR/$tfile $DIR/$tfile-2
283 $CHECKSTAT $DIR/$tfile && return 1
284 $CHECKSTAT $DIR/$tfile-2 ||return 2
288 run_test 10 "create |X| rename unlink"
292 echo "old" > $DIR/$tfile
293 mv $DIR/$tfile $DIR/$tfile-2
295 echo "new" > $DIR/$tfile
297 grep old $DIR/$tfile-2
299 grep new $DIR/$tfile || return 1
300 grep old $DIR/$tfile-2 || return 2
302 run_test 11 "create open write rename |X| create-old-name read"
306 multiop $DIR/$tfile o_tSc &
308 # give multiop a chance to open
313 wait $pid || return 1
316 [ -e $DIR/$tfile ] && return 2
319 run_test 12 "open, unlink |X| close"
322 # 1777 - replay open after committed chmod that would make
323 # a regular open a failure
326 multiop $DIR/$tfile O_wc &
328 # give multiop a chance to open
331 $CHECKSTAT -p 0 $DIR/$tfile
335 wait $pid || return 1
337 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
340 run_test 13 "open chmod 0 |x| write close"
343 multiop $DIR/$tfile O_tSc &
345 # give multiop a chance to open
349 kill -USR1 $pid || return 1
350 wait $pid || return 2
353 [ -e $DIR/$tfile ] && return 3
356 run_test 14 "open(O_CREAT), unlink |X| close"
359 multiop $DIR/$tfile O_tSc &
361 # give multiop a chance to open
365 touch $DIR/g11 || return 1
367 wait $pid || return 2
370 [ -e $DIR/$tfile ] && return 3
371 touch $DIR/h11 || return 4
374 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
381 mcreate $DIR/$tfile-2
383 [ -e $DIR/$tfile ] && return 1
384 [ -e $DIR/$tfile-2 ] || return 2
385 munlink $DIR/$tfile-2 || return 3
387 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
391 multiop $DIR/$tfile O_c &
393 # give multiop a chance to open
396 kill -USR1 $pid || return 1
397 wait $pid || return 2
398 $CHECKSTAT -t file $DIR/$tfile || return 3
401 run_test 17 "|X| open(O_CREAT), |replay| close"
405 multiop $DIR/$tfile O_tSc &
407 # give multiop a chance to open
410 touch $DIR/$tfile-2 || return 1
411 echo "pid: $pid will close"
413 wait $pid || return 2
416 [ -e $DIR/$tfile ] && return 3
417 [ -e $DIR/$tfile-2 ] || return 4
418 # this touch frequently fails
419 touch $DIR/$tfile-3 || return 5
420 munlink $DIR/$tfile-2 || return 6
421 munlink $DIR/$tfile-3 || return 7
424 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
426 # bug 1855 (a simpler form of test_11 above)
430 echo "old" > $DIR/$tfile
431 mv $DIR/$tfile $DIR/$tfile-2
432 grep old $DIR/$tfile-2
434 grep old $DIR/$tfile-2 || return 2
436 run_test 19 "|X| mcreate, open, write, rename "
440 multiop $DIR/$tfile O_tSc &
442 # give multiop a chance to open
448 wait $pid || return 1
449 [ -e $DIR/$tfile ] && return 2
452 run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
456 multiop $DIR/$tfile O_tSc &
458 # give multiop a chance to open
461 touch $DIR/g11 || return 1
465 wait $pid || return 2
466 [ -e $DIR/$tfile ] && return 3
467 touch $DIR/h11 || return 4
470 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
473 multiop $DIR/$tfile O_tSc &
475 # give multiop a chance to open
483 wait $pid || return 1
484 [ -e $DIR/$tfile ] && return 2
487 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
490 multiop $DIR/$tfile O_tSc &
492 # give multiop a chance to open
497 touch $DIR/g11 || return 1
501 wait $pid || return 2
502 [ -e $DIR/$tfile ] && return 3
503 touch $DIR/h11 || return 4
506 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
509 multiop $DIR/$tfile O_tSc &
511 # give multiop a chance to open
518 wait $pid || return 1
519 [ -e $DIR/$tfile ] && return 2
522 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
525 multiop $DIR/$tfile O_tSc &
527 # give multiop a chance to open
534 wait $pid || return 1
535 [ -e $DIR/$tfile ] && return 2
538 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
542 multiop $DIR/$tfile-1 O_tSc &
544 multiop $DIR/$tfile-2 O_tSc &
546 # give multiop a chance to open
551 wait $pid2 || return 1
555 wait $pid1 || return 2
556 [ -e $DIR/$tfile-1 ] && return 3
557 [ -e $DIR/$tfile-2 ] && return 4
560 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
564 multiop $DIR/$tfile-1 O_tSc &
566 multiop $DIR/$tfile-2 O_tSc &
568 # give multiop a chance to open
575 wait $pid1 || return 1
577 wait $pid2 || return 2
578 [ -e $DIR/$tfile-1 ] && return 3
579 [ -e $DIR/$tfile-2 ] && return 4
582 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
585 multiop $DIR/$tfile-1 O_tSc &
587 multiop $DIR/$tfile-2 O_tSc &
589 # give multiop a chance to open
595 wait $pid2 || return 1
599 wait $pid1 || return 2
600 [ -e $DIR/$tfile-1 ] && return 3
601 [ -e $DIR/$tfile-2 ] && return 4
604 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
607 multiop $DIR/$tfile-1 O_tSc &
609 multiop $DIR/$tfile-2 O_tSc &
611 # give multiop a chance to open
619 wait $pid1 || return 1
621 wait $pid2 || return 2
622 [ -e $DIR/$tfile-1 ] && return 3
623 [ -e $DIR/$tfile-2 ] && return 4
626 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
629 multiop $DIR/$tfile-1 O_tSc &
631 multiop $DIR/$tfile-2 O_tSc &
633 # give multiop a chance to open
641 wait $pid1 || return 1
643 wait $pid2 || return 2
644 [ -e $DIR/$tfile-1 ] && return 3
645 [ -e $DIR/$tfile-2 ] && return 4
648 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
651 multiop $DIR/$tfile-1 O_tSc &
653 multiop $DIR/$tfile-2 O_tSc &
655 # give multiop a chance to open
663 wait $pid1 || return 1
665 wait $pid2 || return 2
666 [ -e $DIR/$tfile-1 ] && return 3
667 [ -e $DIR/$tfile-2 ] && return 4
670 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
672 # tests for bug 2104; completion without crashing is success. The close is
673 # stale, but we always return 0 for close, so the app never sees it.
675 multiop $DIR/$tfile O_c &
677 multiop $DIR/$tfile O_c &
679 # give multiop a chance to open
682 df $MOUNT || df $MOUNT || return 1
688 run_test 32 "close() notices client eviction; close() after client eviction"
690 # Abort recovery before client complete
695 # this file should be gone, because the replay was aborted
696 $CHECKSTAT -t file $DIR/$tfile && return 1
699 run_test 33 "abort recovery before client does replay"
702 multiop $DIR/$tfile O_c &
704 # give multiop a chance to open
711 [ -e $DIR/$tfile ] && return 1
715 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
717 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
721 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
722 do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
727 # give a chance to remove from MDS
729 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
731 run_test 35 "test recovery from llog for unlink op"
733 # b=2432 resent cancel after replay uses wrong cookie,
734 # so don't resend cancels
738 checkstat $DIR/$tfile
741 if dmesg | grep "unknown lock cookie"; then
742 echo "cancel after replay failed"
746 run_test 36 "don't resend cancel"
749 # directory orphans can't be unlinked from PENDING directory
751 rmdir $DIR/$tfile 2>/dev/null
752 multiop $DIR/$tfile dD_c &
754 # give multiop a chance to open
759 # clear the dmesg buffer so we only see errors from this recovery
763 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
767 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
770 createmany -o $DIR/$tfile-%d 800
771 unlinkmany $DIR/$tfile-%d 0 400
774 unlinkmany $DIR/$tfile-%d 400 400
776 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
778 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
781 createmany -o $DIR/$tfile-%d 800
783 unlinkmany $DIR/$tfile-%d 0 400
785 unlinkmany $DIR/$tfile-%d 400 400
787 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
789 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
792 cat /proc/fs/lustre/osc/*/stats |
793 awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
798 $LCTL mark multiop $MOUNT/$tfile OS_c
799 multiop $MOUNT/$tfile OS_c &
801 writeme -s $MOUNT/${tfile}-2 &
805 #define OBD_FAIL_MDS_CONNECT_NET 0x117
806 do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
808 stat1=`count_ost_writes`
810 stat2=`count_ost_writes`
811 echo "$stat1, $stat2"
812 if [ $stat1 -lt $stat2 ]; then
813 echo "writes continuing during recovery"
816 echo "writes not continuing during recovery, bug 2477"
819 echo "waiting for writeme $WRITE_PID"
823 echo "waiting for multiop $PID"
824 wait $PID || return 2
825 do_facet client munlink $MOUNT/$tfile || return 3
826 do_facet client munlink $MOUNT/${tfile}-2 || return 3
829 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
833 # make sure that a read to one osc doesn't try to double-unlock its page just
834 # because another osc is invalid. trigger_group_io used to mistakenly return
835 # an error if any oscs were invalid even after having successfully put rpcs
836 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
838 # the page, guaranteeing that the unlock from the RPC completion would
838 # assert on trying to unlock the unlocked page.
840 local f=$MOUNT/$tfile
841 # make sure the start of the file is ost1
842 lfs setstripe $f $((128 * 1024)) 0 0
843 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
845 # fail ost2 and read from ost1
846 local osc2_dev=`$LCTL device_list | \
847 awk '(/ost2.*client_facet/){print $4}' `
848 $LCTL --device %$osc2_dev deactivate
849 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
850 $LCTL --device %$osc2_dev activate
853 run_test 41 "read from a valid osc while other oscs are invalid"
855 # test MDS recovery after ost failure
857 blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
858 createmany -o $DIR/$tfile-%d 800
860 unlinkmany $DIR/$tfile-%d 0 400
863 # osc is evicted, fs is smaller
864 blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'`
865 [ $blocks_after -lt $blocks ] || return 1
866 echo wait for MDS to timeout and recover
867 sleep $((TIMEOUT * 2))
868 unlinkmany $DIR/$tfile-%d 400 400
869 $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true
871 run_test 42 "recovery after ost failure"
874 # timeout in MDS/OST recovery RPC will LBUG MDS
878 # OBD_FAIL_OST_CREATE_NET 0x204
879 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
881 df $MOUNT || return 1
883 do_facet ost "sysctl -w lustre.fail_loc=0"
887 run_test 43 "mds osc import failure during recovery; don't LBUG"
890 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
891 do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
892 $LCTL --device $mdcdev recover
894 do_facet mds "sysctl -w lustre.fail_loc=0"
897 run_test 44 "race in target handle connect"
899 # Handle failed close
901 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
902 $LCTL --device $mdcdev recover
904 multiop $DIR/$tfile O_c &
908 # This will cause the CLOSE to fail before even
909 # allocating a reply buffer
910 $LCTL --device $mdcdev deactivate
914 wait $pid || return 1
916 $LCTL --device $mdcdev activate
918 $CHECKSTAT -t file $DIR/$tfile || return 2
921 run_test 45 "Handle failed close"
925 drop_reply "touch $DIR/$tfile"
927 # ironically, the previous test, 45, will cause a real forced close,
928 # so just look for one for this test
929 dmesg | grep -i "force closing client file handle for $tfile" && return 1
932 run_test 46 "Don't leak file handle after open resend (3325)"
937 # create some files to make sure precreate has been done on all
938 # OSTs. (just in case this test is run independently)
939 createmany -o $DIR/$tfile 20 || return 1
941 # OBD_FAIL_OST_CREATE_NET 0x204
943 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
944 df $MOUNT || return 2
946 # let the MDS discover the OST failure, attempt to recover, fail
948 sleep $((3 * TIMEOUT))
950 # Without 2824, this createmany would hang
951 createmany -o $DIR/$tfile 20 || return 3
952 unlinkmany $DIR/$tfile 20 || return 4
954 do_facet ost "sysctl -w lustre.fail_loc=0"
957 run_test 47 "MDS->OSC failure during precreate cleanup (2824)"
961 createmany -o $DIR/${tfile}- 100
962 $CHECKSTAT $DIR/${tfile}-99 || return 1
964 df $MOUNT || df $MOUNT || return 2
966 $CHECKSTAT $DIR/${tfile}-99 || return 3
971 unlinkmany $DIR/${tfile}- 100 || return 4
972 if dmesg | grep "back in time"; then
973 echo "server went back in time!"
978 run_test 48 "Don't lose transno when client is evicted (2525)"
980 # b=3550 - replay of unlink
983 createmany -o $DIR/$tfile-%d 400 || return 1
984 unlinkmany $DIR/$tfile-%d 0 400 || return 2
986 $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
988 run_test 49 "re-write records to llog as written during fail"
990 equals_msg test complete, cleaning up