6 # This test needs to be run on the client
9 LUSTRE=${LUSTRE:-`dirname $0`/..}
10 . $LUSTRE/tests/test-framework.sh
14 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
23 if [ "$MDSCOUNT" -gt 1 ]; then
25 for num in `seq $MDSCOUNT`; do
26 MDSDEV=$TMP/mds${num}-`hostname`
27 add_mds mds$num --dev $MDSDEV --size $MDSSIZE --master --lmv lmv1
29 add_lov_to_lmv lov1 lmv1 --stripe_sz $STRIPE_BYTES \
30 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
31 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
32 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
33 add_client client --lmv lmv1 --lov lov1 --path $MOUNT
35 add_mds mds1 --dev $MDSDEV --size $MDSSIZE
36 if [ ! -z "$mdsfailover_HOST" ]; then
37 add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE
40 add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
41 --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
42 add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
43 add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
44 add_client client --mds mds1_svc --lov lov1 --path $MOUNT
51 # make sure we are using the primary MDS, so the config log will
52 # be able to clean up properly.
53 activemds=`facet_active mds`
54 if [ $activemds != "mds" ]; then
57 zconf_umount `hostname` $MOUNT
58 if [ "$MDSCOUNT" -gt 1 ]; then
59 for num in `seq $MDSCOUNT`; do
60 stop mds$num ${FORCE} $MDSLCONFARGS
63 stop mds ${FORCE} $MDSLCONFARGS
65 stop ost2 ${FORCE} --dump cleanup.log
66 stop ost ${FORCE} --dump cleanup.log
69 if [ "$ONLY" == "cleanup" ]; then
70 sysctl -w portals.debug=0 || true
75 SETUP=${SETUP:-"setup"}
76 CLEANUP=${CLEANUP:-"cleanup"}
81 start ost --reformat $OSTLCONFARGS
82 start ost2 --reformat $OSTLCONFARGS
83 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
84 if [ "$MDSCOUNT" -gt 1 ]; then
85 for num in `seq $MDSCOUNT`; do
86 start mds$num $MDSLCONFARGS --reformat
89 start mds $MDSLCONFARGS --reformat
91 zconf_mount `hostname` $MOUNT
92 echo 0x3f0410 > /proc/sys/portals/debug
97 if [ "$ONLY" == "setup" ]; then
107 run_test 0 "empty replay"
113 $CHECKSTAT -t file $DIR/$tfile || return 1
116 run_test 1 "simple create"
122 $CHECKSTAT -t file $DIR/$tfile || return 1
128 ./mcreate $DIR/$tfile
132 $CHECKSTAT -t file $DIR/$tfile || return 1
140 o_directory $DIR/$tfile
142 $CHECKSTAT -t file $DIR/$tfile || return 2
145 run_test 3a "replay failed open(O_DIRECTORY)"
149 #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE
150 do_facet mds "sysctl -w lustre.fail_loc=0x80000114"
152 do_facet mds "sysctl -w lustre.fail_loc=0"
154 $CHECKSTAT -t file $DIR/$tfile && return 2
157 run_test 3b "replay failed open -ENOMEM"
161 #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE
162 do_facet mds "sysctl -w lustre.fail_loc=0x80000128"
164 do_facet mds "sysctl -w lustre.fail_loc=0"
167 $CHECKSTAT -t file $DIR/$tfile && return 2
170 run_test 3c "replay failed open -ENOMEM"
174 for i in `seq 10`; do
175 echo "tag-$i" > $DIR/$tfile-$i
178 for i in `seq 10`; do
179 grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i"
182 run_test 4 "|x| 10 open(O_CREAT)s"
188 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
190 run_test 4b "|x| rm 10 files"
192 # The idea is to get past the first block of precreated files on both
193 # osts, and then replay.
196 for i in `seq 220`; do
197 echo "tag-$i" > $DIR/$tfile-$i
200 for i in `seq 220`; do
201 grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i"
205 # waiting for commitment of removal
207 run_test 5 "|x| 220 open(O_CREAT)"
213 mcreate $DIR/$tdir/$tfile
215 $CHECKSTAT -t dir $DIR/$tdir || return 1
216 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
218 # waiting for log process thread
220 run_test 6 "mkdir + contained create"
226 $CHECKSTAT -t dir $DIR/$tdir && return 1 || true
228 run_test 6b "|X| rmdir"
233 mcreate $DIR/$tdir/$tfile
235 $CHECKSTAT -t dir $DIR/$tdir || return 1
236 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2
239 run_test 7 "mkdir |X| contained create"
243 multiop $DIR/$tfile mo_c &
248 $CHECKSTAT -t file $DIR/$tfile || return 1
249 kill -USR1 $MULTIPID || return 2
250 wait $MULTIPID || return 3
253 run_test 8 "creat open |X| close"
258 local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
260 local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'`
262 echo " old_inum == $old_inum, new_inum == $new_inum"
263 if [ $old_inum -eq $new_inum ] ;
265 echo " old_inum and new_inum match"
267 echo "!!!! old_inum and new_inum NOT match"
272 run_test 9 "|X| create (same inum/gen)"
277 mv $DIR/$tfile $DIR/$tfile-2
280 $CHECKSTAT $DIR/$tfile && return 1
281 $CHECKSTAT $DIR/$tfile-2 ||return 2
285 run_test 10 "create |X| rename unlink"
289 echo "old" > $DIR/$tfile
290 mv $DIR/$tfile $DIR/$tfile-2
292 echo "new" > $DIR/$tfile
294 grep old $DIR/$tfile-2
296 grep new $DIR/$tfile || return 1
297 grep old $DIR/$tfile-2 || return 2
299 run_test 11 "create open write rename |X| create-old-name read"
303 multiop $DIR/$tfile o_tSc &
305 # give multiop a chance to open
310 wait $pid || return 1
313 [ -e $DIR/$tfile ] && return 2
316 run_test 12 "open, unlink |X| close"
319 # 1777 - replay open after committed chmod that would make
320 # a regular open a failure
323 multiop $DIR/$tfile O_wc &
325 # give multiop a chance to open
328 $CHECKSTAT -p 0 $DIR/$tfile
332 wait $pid || return 1
334 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2
337 run_test 13 "open chmod 0 |x| write close"
340 multiop $DIR/$tfile O_tSc &
342 # give multiop a chance to open
346 kill -USR1 $pid || return 1
347 wait $pid || return 2
350 [ -e $DIR/$tfile ] && return 3
353 run_test 14 "open(O_CREAT), unlink |X| close"
356 multiop $DIR/$tfile O_tSc &
358 # give multiop a chance to open
362 touch $DIR/g11 || return 1
364 wait $pid || return 2
367 [ -e $DIR/$tfile ] && return 3
368 touch $DIR/h11 || return 4
371 run_test 15 "open(O_CREAT), unlink |X| touch new, close"
378 mcreate $DIR/$tfile-2
380 [ -e $DIR/$tfile ] && return 1
381 [ -e $DIR/$tfile-2 ] || return 2
382 munlink $DIR/$tfile-2 || return 3
384 run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new"
388 multiop $DIR/$tfile O_c &
390 # give multiop a chance to open
393 kill -USR1 $pid || return 1
394 wait $pid || return 2
395 $CHECKSTAT -t file $DIR/$tfile || return 3
398 run_test 17 "|X| open(O_CREAT), |replay| close"
402 multiop $DIR/$tfile O_tSc &
404 # give multiop a chance to open
407 touch $DIR/$tfile-2 || return 1
409 wait $pid || return 2
412 [ -e $DIR/$tfile ] && return 3
413 [ -e $DIR/$tfile-2 ] || return 4
414 # this touch frequently fails
415 touch $DIR/$tfile-3 || return 5
416 munlink $DIR/$tfile-2 || return 6
417 munlink $DIR/$tfile-3 || return 7
420 run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink"
422 # bug 1855 (a simpler form of test_11 above)
426 echo "old" > $DIR/$tfile
427 mv $DIR/$tfile $DIR/$tfile-2
428 grep old $DIR/$tfile-2
430 grep old $DIR/$tfile-2 || return 2
432 run_test 19 "|X| mcreate, open, write, rename "
436 multiop $DIR/$tfile O_tSc &
438 # give multiop a chance to open
444 wait $pid || return 1
445 [ -e $DIR/$tfile ] && return 2
448 run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
452 multiop $DIR/$tfile O_tSc &
454 # give multiop a chance to open
457 touch $DIR/g11 || return 1
461 wait $pid || return 2
462 [ -e $DIR/$tfile ] && return 3
463 touch $DIR/h11 || return 4
466 run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)"
469 multiop $DIR/$tfile O_tSc &
471 # give multiop a chance to open
479 wait $pid || return 1
480 [ -e $DIR/$tfile ] && return 2
483 run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)"
486 multiop $DIR/$tfile O_tSc &
488 # give multiop a chance to open
493 touch $DIR/g11 || return 1
497 wait $pid || return 2
498 [ -e $DIR/$tfile ] && return 3
499 touch $DIR/h11 || return 4
502 run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)"
505 multiop $DIR/$tfile O_tSc &
507 # give multiop a chance to open
514 wait $pid || return 1
515 [ -e $DIR/$tfile ] && return 2
518 run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)"
521 multiop $DIR/$tfile O_tSc &
523 # give multiop a chance to open
530 wait $pid || return 1
531 [ -e $DIR/$tfile ] && return 2
534 run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)"
538 multiop $DIR/$tfile-1 O_tSc &
540 multiop $DIR/$tfile-2 O_tSc &
542 # give multiop a chance to open
547 wait $pid2 || return 1
551 wait $pid1 || return 2
552 [ -e $DIR/$tfile-1 ] && return 3
553 [ -e $DIR/$tfile-2 ] && return 4
556 run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)"
560 multiop $DIR/$tfile-1 O_tSc &
562 multiop $DIR/$tfile-2 O_tSc &
564 # give multiop a chance to open
571 wait $pid1 || return 1
573 wait $pid2 || return 2
574 [ -e $DIR/$tfile-1 ] && return 3
575 [ -e $DIR/$tfile-2 ] && return 4
578 run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)"
581 multiop $DIR/$tfile-1 O_tSc &
583 multiop $DIR/$tfile-2 O_tSc &
585 # give multiop a chance to open
591 wait $pid2 || return 1
595 wait $pid1 || return 2
596 [ -e $DIR/$tfile-1 ] && return 3
597 [ -e $DIR/$tfile-2 ] && return 4
600 run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)"
603 multiop $DIR/$tfile-1 O_tSc &
605 multiop $DIR/$tfile-2 O_tSc &
607 # give multiop a chance to open
615 wait $pid1 || return 1
617 wait $pid2 || return 2
618 [ -e $DIR/$tfile-1 ] && return 3
619 [ -e $DIR/$tfile-2 ] && return 4
622 run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)"
625 multiop $DIR/$tfile-1 O_tSc &
627 multiop $DIR/$tfile-2 O_tSc &
629 # give multiop a chance to open
637 wait $pid1 || return 1
639 wait $pid2 || return 2
640 [ -e $DIR/$tfile-1 ] && return 3
641 [ -e $DIR/$tfile-2 ] && return 4
644 run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)"
647 multiop $DIR/$tfile-1 O_tSc &
649 multiop $DIR/$tfile-2 O_tSc &
651 # give multiop a chance to open
659 wait $pid1 || return 1
661 wait $pid2 || return 2
662 [ -e $DIR/$tfile-1 ] && return 3
663 [ -e $DIR/$tfile-2 ] && return 4
666 run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_cleanup_orphans)"
668 # tests for bug 2104; completion without crashing is success. The close is
669 # stale, but we always return 0 for close, so the app never sees it.
671 multiop $DIR/$tfile O_c &
673 multiop $DIR/$tfile O_c &
675 # give multiop a chance to open
678 df $MOUNT || df $MOUNT || return 1
684 run_test 32 "close() notices client eviction; close() after client eviction"
686 # Abort recovery before client complete
691 # this file should be gone, because the replay was aborted
692 $CHECKSTAT -t file $DIR/$tfile && return 1
695 run_test 33 "abort recovery before client does replay"
698 multiop $DIR/$tfile O_c &
700 # give multiop a chance to open
707 [ -e $DIR/$tfile ] && return 1
711 run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)"
713 # bug 2278 - generate one orphan on OST, then destroy it during recovery from llog
717 #define OBD_FAIL_MDS_REINT_NET_REP 0x119
718 do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
723 # give a chance to remove from MDS
725 $CHECKSTAT -t file $DIR/$tfile && return 1 || true
727 run_test 35 "test recovery from llog for unlink op"
729 # b=2432 resent cancel after replay uses wrong cookie,
730 # so don't resend cancels
734 checkstat $DIR/$tfile
737 if dmesg | grep "unknown lock cookie"; then
738 echo "cancel after replay failed"
742 run_test 36 "don't resend cancel"
745 # directory orphans can't be unlinked from PENDING directory
747 rmdir $DIR/$tfile 2>/dev/null
748 multiop $DIR/$tfile dD_c &
750 # give multiop a chance to open
755 # clear the dmesg buffer so we only see errors from this recovery
759 dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1
763 run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)"
766 createmany -o $DIR/$tfile-%d 800
767 unlinkmany $DIR/$tfile-%d 0 400
770 unlinkmany $DIR/$tfile-%d 400 400
772 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
774 run_test 38 "test recovery from unlink llog (test llog_gen_rec) "
777 createmany -o $DIR/$tfile-%d 800
779 unlinkmany $DIR/$tfile-%d 0 400
781 unlinkmany $DIR/$tfile-%d 400 400
783 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
785 run_test 39 "test recovery from unlink llog (test llog_gen_rec) "
788 cat /proc/fs/lustre/osc/*/stats |
789 awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }'
794 $LCTL mark multiop $MOUNT/$tfile OS_c
795 multiop $MOUNT/$tfile OS_c &
797 writeme -s $MOUNT/${tfile}-2 &
801 #define OBD_FAIL_MDS_CONNECT_NET 0x117
802 do_facet mds "sysctl -w lustre.fail_loc=0x80000117"
804 stat1=`count_ost_writes`
806 stat2=`count_ost_writes`
807 echo "$stat1, $stat2"
808 if [ $stat1 -lt $stat2 ]; then
809 echo "writes continuing during recovery"
812 echo "writes not continuing during recovery, bug 2477"
815 echo "waiting for writeme $WRITE_PID"
819 echo "waiting for multiop $PID"
820 wait $PID || return 2
821 do_facet client munlink $MOUNT/$tfile || return 3
822 do_facet client munlink $MOUNT/${tfile}-2 || return 3
825 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
829 # make sure that a read to one osc doesn't try to double-unlock its page just
830 # because another osc is invalid. trigger_group_io used to mistakenly return
831 # an error if any oscs were invalid even after having successfully put rpcs
832 # on valid oscs. This was fatal if the caller was ll_readpage who unlocked
833 the page, guaranteeing that the unlock from the RPC completion would
834 # assert on trying to unlock the unlocked page.
836 local f=$MOUNT/$tfile
837 # make sure the start of the file is ost1
838 lfs setstripe $f $((128 * 1024)) 0 0
839 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3
841 # fail ost2 and read from ost1
842 local osc2_dev=`$LCTL device_list | \
843 awk '(/ost2.*client_facet/){print $4}' `
844 $LCTL --device %$osc2_dev deactivate
845 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3
846 $LCTL --device %$osc2_dev activate
849 run_test 41 "read from a valid osc while other oscs are invalid"
851 # test MDS recovery after ost failure
853 blocks=`df $MOUNT | tail -1 | awk '{ print $1 }'`
854 createmany -o $DIR/$tfile-%d 800
856 unlinkmany $DIR/$tfile-%d 0 400
859 # osc is evicted, fs is smaller
860 blocks_after=`df $MOUNT | tail -1 | awk '{ print $1 }'`
861 [ $blocks_after -lt $blocks ] || return 1
862 echo wait for MDS to timeout and recover
863 sleep $((TIMEOUT * 2))
864 unlinkmany $DIR/$tfile-%d 400 400
865 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true
867 run_test 42 "recovery after ost failure"
870 # directory orphans can't be unlinked from PENDING directory
874 # OBD_FAIL_OST_CREATE_NET 0x204
875 do_facet ost "sysctl -w lustre.fail_loc=0x80000204"
877 df $MOUNT || return 1
879 do_facet ost "sysctl -w lustre.fail_loc=0"
883 run_test 43 "mds osc import failure during recovery; don't LBUG"
886 mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices`
887 do_facet mds "sysctl -w lustre.fail_loc=0x80000701"
888 $LCTL --device $mdcdev recover
890 do_facet mds "sysctl -w lustre.fail_loc=0"
893 run_test 44 "race in target handle connect"
895 equals_msg test complete, cleaning up