X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Freplay-dual.sh;h=d5c9baf49d53eb9a9c96cda89caefa621c6f052e;hb=c37103beb7231d1b41137ba90b25a9ca098ad167;hp=8e1063198a52e2bc666c98a3ee49c348c8a3df94;hpb=ff17cc7e0282d9b1522810e0c5d12171c4d46a2d;p=fs%2Flustre-release.git

diff --git a/lustre/tests/replay-dual.sh b/lustre/tests/replay-dual.sh
index 8e10631..d5c9baf 100755
--- a/lustre/tests/replay-dual.sh
+++ b/lustre/tests/replay-dual.sh
@@ -7,40 +7,70 @@ LUSTRE=${LUSTRE:-`dirname $0`/..}
 init_test_env $@
-. ${CONFIG:=$LUSTRE/tests/cfg/local.sh}
+. ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
+
+# Skip these tests
+# 21 - open vs. unlink out of order replay: not solved yet
+ALWAYS_EXCEPT="21"
+
+SETUP=${SETUP:-"setup"}
+CLEANUP=${CLEANUP:-"cleanup"}
 
 gen_config() {
     rm -f $XMLCONFIG
-    add_mds mds --dev $MDSDEV --size $MDSSIZE
-    if [ ! -z "$mdsfailover_HOST" ]; then
-        add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE
+    if [ "$MDSCOUNT" -gt 1 ]; then
+        add_lmv lmv1_svc
+        for mds in `mds_list`; do
+            MDSDEV=$TMP/${mds}-`hostname`
+            add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
+        done
+        add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
+            --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
+        MDS=lmv1
+    else
+        add_mds mds1 --dev $MDSDEV --size $MDSSIZE
+        add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
+            --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
+        MDS=mds1
     fi
-
-    add_lov lov1 mds --stripe_sz $STRIPE_BYTES\
-        --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
-    add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
-    add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
-    add_client client mds --lov lov1 --path $MOUNT
-}
-
+    add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE --failover
+    add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE --failover
+    add_client client ${MDS} --lov lov1 --path $MOUNT
+}
 
 build_test_filter
 
+SETUP=${SETUP:-"setup"}
+CLEANUP=${CLEANUP:-"cleanup"}
+
 cleanup() {
     # make sure we are using the primary MDS, so the config log will
     # be able to clean up properly.
-    activemds=`facet_active mds`
-    if [ $activemds != "mds" ]; then
-        fail mds
+    activemds=`facet_active mds1`
+    if [ $activemds != "mds1" ]; then
+        fail mds1
     fi
 
-    umount $MOUNT2
-    umount $MOUNT
+    umount $MOUNT2 || true
+    umount $MOUNT || true
     rmmod llite
-    stop mds ${FORCE}
+
+    # b=3941
+    # During MDS recovery the MDS clears orphans on the OSTs via
+    # mds_lov_clear_orphan, which sends a request to each OST and waits
+    # for the reply.  If we stop the MDS at that moment we get
+    # obd_refcount > 1 errors, because mds_lov_clear_orphan holds an
+    # export of the MDS, so the MDS obd_refcount cannot drop to zero.
+    # So wait a while before stopping the MDS.  This bug needs further
+    # work.
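+    # (hence the sleep 5 before each MDS stop in the loop below)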
+    for mds in `mds_list`; do
+        sleep 5
+        stop $mds ${FORCE} $MDSLCONFARGS
+    done
     stop ost2 ${FORCE}
     stop ost ${FORCE} --dump cleanup-dual.log
+    stop_lgssd
+    stop_lsvcgssd
 }
 
 if [ "$ONLY" == "cleanup" ]; then
@@ -49,33 +79,43 @@ if [ "$ONLY" == "cleanup" ]; then
     exit
 fi
 
-gen_config
-start ost --reformat $OSTLCONFARGS
-PINGER=`cat /proc/fs/lustre/pinger`
+setup() {
+    gen_config
 
-if [ "$PINGER" != "on" ]; then
-    echo "ERROR: Lustre must be built with --enable-pinger for replay-dual"
-    stop mds
-    exit 1
-fi
+    start_krb5_kdc || exit 1
+    start_lsvcgssd || exit 2
+    start_lgssd || exit 3
+    start ost --reformat $OSTLCONFARGS
+    PINGER=`cat /proc/fs/lustre/pinger`
 
-start ost2 --reformat $OSTLCONFARGS
-[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
-start mds $MDSLCONFARGS --reformat
-zconf_mount `hostname` $MOUNT
-zconf_mount `hostname` $MOUNT2
+    if [ "$PINGER" != "on" ]; then
+        echo "ERROR: Lustre must be built with --enable-pinger for replay-dual"
+        stop ost
+        exit 1
+    fi
+
+    start ost2 --reformat $OSTLCONFARGS
+    [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
+    for mds in `mds_list`; do
+        start $mds --reformat $MDSLCONFARGS
+    done
+    grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
+    grep " $MOUNT2 " /proc/mounts || zconf_mount `hostname` $MOUNT2
 
-echo $TIMEOUT > /proc/sys/lustre/timeout
-echo $UPCALL > /proc/sys/lustre/upcall
+    echo $TIMEOUT > /proc/sys/lustre/timeout
+    echo $UPCALL > /proc/sys/lustre/upcall
+}
 
+$SETUP
 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
 
+
 test_1() {
     touch $MOUNT1/a
-    replay_barrier mds
+    replay_barrier mds1
     touch $MOUNT2/b
 
-    fail mds
+    fail mds1
     checkstat $MOUNT2/a || return 1
     checkstat $MOUNT1/b || return 2
     rm $MOUNT2/a $MOUNT1/b
@@ -88,10 +128,10 @@ run_test 1 "|X| simple create"
 
 
 test_2() {
-    replay_barrier mds
+    replay_barrier mds1
     mkdir $MOUNT1/adir
 
-    fail mds
+    fail mds1
     checkstat $MOUNT2/adir || return 1
     rmdir $MOUNT2/adir
     checkstat $MOUNT2/adir && return 2
@@ -101,11 +141,11 @@ test_2() {
 run_test 2 "|X| mkdir adir"
 
 test_3() {
-    replay_barrier mds
+    replay_barrier mds1
     mkdir $MOUNT1/adir
     mkdir $MOUNT2/adir/bdir
 
-    fail mds
+    fail mds1
     checkstat $MOUNT2/adir || return 1
     checkstat $MOUNT1/adir/bdir || return 2
     rmdir $MOUNT2/adir/bdir $MOUNT1/adir
@@ -118,11 +158,11 @@ run_test 3 "|X| mkdir adir, mkdir adir/bdir "
 
 test_4() {
     mkdir $MOUNT1/adir
-    replay_barrier mds
+    replay_barrier mds1
     mkdir $MOUNT1/adir && return 1
     mkdir $MOUNT2/adir/bdir
 
-    fail mds
+    fail mds1
     checkstat $MOUNT2/adir      || return 2
     checkstat $MOUNT1/adir/bdir || return 3
@@ -143,11 +183,11 @@ test_5() {
     # give multiop a chance to open
     sleep 1
     rm -f $MOUNT1/a
-    replay_barrier mds
+    replay_barrier mds1
     kill -USR1 $pid
     wait $pid || return 1
 
-    fail mds
+    fail mds1
     [ -e $MOUNT2/a ] && return 2
     return 0
 }
@@ -163,11 +203,11 @@ test_6() {
     # give multiop a chance to open
     sleep 1
     rm -f $MOUNT1/a
-    replay_barrier mds
+    replay_barrier mds1
     kill -USR1 $pid1
     wait $pid1 || return 1
 
-    fail mds
+    fail mds1
     kill -USR1 $pid2
     wait $pid2 || return 1
     [ -e $MOUNT2/a ] && return 2
@@ -175,7 +215,344 @@ test_6() {
 }
 run_test 6 "open1, open2, unlink |X| close1 [fail mds] close2"
 
+test_6b() {
+    mcreate $MOUNT1/a
+    multiop $MOUNT2/a o_c &
+    pid1=$!
+    multiop $MOUNT1/a o_c &
+    pid2=$!
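+    # pid1 holds the file open via $MOUNT2 and pid2 via $MOUNT1; the
+    # unlink below leaves an open-unlinked file visible to both clients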
+    # give multiop a chance to open
+    sleep 1
+    rm -f $MOUNT1/a
+    replay_barrier mds1
+    kill -USR1 $pid2
+    wait $pid2 || return 1
+
+    fail mds1
+    kill -USR1 $pid1
+    wait $pid1 || return 1
+    [ -e $MOUNT2/a ] && return 2
+    return 0
+}
+run_test 6b "open1, open2, unlink |X| close2 [fail mds] close1"
+
+test_8() {
+    replay_barrier mds1
+    drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
+    fail mds1
+    checkstat $MOUNT2/$tfile || return 2
+    rm $MOUNT1/$tfile || return 3
+
+    return 0
+}
+run_test 8 "replay of resent request"
+
+test_9() {
+    replay_barrier mds1
+    mcreate $MOUNT1/$tfile-1
+    mcreate $MOUNT2/$tfile-2
+    # drop first reint reply
+    sysctl -w lustre.fail_loc=0x80000119
+    fail mds1
+    sysctl -w lustre.fail_loc=0
+
+    rm $MOUNT1/$tfile-[1,2] || return 1
+
+    return 0
+}
+run_test 9 "resending a replayed create"
+
+test_10() {
+    mcreate $MOUNT1/$tfile-1
+    replay_barrier mds1
+    munlink $MOUNT1/$tfile-1
+    mcreate $MOUNT2/$tfile-2
+    # drop first reint reply
+    sysctl -w lustre.fail_loc=0x80000119
+    fail mds1
+    sysctl -w lustre.fail_loc=0
+
+    checkstat $MOUNT1/$tfile-1 && return 1
+    checkstat $MOUNT1/$tfile-2 || return 2
+    rm $MOUNT1/$tfile-2
+
+    return 0
+}
+run_test 10 "resending a replayed unlink"
+
+test_11() {
+    replay_barrier mds1
+    mcreate $MOUNT1/$tfile-1
+    mcreate $MOUNT2/$tfile-2
+    mcreate $MOUNT1/$tfile-3
+    mcreate $MOUNT2/$tfile-4
+    mcreate $MOUNT1/$tfile-5
+    # drop all reint replies for a while
+    sysctl -w lustre.fail_loc=0x0119
+    facet_failover mds1
+    # sleep for a while; let both clients reconnect and time out
+    sleep $((TIMEOUT * 2))
+    sysctl -w lustre.fail_loc=0
+
+    rm $MOUNT1/$tfile-[1-5] || return 1
+
+    return 0
+}
+run_test 11 "both clients timeout during replay"
+
+test_12() {
+    replay_barrier mds1
+
+    multiop $DIR/$tfile mo_c &
+    MULTIPID=$!
+    sleep 5
+
+    # drop first enqueue
+    sysctl -w lustre.fail_loc=0x80000302
+    facet_failover mds1
+    df $MOUNT || return 1
+    sysctl -w lustre.fail_loc=0
+
+    ls $DIR/$tfile
+    $CHECKSTAT -t file $DIR/$tfile || return 2
+    kill -USR1 $MULTIPID || return 3
+    wait $MULTIPID || return 4
+    rm $DIR/$tfile
+
+    return 0
+}
+run_test 12 "open resend timeout"
+
+test_13() {
+    multiop $DIR/$tfile mo_c &
+    MULTIPID=$!
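+    # the open completes and commits before the barrier; only the close
+    # (whose reply is dropped below) must be resent after failover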
+    sleep 5
+
+    replay_barrier mds1
+
+    kill -USR1 $MULTIPID || return 3
+    wait $MULTIPID || return 4
+
+    # drop close
+    sysctl -w lustre.fail_loc=0x80000115
+    facet_failover mds1
+    df $MOUNT || return 1
+    sysctl -w lustre.fail_loc=0
+
+    ls $DIR/$tfile
+    $CHECKSTAT -t file $DIR/$tfile || return 2
+    rm $DIR/$tfile
+
+    return 0
+}
+run_test 13 "close resend timeout"
+
+
+test_14() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    createmany -o $MOUNT1/$tfile-3- 25
+    umount $MOUNT2
+
+    facet_failover mds1
+    # expect failover to fail
+    df $MOUNT && return 1
+    sleep 1
+
+    # the first 25 files should have been replayed
+    sleep 2
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+}
+run_test 14 "timeouts waiting for lost client during replay"
+
+test_15() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    umount $MOUNT2
+
+    facet_failover mds1
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+}
+run_test 15 "timeout waiting for lost client during replay, 1 client completes"
+
+test_16() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    umount $MOUNT2
+
+    facet_failover mds1
+    sleep $TIMEOUT
+    facet_failover mds1
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+
+}
+#run_test 16 "fail MDS during recovery (3571)"
+
+test_17() {
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+
+    # Make sure the disconnect is lost
+    replay_barrier ost
+    umount $MOUNT2
+
+    echo -1 > /proc/sys/portals/debug
+    facet_failover ost
+    sleep $TIMEOUT
+    facet_failover ost
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+
+}
+# OST failover is not supported yet
+#run_test 17 "fail OST during recovery (3571)"
+
+test_18 () {
+    replay_barrier mds1
+    multiop $MOUNT2/$tfile O_c &
+    pid2=$!
+    multiop $MOUNT1/$tfile O_c &
+    pid1=$!
+    # give multiop a chance to open
+    sleep 1
+    kill -USR1 $pid2
+    kill -USR1 $pid1
+    sleep 1
+    umount $MOUNT2
+    facet_failover mds1
+    df || df || return 1
+    zconf_mount `hostname` $MOUNT2
+}
+run_test 18 "replay open, Abort recovery, don't assert (3892)"
+
+# cleanup with a blocked enqueue fails until the timer elapses (MDS busy);
+# wait for it
+export NOW=0
+
+test_20() {    # bug 3822 - evicting client with enqueued lock
+    mkdir -p $MOUNT1/$tdir
+    touch $MOUNT1/$tdir/f0
+#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
+    statmany -s $MOUNT1/$tdir/f 500 &
+    OPENPID=$!
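+    # the background statmany blocks once the MDS holds enqueues; with its
+    # blocking callback dropped (below), this client is the one evicted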
+    NOW=`date +%s`
+    do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b    # hold enqueue
+    sleep 1
+#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
+    do_facet client sysctl -w lustre.fail_loc=0x80000305  # drop cb, evict
+    cancel_lru_locks MDC
+    usleep 500 # wait to ensure the first client is the one to be evicted
+    openfile -f O_RDONLY $MOUNT2/$tdir/f0
+    wait $OPENPID
+    dmesg | grep "entering recovery in server" && \
+        error "client not evicted" || true
+}
+run_test 20 "ldlm_handle_enqueue succeeds on evicted export (3822)"
+
+# $1 - mountpoint number
+# $2 - mds name
+function find_dev_for_fs_and_mds()
+{
+    local fsuuid=`cat /proc/fs/lustre/llite/fs$1/uuid`
+    $LCTL device_list | awk "/mdc.*$2.*$fsuuid/ {print \$4}"
+}
+
+test_21() {
+    mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
+    mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+    multiop $MOUNT1/f21 O
+    cancel_lru_locks MDC
+    # generate IT_OPEN to be replayed against existing file
+    multiop $MOUNT1/f21 o_Sc &
+    pid=$!
+
+    # the IT_OPEN will have been committed by failover time
+    replay_barrier mds1
+
+    # generate MDS_REINT_UNLINK to be replayed
+    rm -f $MOUNT2/f21 || return 1
+
+    # disable recovery on both clients
+    $LCTL --device %$mdc1dev disable_recovery
+    $LCTL --device %$mdc2dev disable_recovery
+    facet_failover mds1
+
+    # let the unlink be replayed first
+    $LCTL --device %$mdc2dev enable_recovery
+    sleep $((TIMEOUT / 2))
+
+    # now let the open be replayed
+    $LCTL --device %$mdc1dev enable_recovery
+    kill -USR1 $pid
+    wait $pid || return 2
+}
+run_test 21 "open vs. unlink out of order replay"
+
+test_22() {    # bug 6063 - AST during recovery
+    mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
+    mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+    $LCTL --device %$mdc1dev disable_recovery
+    $LCTL --device %$mdc2dev disable_recovery
+
+    replay_barrier mds1
+    mkdir $MOUNT1/${tdir}-1    # client1: request to be replayed
+    ls $MOUNT2                 # client2: take the lock needed for the replay
+    facet_failover mds1
+
+    # let's recover the 2nd connection with the granted UPDATE lock
+    $LCTL --device %$mdc2dev enable_recovery
+    sleep $((TIMEOUT / 2))
+
+    $LCTL mark "first recovered?"
+    LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
+    if [ "$LOCKS" != "" ]; then
+        echo "The lock was replayed before the mkdir was replayed: $LOCKS"
+        return 1
+    fi
+
+    # let's recover the 1st connection with the mkdir replay that needs the lock
+    $LCTL --device %$mdc1dev enable_recovery
+    sleep $TIMEOUT
+    $LCTL mark "second recovered?"
+
+    LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
+    if [ "$LOCKS" != "1" ]; then
+        echo "The lock hasn't been replayed: $LOCKS"
+        return 2
+    fi
+
+    return 0
+}
+run_test 22 "AST during recovery"
+
 if [ "$ONLY" != "setup" ]; then
     equals_msg test complete, cleaning up
-    cleanup
+    if [ $NOW ]; then
+        SLEEP=$((`date +%s` - $NOW))
+        [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+    fi
+    $CLEANUP
 fi