X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Freplay-dual.sh;h=d5c9baf49d53eb9a9c96cda89caefa621c6f052e;hp=73c922d48bbd17c0b3c7385f5fb051b239a41f69;hb=c37103beb7231d1b41137ba90b25a9ca098ad167;hpb=48e080ea156e4a6ad38ef6b14215787c52761f40

diff --git a/lustre/tests/replay-dual.sh b/lustre/tests/replay-dual.sh
index 73c922d..d5c9baf 100755
--- a/lustre/tests/replay-dual.sh
+++ b/lustre/tests/replay-dual.sh
@@ -9,32 +9,36 @@ init_test_env $@
 . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
 
+# Skip these tests
+# 21 - open vs. unlink out-of-order replay: not yet solved
+ALWAYS_EXCEPT="21"
+
+SETUP=${SETUP:-"setup"}
+CLEANUP=${CLEANUP:-"cleanup"}
+
 gen_config() {
     rm -f $XMLCONFIG
     if [ "$MDSCOUNT" -gt 1 ]; then
-        add_lmv lmv1
+        add_lmv lmv1_svc
         for mds in `mds_list`; do
             MDSDEV=$TMP/${mds}-`hostname`
-            add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1
+            add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
        done
-        add_lov_to_lmv lov1 lmv1 --stripe_sz $STRIPE_BYTES \
+        add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
            --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
        MDS=lmv1
     else
        add_mds mds1 --dev $MDSDEV --size $MDSSIZE
        add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
            --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
-       MDS=mds1_svc
-
+       MDS=mds1
     fi
 
-    add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE
-    add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE
-    add_client client --mds ${MDS} --lov lov1 --path $MOUNT
+    add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE --failover
+    add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE --failover
+    add_client client ${MDS} --lov lov1 --path $MOUNT
 }
-
-
 build_test_filter
 
 SETUP=${SETUP:-"setup"}
@@ -51,11 +55,22 @@ cleanup() {
     umount $MOUNT2 || true
     umount $MOUNT || true
     rmmod llite
+
+    # b=3941
+    # During MDS recovery the MDS clears orphans on the OSTs via
+    # mds_lov_clear_orphan, which sends a request to the OST and waits for
+    # the reply.  If we stop the MDS at that point we get obd_refcount > 1
+    # errors, because mds_lov_clear_orphan holds an export of the MDS, so
+    # the MDS obd_refcount cannot drop to zero.  Therefore wait a while
+    # before stopping the MDS.  This bug needs further work.
     for mds in `mds_list`; do
+        sleep 5
         stop $mds ${FORCE} $MDSLCONFARGS
     done
     stop ost2 ${FORCE}
     stop ost ${FORCE} --dump cleanup-dual.log
+    stop_lgssd
+    stop_lsvcgssd
 }
 
 if [ "$ONLY" == "cleanup" ]; then
@@ -66,6 +81,10 @@ fi
 
 setup() {
     gen_config
+
+    start_krb5_kdc || exit 1
+    start_lsvcgssd || exit 2
+    start_lgssd || exit 3
 
     start ost --reformat $OSTLCONFARGS
     PINGER=`cat /proc/fs/lustre/pinger`
@@ -217,26 +236,6 @@ test_6b() {
 }
 run_test 6b "open1, open2, unlink |X| close2 [fail mds] close1"
 
-test_7() {
-    replay_barrier mds1
-    createmany -o $MOUNT1/$tfile- 25
-    createmany -o $MOUNT2/$tfile-2- 1
-    createmany -o $MOUNT1/$tfile-3- 25
-    umount $MOUNT2
-
-    facet_failover mds1
-    # expect failover to fail
-    df $MOUNT && return 1
-
-# 3313 - current fix for 3313 prevents any reply here
-#    unlinkmany $MOUNT1/$tfile- 25 || return 2
-
-    zconf_mount `hostname` $MOUNT2
-    return 0
-}
-run_test 7 "timeouts waiting for lost client during replay"
-
-
 test_8() {
     replay_barrier mds1
     drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
@@ -348,7 +347,90 @@ test_13() {
 }
 run_test 13 "close resend timeout"
 
-test_20 () {
+
+test_14() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    createmany -o $MOUNT1/$tfile-3- 25
+    umount $MOUNT2
+
+    facet_failover mds1
+    # expect failover to fail
+    df $MOUNT && return 1
+    sleep 1
+
+    # the first 25 files should have been
+    # replayed
+    sleep 2
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+}
+run_test 14 "timeouts waiting for lost client during replay"
+
+test_15() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    umount $MOUNT2
+
+    facet_failover mds1
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+}
+run_test 15 "timeout waiting for lost client during replay, 1 client completes"
+test_16() {
+    replay_barrier mds1
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+    umount $MOUNT2
+
+    facet_failover mds1
+    sleep $TIMEOUT
+    facet_failover mds1
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+
+}
+#run_test 16 "fail MDS during recovery (3571)"
+
+test_17() {
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
+
+    # Make sure the disconnect is lost
+    replay_barrier ost
+    umount $MOUNT2
+
+    echo -1 > /proc/sys/portals/debug
+    facet_failover ost
+    sleep $TIMEOUT
+    facet_failover ost
+    df $MOUNT || return 1
+    sleep 1
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+
+    zconf_mount `hostname` $MOUNT2
+    return 0
+
+}
+# OST failover is not yet supported
+#run_test 17 "fail OST during recovery (3571)"
+
+test_18 () {
     replay_barrier mds1
     multiop $MOUNT2/$tfile O_c &
     pid2=$!
@@ -364,9 +446,113 @@ test_20 () {
     df || df || return 1
     zconf_mount `hostname` $MOUNT2
 }
-run_test 20 "replay open, Abort recovery, don't assert (3892)"
+run_test 18 "replay open, Abort recovery, don't assert (3892)"
+
+# cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for it
+export NOW=0
+
+test_20() {    # bug 3822 - evicting client with enqueued lock
+    mkdir -p $MOUNT1/$tdir
+    touch $MOUNT1/$tdir/f0
+#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED    0x30b
+    statmany -s $MOUNT1/$tdir/f 500 &
+    OPENPID=$!
+    NOW=`date +%s`
+    do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b  # hold enqueue
+    sleep 1
+#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
+    do_facet client sysctl -w lustre.fail_loc=0x80000305  # drop cb, evict
+    cancel_lru_locks MDC
+    usleep 500 # wait to ensure first client is one that will be evicted
+    openfile -f O_RDONLY $MOUNT2/$tdir/f0
+    wait $OPENPID
+    dmesg | grep "entering recovery in server" && \
+        error "client not evicted" || true
+}
+run_test 20 "ldlm_handle_enqueue succeeds on evicted export (3822)"
+
+# $1 - mount point number
+# $2 - mds name
+function find_dev_for_fs_and_mds()
+{
+    local fsuuid=`cat /proc/fs/lustre/llite/fs$1/uuid`
+    $LCTL device_list | awk "/mdc.*$2.*$fsuuid/ {print \$4}"
+}
+
+test_21() {
+    mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
+    mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+    multiop $MOUNT1/f21 O
+    cancel_lru_locks MDC
+    # generate IT_OPEN to be replayed against existing file
+    multiop $MOUNT1/f21 o_Sc &
+    pid=$!
+
+    # IT_OPEN will be committed by the failover time
+    replay_barrier mds1
+
+    # generate MDS_REINT_UNLINK to be replayed
+    rm -f $MOUNT2/f21 || return 1
+
+    # disable recovery on both clients
+    $LCTL --device %$mdc1dev disable_recovery
+    $LCTL --device %$mdc2dev disable_recovery
+    facet_failover mds1
+
+    # let the unlink be replayed first
+    $LCTL --device %$mdc2dev enable_recovery
+    sleep $((TIMEOUT/2))
+
+    # now let the open be replayed
+    $LCTL --device %$mdc1dev enable_recovery
+    kill -USR1 $pid
+    wait $pid || return 2
+}
+run_test 21 "open vs. unlink out of order replay"
+
+test_22() {    # bug 6063 - AST during recovery
+    mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
+    mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
+    $LCTL --device %$mdc1dev disable_recovery
+    $LCTL --device %$mdc2dev disable_recovery
+
+    replay_barrier mds1
+    mkdir $MOUNT1/${tdir}-1    # client1: request to be replayed
+    ls $MOUNT2                 # client2: take the lock the mkdir replay will need
+    facet_failover mds1
+
+    # let's recover 2nd connection with granted UPDATE lock
+    $LCTL --device %$mdc2dev enable_recovery
+    sleep $((TIMEOUT / 2))
+
+    $LCTL mark "first recovered?"
+    LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
+    if [ "$LOCKS" != "" ]; then
+        echo "The lock got replayed before the mkdir was replayed: $LOCKS"
+        echo 0 >${IMP1}
+        return 1
+    fi
+
+    # let's recover 1st connection with the mkdir replay that needs the lock
+    $LCTL --device %$mdc1dev enable_recovery
+    sleep $TIMEOUT
+    $LCTL mark "second recovered?"
+
+    LOCKS=`grep -v '^0$' /proc/fs/lustre/ldlm/namespaces/mds-*/lock_count`
+    if [ "$LOCKS" != "1" ]; then
+        echo "The lock has not been replayed: $LOCKS"
+        return 2
+    fi
+
+    return 0
+}
+run_test 22 "AST during recovery"
 
 if [ "$ONLY" != "setup" ]; then
     equals_msg test complete, cleaning up
+    if [ $NOW ]; then
+        SLEEP=$((`date +%s` - $NOW))
+        [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+    fi
     $CLEANUP
 fi
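
For orientation, the new tests in this patch share one replay shape: set a replay barrier on the MDS, perform uncommitted operations from one or both clients, fail the facet over, and then verify that the barrier-protected operations were replayed. The sketch below is illustrative only and not part of the patch; it assumes the standard test-framework.sh helpers that replay-dual.sh already uses (replay_barrier, facet_failover, createmany, unlinkmany, zconf_mount) and the MOUNT1/MOUNT2 dual-mount configuration, with a preamble mirroring the top of replay-dual.sh.

#!/bin/bash
# Illustrative sketch of the replay/failover backbone shared by tests 14-18.
# Helper functions are assumed to come from the Lustre test framework,
# as replay-dual.sh itself does.
LUSTRE=${LUSTRE:-`dirname $0`/..}
. $LUSTRE/tests/test-framework.sh
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
build_test_filter

test_sketch() {
    replay_barrier mds1                  # snapshot MDS state; later ops stay uncommitted
    createmany -o $MOUNT1/$tfile- 25     # client 1: creates that must be replayed
    createmany -o $MOUNT2/$tfile-2- 1    # client 2: one create, then the client is lost
    umount $MOUNT2

    facet_failover mds1                  # restart the MDS and drive recovery
    df $MOUNT || return 1                # client 1 reconnects and finishes replay

    unlinkmany $MOUNT1/$tfile- 25 || return 2   # succeeds only if the creates were replayed
    zconf_mount `hostname` $MOUNT2              # remount client 2 for the following tests
}
run_test sketch "replay barrier / failover / verify pattern (illustration)"

Tests 16, 17, 21 and 22 vary this backbone with repeated failovers, fail_loc fault injection, and disable_recovery/enable_recovery ordering, but the barrier / failover / verify structure is the same.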