X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Freplay-single.sh;h=06e2ab64051c6e1ffd8ea6cd8ffdc2e8af0861e0;hb=e52a140eb7f65a63f07dbb7e2aaff7f9e22974f2;hp=4fb7d516219f89c2c4a05ac0838ea84d64caf38e;hpb=090c677210ee2946d99c71412e4ff762bb300f4f;p=fs%2Flustre-release.git diff --git a/lustre/tests/replay-single.sh b/lustre/tests/replay-single.sh index 4fb7d51..06e2ab6 100755 --- a/lustre/tests/replay-single.sh +++ b/lustre/tests/replay-single.sh @@ -13,36 +13,41 @@ init_test_env $@ . ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh} +build_test_filter + +assert_env MDSCOUNT + # Skip these tests -ALWAYS_EXCEPT="" +# 46 - The MDS will always have to force close the cached opens +ALWAYS_EXCEPT="46" + +if [ `using_krb5_sec $SECURITY` == 'n' ] ; then + ALWAYS_EXCEPT="0c $ALWAYS_EXCEPT" +fi gen_config() { rm -f $XMLCONFIG if [ "$MDSCOUNT" -gt 1 ]; then - add_lmv lmv1 - for num in `seq $MDSCOUNT`; do - MDSDEV=$TMP/mds${num}-`hostname` - add_mds mds$num --dev $MDSDEV --size $MDSSIZE --master --lmv lmv1 + add_lmv lmv1_svc + for mds in `mds_list`; do + MDSDEV=$TMP/${mds}-`hostname` + add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc done - add_lov_to_lmv lov1 lmv1 --stripe_sz $STRIPE_BYTES \ + add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \ --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0 - add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE - add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE - add_client client --lmv lmv1 --lov lov1 --path $MOUNT + MDS=lmv1 else - add_mds mds1 --dev $MDSDEV --size $MDSSIZE - if [ ! -z "$mdsfailover_HOST" ]; then - add_mdsfailover mds --dev $MDSDEV --size $MDSSIZE - fi - - add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \ + add_mds $SINGLEMDS --dev $MDSDEV --size $MDSSIZE + add_lov lov1 $SINGLEMDS --stripe_sz $STRIPE_BYTES \ --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0 - add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE - add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE - add_client client --mds mds1_svc --lov lov1 --path $MOUNT + MDS=$SINGLEMDS fi + + add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE + add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE + add_client client $MDS --lov lov1 --path $MOUNT } build_test_filter @@ -50,20 +55,18 @@ build_test_filter cleanup() { # make sure we are using the primary MDS, so the config log will # be able to clean up properly. 
- activemds=`facet_active mds` - if [ $activemds != "mds" ]; then - fail mds + activemds=`facet_active $SINGLEMDS` + if [ $activemds != "$SINGLEMDS" ]; then + fail $SINGLEMDS fi zconf_umount `hostname` $MOUNT - if [ "$MDSCOUNT" -gt 1 ]; then - for num in `seq $MDSCOUNT`; do - stop mds$num ${FORCE} $MDSLCONFARGS - done - else - stop mds ${FORCE} $MDSLCONFARGS - fi + for mds in `mds_list`; do + stop $mds ${FORCE} $MDSLCONFARGS + done stop ost2 ${FORCE} --dump cleanup.log stop ost ${FORCE} --dump cleanup.log + stop_lgssd + stop_lsvcgssd } if [ "$ONLY" == "cleanup" ]; then @@ -78,18 +81,16 @@ CLEANUP=${CLEANUP:-"cleanup"} setup() { gen_config + start_krb5_kdc || exit 1 + start_lsvcgssd || exit 2 + start_lgssd || exit 3 start ost --reformat $OSTLCONFARGS start ost2 --reformat $OSTLCONFARGS [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE - if [ "$MDSCOUNT" -gt 1 ]; then - for num in `seq $MDSCOUNT`; do - start mds$num $MDSLCONFARGS --reformat - done - else - start mds $MDSLCONFARGS --reformat - fi - zconf_mount `hostname` $MOUNT - echo 0x3f0410 > /proc/sys/portals/debug + for mds in `mds_list`; do + start $mds --reformat $MDSLCONFARGS + done + grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT } $SETUP @@ -101,24 +102,48 @@ fi mkdir -p $DIR test_0() { - replay_barrier mds1 - fail mds1 + replay_barrier $SINGLEMDS + fail $SINGLEMDS } run_test 0 "empty replay" +test_0b() { + # this test attempts to trigger a race in the precreation code, + # and must run before any other objects are created on the filesystem + fail ost + createmany -o $DIR/$tfile 20 || return 1 + unlinkmany $DIR/$tfile 20 || return 2 +} +run_test 0b "ensure object created after recover exists. (3284)" + +test_0c() { + if [ `using_krb5_sec $SECURITY` == 'n' ] ; then + echo "Skip 0c in non-gss mode" + return 0 + fi + # drop gss error notification + replay_barrier $SINGLEMDS + fail_drop $SINGLEMDS 0x760 + + # drop gss init request + replay_barrier $SINGLEMDS + fail_drop $SINGLEMDS 0x780 +} +run_test 0c "empty replay with gss init failures" + test_1() { - replay_barrier mds2 + replay_barrier $SINGLEMDS mcreate $DIR/$tfile - fail mds2 + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile || return 1 rm $DIR/$tfile } run_test 1 "simple create" test_2a() { - replay_barrier mds + replay_barrier $SINGLEMDS touch $DIR/$tfile - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile || return 1 rm $DIR/$tfile } @@ -126,43 +151,43 @@ run_test 2a "touch" test_2b() { ./mcreate $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS touch $DIR/$tfile - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile || return 1 rm $DIR/$tfile } run_test 2b "touch" test_3a() { - replay_barrier mds + replay_barrier $SINGLEMDS mcreate $DIR/$tfile o_directory $DIR/$tfile - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile || return 2 rm $DIR/$tfile } run_test 3a "replay failed open(O_DIRECTORY)" test_3b() { - replay_barrier mds + replay_barrier $SINGLEMDS #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE do_facet mds "sysctl -w lustre.fail_loc=0x80000114" touch $DIR/$tfile do_facet mds "sysctl -w lustre.fail_loc=0" - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile && return 2 return 0 } run_test 3b "replay failed open -ENOMEM" test_3c() { - replay_barrier mds + replay_barrier $SINGLEMDS #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE do_facet mds "sysctl -w lustre.fail_loc=0x80000128" touch $DIR/$tfile do_facet mds "sysctl -w lustre.fail_loc=0" - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile && 
return 2 return 0 @@ -170,11 +195,11 @@ test_3c() { run_test 3c "replay failed open -ENOMEM" test_4() { - replay_barrier mds + replay_barrier $SINGLEMDS for i in `seq 10`; do echo "tag-$i" > $DIR/$tfile-$i done - fail mds + fail $SINGLEMDS for i in `seq 10`; do grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i" done @@ -182,9 +207,9 @@ test_4() { run_test 4 "|x| 10 open(O_CREAT)s" test_4b() { - replay_barrier mds + replay_barrier $SINGLEMDS rm -rf $DIR/$tfile-* - fail mds + fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true } run_test 4b "|x| rm 10 files" @@ -192,11 +217,11 @@ run_test 4b "|x| rm 10 files" # The idea is to get past the first block of precreated files on both # osts, and then replay. test_5() { - replay_barrier mds + replay_barrier $SINGLEMDS for i in `seq 220`; do echo "tag-$i" > $DIR/$tfile-$i done - fail mds + fail $SINGLEMDS for i in `seq 220`; do grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i" done @@ -208,10 +233,10 @@ run_test 5 "|x| 220 open(O_CREAT)" test_6() { - replay_barrier mds + replay_barrier $SINGLEMDS mkdir $DIR/$tdir mcreate $DIR/$tdir/$tfile - fail mds + fail $SINGLEMDS $CHECKSTAT -t dir $DIR/$tdir || return 1 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2 sleep 2 @@ -220,18 +245,18 @@ test_6() { run_test 6 "mkdir + contained create" test_6b() { - replay_barrier mds + replay_barrier $SINGLEMDS rm -rf $DIR/$tdir - fail mds + fail $SINGLEMDS $CHECKSTAT -t dir $DIR/$tdir && return 1 || true } run_test 6b "|X| rmdir" test_7() { mkdir $DIR/$tdir - replay_barrier mds + replay_barrier $SINGLEMDS mcreate $DIR/$tdir/$tfile - fail mds + fail $SINGLEMDS $CHECKSTAT -t dir $DIR/$tdir || return 1 $CHECKSTAT -t file $DIR/$tdir/$tfile || return 2 rm -fr $DIR/$tdir @@ -239,11 +264,11 @@ test_7() { run_test 7 "mkdir |X| contained create" test_8() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile mo_c & MULTIPID=$! 
sleep 1 - fail mds + fail $SINGLEMDS ls $DIR/$tfile $CHECKSTAT -t file $DIR/$tfile || return 1 kill -USR1 $MULTIPID || return 2 @@ -253,10 +278,10 @@ test_8() { run_test 8 "creat open |X| close" test_9() { - replay_barrier mds + replay_barrier $SINGLEMDS mcreate $DIR/$tfile local old_inum=`ls -i $DIR/$tfile | awk '{print $1}'` - fail mds + fail $SINGLEMDS local new_inum=`ls -i $DIR/$tfile | awk '{print $1}'` echo " old_inum == $old_inum, new_inum == $new_inum" @@ -273,12 +298,13 @@ run_test 9 "|X| create (same inum/gen)" test_10() { mcreate $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS mv $DIR/$tfile $DIR/$tfile-2 rm -f $DIR/$tfile - fail mds + fail $SINGLEMDS + $CHECKSTAT $DIR/$tfile && return 1 - $CHECKSTAT $DIR/$tfile-2 ||return 2 + $CHECKSTAT $DIR/$tfile-2 || return 2 rm $DIR/$tfile-2 return 0 } @@ -288,11 +314,11 @@ test_11() { mcreate $DIR/$tfile echo "old" > $DIR/$tfile mv $DIR/$tfile $DIR/$tfile-2 - replay_barrier mds + replay_barrier $SINGLEMDS echo "new" > $DIR/$tfile grep new $DIR/$tfile grep old $DIR/$tfile-2 - fail mds + fail $SINGLEMDS grep new $DIR/$tfile || return 1 grep old $DIR/$tfile-2 || return 2 } @@ -305,11 +331,11 @@ test_12() { # give multiop a chance to open sleep 1 rm -f $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS kill -USR1 $pid wait $pid || return 1 - fail mds + fail $SINGLEMDS [ -e $DIR/$tfile ] && return 2 return 0 } @@ -326,8 +352,8 @@ test_13() { sleep 1 chmod 0 $DIR/$tfile $CHECKSTAT -p 0 $DIR/$tfile - replay_barrier mds - fail mds + replay_barrier $SINGLEMDS + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 1 @@ -342,11 +368,11 @@ test_14() { # give multiop a chance to open sleep 1 rm -f $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS kill -USR1 $pid || return 1 wait $pid || return 2 - fail mds + fail $SINGLEMDS [ -e $DIR/$tfile ] && return 3 return 0 } @@ -358,12 +384,12 @@ test_15() { # give multiop a chance to open sleep 1 rm -f $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS touch $DIR/g11 || return 1 kill -USR1 $pid wait $pid || return 2 - fail mds + fail $SINGLEMDS [ -e $DIR/$tfile ] && return 3 touch $DIR/h11 || return 4 return 0 @@ -372,11 +398,11 @@ run_test 15 "open(O_CREAT), unlink |X| touch new, close" test_16() { - replay_barrier mds + replay_barrier $SINGLEMDS mcreate $DIR/$tfile munlink $DIR/$tfile mcreate $DIR/$tfile-2 - fail mds + fail $SINGLEMDS [ -e $DIR/$tfile ] && return 1 [ -e $DIR/$tfile-2 ] || return 2 munlink $DIR/$tfile-2 || return 3 @@ -384,12 +410,12 @@ test_16() { run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new" test_17() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile O_c & pid=$! # give multiop a chance to open sleep 1 - fail mds + fail $SINGLEMDS kill -USR1 $pid || return 1 wait $pid || return 2 $CHECKSTAT -t file $DIR/$tfile || return 3 @@ -398,17 +424,18 @@ test_17() { run_test 17 "|X| open(O_CREAT), |replay| close" test_18() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile O_tSc & pid=$! 
# give multiop a chance to open sleep 1 rm -f $DIR/$tfile touch $DIR/$tfile-2 || return 1 + echo "pid: $pid will close" kill -USR1 $pid wait $pid || return 2 - fail mds + fail $SINGLEMDS [ -e $DIR/$tfile ] && return 3 [ -e $DIR/$tfile-2 ] || return 4 # this touch frequently fails @@ -421,25 +448,25 @@ run_test 18 "|X| open(O_CREAT), unlink, touch new, close, touch, unlink" # bug 1855 (a simpler form of test_11 above) test_19() { - replay_barrier mds + replay_barrier $SINGLEMDS mcreate $DIR/$tfile echo "old" > $DIR/$tfile mv $DIR/$tfile $DIR/$tfile-2 grep old $DIR/$tfile-2 - fail mds + fail $SINGLEMDS grep old $DIR/$tfile-2 || return 2 } run_test 19 "|X| mcreate, open, write, rename " test_20() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile O_tSc & pid=$! # give multiop a chance to open sleep 1 rm -f $DIR/$tfile - fail mds + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 1 [ -e $DIR/$tfile ] && return 2 @@ -448,7 +475,7 @@ test_20() { run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)" test_21() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile O_tSc & pid=$! # give multiop a chance to open @@ -456,7 +483,7 @@ test_21() { rm -f $DIR/$tfile touch $DIR/g11 || return 1 - fail mds + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 2 [ -e $DIR/$tfile ] && return 3 @@ -471,10 +498,10 @@ test_22() { # give multiop a chance to open sleep 1 - replay_barrier mds + replay_barrier $SINGLEMDS rm -f $DIR/$tfile - fail mds + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 1 [ -e $DIR/$tfile ] && return 2 @@ -488,11 +515,11 @@ test_23() { # give multiop a chance to open sleep 1 - replay_barrier mds + replay_barrier $SINGLEMDS rm -f $DIR/$tfile touch $DIR/g11 || return 1 - fail mds + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 2 [ -e $DIR/$tfile ] && return 3 @@ -507,8 +534,8 @@ test_24() { # give multiop a chance to open sleep 1 - replay_barrier mds - fail mds + replay_barrier $SINGLEMDS + fail $SINGLEMDS rm -f $DIR/$tfile kill -USR1 $pid wait $pid || return 1 @@ -524,8 +551,8 @@ test_25() { sleep 1 rm -f $DIR/$tfile - replay_barrier mds - fail mds + replay_barrier $SINGLEMDS + fail $SINGLEMDS kill -USR1 $pid wait $pid || return 1 [ -e $DIR/$tfile ] && return 2 @@ -534,7 +561,7 @@ test_25() { run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)" test_26() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile-1 O_tSc & pid1=$! multiop $DIR/$tfile-2 O_tSc & @@ -546,7 +573,7 @@ test_26() { kill -USR1 $pid2 wait $pid2 || return 1 - fail mds + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 2 [ -e $DIR/$tfile-1 ] && return 3 @@ -556,7 +583,7 @@ test_26() { run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans)" test_27() { - replay_barrier mds + replay_barrier $SINGLEMDS multiop $DIR/$tfile-1 O_tSc & pid1=$! multiop $DIR/$tfile-2 O_tSc & @@ -566,7 +593,7 @@ test_27() { rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 - fail mds + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 1 kill -USR1 $pid2 @@ -584,13 +611,13 @@ test_28() { pid2=$! # give multiop a chance to open sleep 1 - replay_barrier mds + replay_barrier $SINGLEMDS rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 kill -USR1 $pid2 wait $pid2 || return 1 - fail mds + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 2 [ -e $DIR/$tfile-1 ] && return 3 @@ -606,11 +633,11 @@ test_29() { pid2=$! 
# give multiop a chance to open sleep 1 - replay_barrier mds + replay_barrier $SINGLEMDS rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 - fail mds + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 1 kill -USR1 $pid2 @@ -631,8 +658,8 @@ test_30() { rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 - replay_barrier mds - fail mds + replay_barrier $SINGLEMDS + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 1 kill -USR1 $pid2 @@ -652,9 +679,9 @@ test_31() { sleep 1 rm -f $DIR/$tfile-1 - replay_barrier mds + replay_barrier $SINGLEMDS rm -f $DIR/$tfile-2 - fail mds + fail $SINGLEMDS kill -USR1 $pid1 wait $pid1 || return 1 kill -USR1 $pid2 @@ -672,22 +699,24 @@ test_32() { pid1=$! multiop $DIR/$tfile O_c & pid2=$! - # give multiop a chance to open - sleep 1 + # give multiop a chance to open. + # 1 second is not enough, I increased it to 5, however in ideal word + # I should have to wait for open finish in more smart manner. --umka + sleep 5 mds_evict_client - df $MOUNT || df $MOUNT || return 1 + df $MOUNT || sleep 1 && df $MOUNT || return 1 kill -USR1 $pid1 kill -USR1 $pid2 - sleep 1 + sleep 5 return 0 } run_test 32 "close() notices client eviction; close() after client eviction" # Abort recovery before client complete test_33() { - replay_barrier mds + replay_barrier $SINGLEMDS touch $DIR/$tfile - fail_abort mds + fail_abort $SINGLEMDS # this file should be gone, because the replay was aborted $CHECKSTAT -t file $DIR/$tfile && return 1 return 0 @@ -701,8 +730,8 @@ test_34() { sleep 1 rm -f $DIR/$tfile - replay_barrier mds - fail_abort mds + replay_barrier $SINGLEMDS + fail_abort $SINGLEMDS kill -USR1 $pid [ -e $DIR/$tfile ] && return 1 sync @@ -721,7 +750,7 @@ test_35() { sync sleep 1 # give a chance to remove from MDS - fail_abort mds + fail_abort $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile && return 1 || true } run_test 35 "test recovery from llog for unlink op" @@ -729,10 +758,10 @@ run_test 35 "test recovery from llog for unlink op" # b=2432 resent cancel after replay uses wrong cookie, # so don't resend cancels test_36() { - replay_barrier mds + replay_barrier $SINGLEMDS touch $DIR/$tfile checkstat $DIR/$tfile - facet_failover mds + facet_failover $SINGLEMDS cancel_lru_locks MDC if dmesg | grep "unknown lock cookie"; then echo "cancel after replay failed" @@ -751,10 +780,10 @@ test_37() { sleep 1 rmdir $DIR/$tfile - replay_barrier mds + replay_barrier $SINGLEMDS # clear the dmesg buffer so we only see errors from this recovery dmesg -c >/dev/null - fail_abort mds + fail_abort $SINGLEMDS kill -USR1 $pid dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1 sync @@ -765,8 +794,8 @@ run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans test_38() { createmany -o $DIR/$tfile-%d 800 unlinkmany $DIR/$tfile-%d 0 400 - replay_barrier mds - fail mds + replay_barrier $SINGLEMDS + fail $SINGLEMDS unlinkmany $DIR/$tfile-%d 400 400 sleep 2 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true @@ -775,9 +804,9 @@ run_test 38 "test recovery from unlink llog (test llog_gen_rec) " test_39() { createmany -o $DIR/$tfile-%d 800 - replay_barrier mds + replay_barrier $SINGLEMDS unlinkmany $DIR/$tfile-%d 0 400 - fail mds + fail $SINGLEMDS unlinkmany $DIR/$tfile-%d 400 400 sleep 2 $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true @@ -797,7 +826,7 @@ test_40(){ writeme -s $MOUNT/${tfile}-2 & WRITE_PID=$! 
sleep 1 - facet_failover mds + facet_failover $SINGLEMDS #define OBD_FAIL_MDS_CONNECT_NET 0x117 do_facet mds "sysctl -w lustre.fail_loc=0x80000117" kill -USR1 $PID @@ -850,30 +879,30 @@ run_test 41 "read from a valid osc while other oscs are invalid" # test MDS recovery after ost failure test_42() { - blocks=`df $MOUNT | tail -1 | awk '{ print $1 }'` + blocks=`df $MOUNT | tail -n 1 | awk '{ print $1 }'` createmany -o $DIR/$tfile-%d 800 replay_barrier ost unlinkmany $DIR/$tfile-%d 0 400 facet_failover ost # osc is evicted, fs is smaller - blocks_after=`df $MOUNT | tail -1 | awk '{ print $1 }'` + blocks_after=`df $MOUNT | tail -n 1 | awk '{ print $1 }'` [ $blocks_after -lt $blocks ] || return 1 echo wait for MDS to timeout and recover sleep $((TIMEOUT * 2)) unlinkmany $DIR/$tfile-%d 400 400 - $CHECKSTAT -t file $DIR/$tfile-* && return 1 || true + $CHECKSTAT -t file $DIR/$tfile-* && return 2 || true } run_test 42 "recovery after ost failure" # b=2530 -# directory orphans can't be unlinked from PENDING directory +# timeout in MDS/OST recovery RPC will LBUG MDS test_43() { - replay_barrier mds + replay_barrier $SINGLEMDS # OBD_FAIL_OST_CREATE_NET 0x204 do_facet ost "sysctl -w lustre.fail_loc=0x80000204" - facet_failover mds + facet_failover $SINGLEMDS df $MOUNT || return 1 sleep 10 do_facet ost "sysctl -w lustre.fail_loc=0" @@ -892,6 +921,411 @@ test_44() { } run_test 44 "race in target handle connect" +# Handle failed close +test_45() { + mdcdev=`awk '/mds_svc_MNT/ {print $1}' < /proc/fs/lustre/devices` + $LCTL --device $mdcdev recover + + multiop $DIR/$tfile O_c & + pid=$! + sleep 1 + + # This will cause the CLOSE to fail before even + # allocating a reply buffer + $LCTL --device $mdcdev deactivate + + # try the close + kill -USR1 $pid + wait $pid || return 1 + + $LCTL --device $mdcdev activate + sleep 1 + + $CHECKSTAT -t file $DIR/$tfile || return 2 + return 0 +} +run_test 45 "Handle failed close" + +test_46() { + dmesg -c >/dev/null + drop_reply "touch $DIR/$tfile" + fail $SINGLEMDS + # ironically, the previous test, 45, will cause a real forced close, + # so just look for one for this test + dmesg | grep -i "force closing client file handle for $tfile" && return 1 + return 0 +} +run_test 46 "Don't leak file handle after open resend (3325)" + +# b=2824 +test_47() { + + # create some files to make sure precreate has been done on all + # OSTs. (just in case this test is run independently) + createmany -o $DIR/$tfile 20 || return 1 + + # OBD_FAIL_OST_CREATE_NET 0x204 + fail ost + do_facet ost "sysctl -w lustre.fail_loc=0x80000204" + df $MOUNT || return 2 + + # let the MDS discover the OST failure, attempt to recover, fail + # and recover again. + sleep $((3 * TIMEOUT)) + + # Without 2824, this createmany would hang + createmany -o $DIR/$tfile 20 || return 3 + unlinkmany $DIR/$tfile 20 || return 4 + + do_facet ost "sysctl -w lustre.fail_loc=0" + return 0 +} +run_test 47 "MDS->OSC failure during precreate cleanup (2824)" + + +test_48() { + createmany -o $DIR/${tfile}- 100 + $CHECKSTAT $DIR/${tfile}-99 || return 1 + mds_evict_client + df $MOUNT || echo "first df failed" + sleep 1 + df $MOUNT || return 2 + sleep 1 + $CHECKSTAT $DIR/${tfile}-99 || return 3 + + dmesg -c >/dev/null + replay_barrier $SINGLEMDS + fail $SINGLEMDS + unlinkmany $DIR/${tfile}- 100 || return 4 + if dmesg | grep "back in time"; then + echo "server went back in time!" 
+ return 5 + fi + return 0 +} +run_test 48 "Don't lose transno when client is evicted (2525)" + +# b=3550 - replay of unlink +test_49() { + replay_barrier $SINGLEMDS + createmany -o $DIR/$tfile-%d 400 || return 1 + unlinkmany $DIR/$tfile-%d 0 400 || return 2 + fail $SINGLEMDS + $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true +} +run_test 49 "re-write records to llog as written during fail" + +test_50() { + local osc_dev=`$LCTL device_list | \ + awk '(/ost_svc_$SINGLEMDS_svc/){print $4}' ` + $LCTL --device %$osc_dev recover && $LCTL --device %$osc_dev recover + # give the mds_lov_sync threads a chance to run + sleep 5 +} +run_test 50 "Double OSC recovery, don't LASSERT (3812)" + +# bug 3462 - simultaneous MDC requests +test_51a() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + touch $DIR/${tdir}-2/f + multiop $DIR/${tdir}-1/f O_c & + pid=$! + # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115" + kill -USR1 $pid + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 1 + + fail $SINGLEMDS + + wait $pid || return 2 + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51a "|X| close request while two MDC requests in flight" + +test_51b() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/$tdir-1 + mkdir -p $DIR/$tdir-2 + multiop $DIR/$tdir-1/f O_c & + pid=$! + + # give multiop a chance to open + # 1 second seems to be not enough, we met already such a cases + # --umka + sleep 5 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107" + touch $DIR/${tdir}-2/f & + usleep 500 + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + kill -USR1 $pid + wait $pid || return 1 + + fail $SINGLEMDS + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51b "|X| open request while two MDC requests in flight" + +test_51c() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + pid=$! + # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107" + touch $DIR/${tdir}-2/f & + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115" + kill -USR1 $pid + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + fail $SINGLEMDS + + wait $pid || return 1 + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51c "|X| open request and close request while two MDC requests in flight" + +test_51d() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + touch $DIR/${tdir}-2/f + multiop $DIR/${tdir}-1/f O_c & + pid=$! + # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122" + kill -USR1 $pid + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + #$CHECKSTAT -t file $DIR/${tdir}-2/f || return 1 + + fail $SINGLEMDS + + wait $pid || return 2 + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51d "|X| close reply while two MDC requests in flight" + +test_51e() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/$tdir-1 + mkdir -p $DIR/$tdir-2 + multiop $DIR/$tdir-1/f O_c & + pid=$! 
+ # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119" + touch $DIR/${tdir}-2/f & + usleep 500 + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + kill -USR1 $pid + wait $pid || return 1 + + fail $SINGLEMDS + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51e "|X| open reply while two MDC requests in flight" + +test_51f() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + pid=$! + # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119" + touch $DIR/${tdir}-2/f & + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122" + kill -USR1 $pid + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + fail $SINGLEMDS + + wait $pid || return 1 + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51f "|X| open reply and close reply while two MDC requests in flight" + +test_51g() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + pid=$! + # give multiop a chance to open + sleep 1 + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119" + touch $DIR/${tdir}-2/f & + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000115" + kill -USR1 $pid + do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + + fail $SINGLEMDS + + wait $pid || return 1 + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3 + rm -rf $DIR/${tdir}-* +} +run_test 51g "|X| open reply and close request while two MDC requests in flight" + +test_51h() { + replay_barrier_nodf $SINGLEMDS + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + pid=$! 
+ # give multiop a chance to open
+ sleep 1
+
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
+ touch $DIR/${tdir}-2/f &
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
+
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000122"
+ kill -USR1 $pid
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
+
+ fail $SINGLEMDS
+
+ wait $pid || return 1
+ $CHECKSTAT -t file $DIR/${tdir}-1/f || return 2
+ $CHECKSTAT -t file $DIR/${tdir}-2/f || return 3
+ rm -rf $DIR/${tdir}-*
+}
+run_test 51h "|X| open request and close reply while two MDC requests in flight"
+
+# b3764 timed out lock replay
+test_52() {
+ touch $DIR/$tfile
+ cancel_lru_locks MDC
+
+ multiop $DIR/$tfile s
+ replay_barrier $SINGLEMDS
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000030c"
+ fail $SINGLEMDS
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
+
+ $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true
+}
+run_test 52 "time out lock replay (3764)"
+
+test_53() {
+ replay_barrier_nodf $SINGLEMDS
+ f1=$DIR/${tfile}-1
+ cat <<EOF > $f1
+#!/bin/sh
+true
+EOF
+ chmod +x $f1
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000107"
+ $f1 || return 1
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0"
+
+ fail $SINGLEMDS
+ rm -f $f1
+}
+run_test 53 "|X| open request and close reply while two MDC requests in flight"
+
+test_54() {
+ replay_barrier $SINGLEMDS
+ createmany -o $DIR/$tfile 20
+ unlinkmany $DIR/$tfile 20
+ fail $SINGLEMDS
+}
+run_test 54 "|X| open request and close reply while two MDC requests in flight"
+
+#b3440 ASSERTION(rec->ur_fid2->id) failed
+test_55() {
+ sysctl -w portals.debug=-1 portals.debug_mb=25
+ ln -s foo $DIR/$tfile
+ replay_barrier $SINGLEMDS
+ #drop_reply "cat $DIR/$tfile"
+ fail $SINGLEMDS
+ sleep 10
+}
+run_test 55 "don't replay a symlink open request (3440)"
+
+#b3761 ASSERTION(hash != 0) failed
+test_56() {
+# OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012b"
+ touch $DIR/$tfile
+ pid=$!
+ # give a chance for touch to run
+ sleep 5
+ do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0"
+ wait $pid || return 1
+ rm $DIR/$tfile
+ return 0
+}
+run_test 56 "let MDS_CHECK_RESENT return the original return code instead of 0"
+
+#b7312 LASSERT(!IS_ERR(parent)) in reconstruct_open()
+test_57() {
+ mkdir $DIR/$tdir || return 1
+ touch $DIR/$tdir/$tfile || return 2
+ multiop $DIR/$tdir/$tfile o_ &
+ MULTIPID=$!
+ sleep 1
+ rm -f $DIR/$tdir/$tfile || return 3
+ rm -rf $DIR/$tdir || return 4
+ # drop first reint reply
+ sysctl -w lustre.fail_loc=0x0000030c
+ facet_failover $SINGLEMDS
+ df $MOUNT || return 1
+ kill -USR1 $MULTIPID || return 5
+ wait $MULTIPID || return 6
+ sysctl -w lustre.fail_loc=0
+}
+run_test 57 "open orphan in reconstruct_open()"
+
+test_58() {
+ capa=`cat /proc/fs/lustre/mds/mds*/capa`
+ if [ "$capa" == "0" ]; then
+ echo "skip testing - no capa enabled"
+ return 0
+ fi
+ sysctl -w lustre.fail_loc=0x0000030c
+ rc=multiop $DIR/$tfile Ow
+ sysctl -w lustre.fail_loc=0
+ return rc
+}
+run_test 58 "open+create resent should return capa"
+
 equals_msg test complete, cleaning up
 $CLEANUP