X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Freplay-single.sh;h=1e4e91fa9c57e8eafcd0d29c2a9c1c70e20b289e;hp=78b2c343c2ac287cda8289c0a27ff4dad5ca2a23;hb=8f6d85eebc312b64d8e8a35b0be3ae137a50a45c;hpb=f151a8e900c8fc3aa95377ae9444417975207ce1 diff --git a/lustre/tests/replay-single.sh b/lustre/tests/replay-single.sh index 78b2c34..1e4e91f 100755 --- a/lustre/tests/replay-single.sh +++ b/lustre/tests/replay-single.sh @@ -7,43 +7,54 @@ set -e # This test needs to be run on the client # SAVE_PWD=$PWD -LUSTRE=${LUSTRE:-`dirname $0`/..} +LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)} SETUP=${SETUP:-} CLEANUP=${CLEANUP:-} . $LUSTRE/tests/test-framework.sh init_test_env $@ . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh} +init_logging CHECK_GRANT=${CHECK_GRANT:-"yes"} GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""} +require_dsh_mds || exit 0 # Skip these tests -# bug number: 2766 4176 -ALWAYS_EXCEPT="0b 39 $REPLAY_SINGLE_EXCEPT" +# bug number: 17466 18857 +ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT" -# 63 min 7 min AT AT AT AT" -[ "SLOW" = "no" ] && EXCEPT="$EXCEPT 1 2 3 4 6 6b 12 16 44 44b 65 66 67 68" +if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then + CONFIG_EXCEPTIONS="0b 42 47 61a 61c" + echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. " + echo "Except the tests: $CONFIG_EXCEPTIONS" + ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS" +fi + +# 63 min 7 min AT AT AT AT" +[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68" build_test_filter -cleanup_and_setup_lustre +check_and_setup_lustre mkdir -p $DIR -rm -rf $DIR/${TESTSUITE}/[df][0-9]* # bug 13798 new t-f tdir staff +assert_DIR rm -rf $DIR/[df][0-9]* -test_0() { +test_0a() { # was test_0 sleep 10 mkdir $DIR/$tfile replay_barrier $SINGLEMDS fail $SINGLEMDS rmdir $DIR/$tfile } -run_test 0 "empty replay" +run_test 0a "empty replay" test_0b() { - # this test attempts to trigger a race in the precreation code, + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + # this test attempts to trigger a race in the precreation code, # and must run before any other objects are created on the filesystem fail ost1 createmany -o $DIR/$tfile 20 || return 1 @@ -55,15 +66,13 @@ seq_set_width() { local mds=$1 local width=$2 - local file=`ls /proc/fs/lustre/seq/cli-srv-$mds-mdc-*/width` - echo $width > $file + lctl set_param -n seq.cli-srv-$mds-mdc-*.width=$width } seq_get_width() { local mds=$1 - local file=`ls /proc/fs/lustre/seq/cli-srv-$mds-mdc-*/width` - cat $file + lctl get_param -n seq.cli-srv-$mds-mdc-*.width } # This test should pass for single-mds and multi-mds configs. @@ -74,15 +83,15 @@ seq_get_width() # (1) fld_create replay should happen; # # (2) fld_create replay should not return -EEXISTS, if it does -# this means sequence manager recovery code is buggy and allocated +# this means sequence manager recovery code is buggy and allocated # same sequence two times after recovery. # # multi-mds # --------- -# (1) fld_create replay may not happen, because its home MDS is +# (1) fld_create replay may not happen, because its home MDS is # MDS2 which is not involved to revovery; # -# (2) as fld_create does not happen on MDS1, it does not make any +# (2) as fld_create does not happen on MDS1, it does not make any # problem. 
test_0c() { local label=`mdsdevlabel 1` @@ -90,35 +99,37 @@ test_0c() { replay_barrier $SINGLEMDS local sw=`seq_get_width $label` - - # make seq manager switch to next sequence each + + # make seq manager switch to next sequence each # time as new fid is needed. seq_set_width $label 1 - - # make sure that fld has created at least one new + + # make sure that fld has created at least one new # entry on server touch $DIR/$tfile || return 2 seq_set_width $label $sw - + # fail $SINGLEMDS and start recovery, replay RPCs, etc. fail $SINGLEMDS - + # wait for recovery finish sleep 10 df $MOUNT - - # flush fld cache and dentry cache to make it lookup + + # flush fld cache and dentry cache to make it lookup # created entry instead of revalidating existent one umount $MOUNT zconf_mount `hostname` $MOUNT - - # issue lookup which should call fld lookup which - # should fail if client did not replay fld create + + # issue lookup which should call fld lookup which + # should fail if client did not replay fld create # correctly and server has no fld entry touch $DIR/$tfile || return 3 rm $DIR/$tfile || return 4 } +start_full_debug_logging run_test 0c "fld create" +stop_full_debug_logging test_1() { replay_barrier $SINGLEMDS @@ -149,21 +160,22 @@ test_2b() { run_test 2b "touch" test_3a() { + local file=$DIR/$tfile replay_barrier $SINGLEMDS - mcreate $DIR/$tfile - o_directory $DIR/$tfile + mcreate $file + openfile -f O_DIRECTORY $file fail $SINGLEMDS - $CHECKSTAT -t file $DIR/$tfile || return 2 - rm $DIR/$tfile + $CHECKSTAT -t file $file || return 2 + rm $file } run_test 3a "replay failed open(O_DIRECTORY)" test_3b() { replay_barrier $SINGLEMDS #define OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000114" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000114" touch $DIR/$tfile - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0" fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile && return 2 return 0 @@ -173,9 +185,9 @@ run_test 3b "replay failed open -ENOMEM" test_3c() { replay_barrier $SINGLEMDS #define OBD_FAIL_MDS_ALLOC_OBDO | OBD_FAIL_ONCE - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000128" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000128" touch $DIR/$tfile - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0" fail $SINGLEMDS $CHECKSTAT -t file $DIR/$tfile && return 2 @@ -183,17 +195,17 @@ test_3c() { } run_test 3c "replay failed open -ENOMEM" -test_4() { +test_4a() { # was test_4 replay_barrier $SINGLEMDS for i in `seq 10`; do echo "tag-$i" > $DIR/$tfile-$i - done + done fail $SINGLEMDS for i in `seq 10`; do grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i" - done + done } -run_test 4 "|x| 10 open(O_CREAT)s" +run_test 4a "|x| 10 open(O_CREAT)s" test_4b() { replay_barrier $SINGLEMDS @@ -203,17 +215,17 @@ test_4b() { } run_test 4b "|x| rm 10 files" -# The idea is to get past the first block of precreated files on both +# The idea is to get past the first block of precreated files on both # osts, and then replay. 
test_5() { replay_barrier $SINGLEMDS for i in `seq 220`; do echo "tag-$i" > $DIR/$tfile-$i - done + done fail $SINGLEMDS for i in `seq 220`; do - grep -q "tag-$i" $DIR/$tfile-$i || error "f1c-$i" - done + grep -q "tag-$i" $DIR/$tfile-$i || error "$tfile-$i" + done rm -rf $DIR/$tfile-* sleep 3 # waiting for commitment of removal @@ -221,7 +233,8 @@ test_5() { run_test 5 "|x| 220 open(O_CREAT)" -test_6() { +test_6a() { # was test_6 + mkdir -p $DIR/$tdir replay_barrier $SINGLEMDS mcreate $DIR/$tdir/$tfile fail $SINGLEMDS @@ -230,17 +243,19 @@ test_6() { sleep 2 # waiting for log process thread } -run_test 6 "mkdir + contained create" +run_test 6a "mkdir + contained create" test_6b() { + mkdir -p $DIR/$tdir replay_barrier $SINGLEMDS rm -rf $DIR/$tdir fail $SINGLEMDS - $CHECKSTAT -t dir $DIR/$tdir && return 1 || true + $CHECKSTAT -t dir $DIR/$tdir && return 1 || true } run_test 6b "|X| rmdir" test_7() { + mkdir -p $DIR/$tdir replay_barrier $SINGLEMDS mcreate $DIR/$tdir/$tfile fail $SINGLEMDS @@ -254,9 +269,8 @@ test_8() { # make sure no side-effect from previous test. rm -f $DIR/$tfile replay_barrier $SINGLEMDS - multiop $DIR/$tfile mo_c & + multiop_bg_pause $DIR/$tfile mo_c || return 4 MULTIPID=$! - sleep 1 fail $SINGLEMDS ls $DIR/$tfile $CHECKSTAT -t file $DIR/$tfile || return 1 @@ -304,7 +318,7 @@ test_11() { mv $DIR/$tfile $DIR/$tfile-2 replay_barrier $SINGLEMDS echo "new" > $DIR/$tfile - grep new $DIR/$tfile + grep new $DIR/$tfile grep old $DIR/$tfile-2 fail $SINGLEMDS grep new $DIR/$tfile || return 1 @@ -313,11 +327,9 @@ test_11() { run_test 11 "create open write rename |X| create-old-name read" test_12() { - mcreate $DIR/$tfile - multiop $DIR/$tfile o_tSc & + mcreate $DIR/$tfile + multiop_bg_pause $DIR/$tfile o_tSc || return 3 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile replay_barrier $SINGLEMDS kill -USR1 $pid @@ -331,13 +343,11 @@ run_test 12 "open, unlink |X| close" # 1777 - replay open after committed chmod that would make -# a regular open a failure +# a regular open a failure test_13() { - mcreate $DIR/$tfile - multiop $DIR/$tfile O_wc & + mcreate $DIR/$tfile + multiop_bg_pause $DIR/$tfile O_wc || return 3 pid=$! - # give multiop a chance to open - sleep 1 chmod 0 $DIR/$tfile $CHECKSTAT -p 0 $DIR/$tfile replay_barrier $SINGLEMDS @@ -346,15 +356,14 @@ test_13() { wait $pid || return 1 $CHECKSTAT -s 1 -p 0 $DIR/$tfile || return 2 + rm $DIR/$tfile || return 4 return 0 } run_test 13 "open chmod 0 |x| write close" test_14() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 4 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile replay_barrier $SINGLEMDS kill -USR1 $pid || return 1 @@ -367,10 +376,8 @@ test_14() { run_test 14 "open(O_CREAT), unlink |X| close" test_15() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 5 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile replay_barrier $SINGLEMDS touch $DIR/g11 || return 1 @@ -399,10 +406,8 @@ run_test 16 "|X| open(O_CREAT), unlink, touch new, unlink new" test_17() { replay_barrier $SINGLEMDS - multiop $DIR/$tfile O_c & + multiop_bg_pause $DIR/$tfile O_c || return 4 pid=$! - # give multiop a chance to open - sleep 1 fail $SINGLEMDS kill -USR1 $pid || return 1 wait $pid || return 2 @@ -413,10 +418,8 @@ run_test 17 "|X| open(O_CREAT), |replay| close" test_18() { replay_barrier $SINGLEMDS - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 8 pid=$! 
- # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile touch $DIR/$tfile-2 || return 1 echo "pid: $pid will close" @@ -446,12 +449,10 @@ test_19() { } run_test 19 "|X| mcreate, open, write, rename " -test_20() { +test_20a() { # was test_20 replay_barrier $SINGLEMDS - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 3 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile fail $SINGLEMDS @@ -460,7 +461,7 @@ test_20() { [ -e $DIR/$tfile ] && return 2 return 0 } -run_test 20 "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)" +run_test 20a "|X| open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)" test_20b() { # bug 10480 BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'` @@ -474,15 +475,11 @@ test_20b() { # bug 10480 lfs getstripe $DIR/$tfile || return 1 rm -f $DIR/$tfile || return 2 # make it an orphan mds_evict_client - df -P $DIR || df -P $DIR || true # reconnect + client_up || client_up || true # reconnect fail $SINGLEMDS # start orphan recovery - df -P $DIR || df -P $DIR || true # reconnect - wait_mds_recovery_done || error "MDS recovery not done" - - # FIXME just because recovery is done doesn't mean we've finished - # orphan cleanup. Fake it with a sleep for now... - sleep 10 + wait_recovery_complete $SINGLEMDS || error "MDS recovery not done" + wait_mds_ost_sync || return 3 AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'` log "before $BEFOREUSED, after $AFTERUSED" [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \ @@ -492,19 +489,17 @@ test_20b() { # bug 10480 run_test 20b "write, unlink, eviction, replay, (test mds_cleanup_orphans)" test_20c() { # bug 10480 - multiop $DIR/$tfile Ow_c & + multiop_bg_pause $DIR/$tfile Ow_c || return 1 pid=$! - # give multiop a chance to open - sleep 1 ls -la $DIR/$tfile mds_evict_client - - df -P $DIR || df -P $DIR || true # reconnect + client_up || client_up || true # reconnect kill -USR1 $pid - test -s $DIR/$tfile || error "File was truncated" + wait $pid || return 1 + [ -s $DIR/$tfile ] || error "File was truncated" return 0 } @@ -512,10 +507,8 @@ run_test 20c "check that client eviction does not affect file content" test_21() { replay_barrier $SINGLEMDS - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 5 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile touch $DIR/g11 || return 1 @@ -529,10 +522,8 @@ test_21() { run_test 21 "|X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans)" test_22() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 3 pid=$! - # give multiop a chance to open - sleep 1 replay_barrier $SINGLEMDS rm -f $DIR/$tfile @@ -546,10 +537,8 @@ test_22() { run_test 22 "open(O_CREAT), |X| unlink, replay, close (test mds_cleanup_orphans)" test_23() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 5 pid=$! - # give multiop a chance to open - sleep 1 replay_barrier $SINGLEMDS rm -f $DIR/$tfile @@ -565,10 +554,8 @@ test_23() { run_test 23 "open(O_CREAT), |X| unlink touch new, replay, close (test mds_cleanup_orphans)" test_24() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 3 pid=$! - # give multiop a chance to open - sleep 1 replay_barrier $SINGLEMDS fail $SINGLEMDS @@ -581,10 +568,8 @@ test_24() { run_test 24 "open(O_CREAT), replay, unlink, close (test mds_cleanup_orphans)" test_25() { - multiop $DIR/$tfile O_tSc & + multiop_bg_pause $DIR/$tfile O_tSc || return 3 pid=$! 
- # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile replay_barrier $SINGLEMDS @@ -598,12 +583,10 @@ run_test 25 "open(O_CREAT), unlink, replay, close (test mds_cleanup_orphans)" test_26() { replay_barrier $SINGLEMDS - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 kill -USR1 $pid2 @@ -620,12 +603,10 @@ run_test 26 "|X| open(O_CREAT), unlink two, close one, replay, close one (test m test_27() { replay_barrier $SINGLEMDS - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 @@ -641,12 +622,10 @@ test_27() { run_test 27 "|X| open(O_CREAT), unlink two, replay, close two (test mds_cleanup_orphans)" test_28() { - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 replay_barrier $SINGLEMDS rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 @@ -663,12 +642,10 @@ test_28() { run_test 28 "open(O_CREAT), |X| unlink two, close one, replay, close one (test mds_cleanup_orphans)" test_29() { - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 replay_barrier $SINGLEMDS rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 @@ -685,12 +662,10 @@ test_29() { run_test 29 "open(O_CREAT), |X| unlink two, replay, close two (test mds_cleanup_orphans)" test_30() { - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile-1 rm -f $DIR/$tfile-2 @@ -707,12 +682,10 @@ test_30() { run_test 30 "open(O_CREAT) two, unlink two, replay, close two (test mds_cleanup_orphans)" test_31() { - multiop $DIR/$tfile-1 O_tSc & + multiop_bg_pause $DIR/$tfile-1 O_tSc || return 5 pid1=$! - multiop $DIR/$tfile-2 O_tSc & + multiop_bg_pause $DIR/$tfile-2 O_tSc || return 6 pid2=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile-1 replay_barrier $SINGLEMDS @@ -731,35 +704,34 @@ run_test 31 "open(O_CREAT) two, unlink one, |X| unlink one, close two (test mds_ # tests for bug 2104; completion without crashing is success. The close is # stale, but we always return 0 for close, so the app never sees it. test_32() { - multiop $DIR/$tfile O_c & + multiop_bg_pause $DIR/$tfile O_c || return 2 pid1=$! - multiop $DIR/$tfile O_c & + multiop_bg_pause $DIR/$tfile O_c || return 3 pid2=$! 
- # give multiop a chance to open - sleep 1 mds_evict_client - df $MOUNT || sleep 1 && df $MOUNT || return 1 + client_up || client_up || return 1 kill -USR1 $pid1 kill -USR1 $pid2 - sleep 1 + wait $pid1 || return 4 + wait $pid2 || return 5 return 0 } run_test 32 "close() notices client eviction; close() after client eviction" # Abort recovery before client complete -test_33() { +test_33a() { # was test_33 replay_barrier $SINGLEMDS createmany -o $DIR/$tfile-%d 100 fail_abort $SINGLEMDS # this file should be gone, because the replay was aborted - $CHECKSTAT -t file $DIR/$tfile-* && return 3 + $CHECKSTAT -t file $DIR/$tfile-* && return 3 unlinkmany $DIR/$tfile-%d 0 100 return 0 } -run_test 33 "abort recovery before client does replay" +run_test 33a "abort recovery before client does replay" -# Stale FID sequence -test_33a() { +# Stale FID sequence bug 15962 +test_33b() { # was test_33a replay_barrier $SINGLEMDS createmany -o $DIR/$tfile-%d 10 fail_abort $SINGLEMDS @@ -769,30 +741,29 @@ test_33a() { unlinkmany $DIR/$tfile-%d 0 10 return 0 } -run_test 33a "fid shouldn't be reused after abort recovery" +run_test 33b "fid shouldn't be reused after abort recovery" test_34() { - multiop $DIR/$tfile O_c & + multiop_bg_pause $DIR/$tfile O_c || return 2 pid=$! - # give multiop a chance to open - sleep 1 rm -f $DIR/$tfile replay_barrier $SINGLEMDS fail_abort $SINGLEMDS kill -USR1 $pid + wait $pid || return 3 [ -e $DIR/$tfile ] && return 1 sync return 0 } run_test 34 "abort recovery before client does replay (test mds_cleanup_orphans)" -# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog +# bug 2278 - generate one orphan on OST, then destroy it during recovery from llog test_35() { touch $DIR/$tfile #define OBD_FAIL_MDS_REINT_NET_REP 0x119 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000119" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000119" rm -f $DIR/$tfile & sleep 1 sync @@ -811,7 +782,7 @@ test_36() { checkstat $DIR/$tfile facet_failover $SINGLEMDS cancel_lru_locks mdc - if dmesg | grep "unknown lock cookie"; then + if dmesg | grep "unknown lock cookie"; then echo "cancel after replay failed" return 1 fi @@ -822,10 +793,8 @@ run_test 36 "don't resend cancel" # directory orphans can't be unlinked from PENDING directory test_37() { rmdir $DIR/$tfile 2>/dev/null - multiop $DIR/$tfile dD_c & + multiop_bg_pause $DIR/$tfile dD_c || return 2 pid=$! - # give multiop a chance to open - sleep 1 rmdir $DIR/$tfile replay_barrier $SINGLEMDS @@ -834,10 +803,13 @@ test_37() { fail_abort $SINGLEMDS kill -USR1 $pid dmesg | grep "mds_unlink_orphan.*error .* unlinking orphan" && return 1 + wait $pid || return 3 sync return 0 } +start_full_debug_logging run_test 37 "abort recovery before client does replay (test mds_cleanup_orphans for directories)" +stop_full_debug_logging test_38() { createmany -o $DIR/$tfile-%d 800 @@ -862,12 +834,12 @@ test_39() { # bug 4176 run_test 39 "test recovery from unlink llog (test llog_gen_rec) " count_ost_writes() { - awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }' $LPROC/osc/*/stats + lctl get_param -n osc.*.stats | awk -vwrites=0 '/ost_write/ { writes += $2 } END { print writes; }' } #b=2477,2532 test_40(){ - $LCTL mark multiop $MOUNT/$tfile OS_c + $LCTL mark multiop $MOUNT/$tfile OS_c multiop $MOUNT/$tfile OS_c & PID=$! 
writeme -s $MOUNT/${tfile}-2 & @@ -875,13 +847,13 @@ test_40(){ sleep 1 facet_failover $SINGLEMDS #define OBD_FAIL_MDS_CONNECT_NET 0x117 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000117" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117" kill -USR1 $PID stat1=`count_ost_writes` sleep $TIMEOUT stat2=`count_ost_writes` echo "$stat1, $stat2" - if [ $stat1 -lt $stat2 ]; then + if [ $stat1 -lt $stat2 ]; then echo "writes continuing during recovery" RC=0 else @@ -890,7 +862,7 @@ test_40(){ fi echo "waiting for writeme $WRITE_PID" kill $WRITE_PID - wait $WRITE_PID + wait $WRITE_PID echo "waiting for multiop $PID" wait $PID || return 2 @@ -910,20 +882,20 @@ run_test 40 "cause recovery in ptlrpc, ensure IO continues" # assert on trying to unlock the unlocked page. test_41() { [ $OSTCOUNT -lt 2 ] && \ - skip "skipping test 41: we don't have a second OST to test with" && \ + skip_env "skipping test 41: we don't have a second OST to test with" && \ return local f=$MOUNT/$tfile # make sure the start of the file is ost1 - lfs setstripe $f $((128 * 1024)) 0 0 + lfs setstripe $f -s $((128 * 1024)) -i 0 do_facet client dd if=/dev/zero of=$f bs=4k count=1 || return 3 cancel_lru_locks osc # fail ost2 and read from ost1 - local osc2dev=`do_facet mds "grep ${ost2_svc}-osc-MDT0000 $LPROC/devices" | awk '{print $1}'` - [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && cat $LPROC/devices && return 4 - do_facet mds $LCTL --device $osc2dev deactivate || return 1 + local osc2dev=`do_facet $SINGLEMDS "lctl get_param -n devices | grep ${ost2_svc}-osc-MDT0000" | awk '{print $1}'` + [ -z "$osc2dev" ] && echo "OST: $ost2_svc" && lctl get_param -n devices && return 4 + do_facet $SINGLEMDS $LCTL --device $osc2dev deactivate || return 1 do_facet client dd if=$f of=/dev/null bs=4k count=1 || return 3 - do_facet mds $LCTL --device $osc2dev activate || return 2 + do_facet $SINGLEMDS $LCTL --device $osc2dev activate || return 2 return 0 } run_test 41 "read from a valid osc while other oscs are invalid" @@ -935,9 +907,9 @@ test_42() { replay_barrier ost1 unlinkmany $DIR/$tfile-%d 0 400 debugsave - sysctl -w lnet.debug=-1 + lctl set_param debug=-1 facet_failover ost1 - + # osc is evicted, fs is smaller (but only with failout OSTs (bug 7287) #blocks_after=`df -P $MOUNT | tail -n 1 | awk '{ print $2 }'` #[ $blocks_after -lt $blocks ] || return 1 @@ -951,57 +923,79 @@ run_test 42 "recovery after ost failure" # timeout in MDS/OST recovery RPC will LBUG MDS test_43() { # bug 2530 + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + replay_barrier $SINGLEMDS # OBD_FAIL_OST_CREATE_NET 0x204 - do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204" + do_facet ost1 "lctl set_param fail_loc=0x80000204" fail $SINGLEMDS sleep 10 - do_facet ost1 "sysctl -w lustre.fail_loc=0" + do_facet ost1 "lctl set_param fail_loc=0" return 0 } run_test 43 "mds osc import failure during recovery; don't LBUG" -test_44() { - mdcdev=`awk '/MDT0000-mdc-/ {print $1}' $LPROC/devices` - [ "$mdcdev" ] || exit 2 +test_44a() { # was test_44 + local at_max_saved=0 + + mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'` + [ "$mdcdev" ] || return 2 + [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; } + + # adaptive timeouts slow this way down + if at_is_enabled; then + at_max_saved=$(at_max_get mds) + at_max_set 40 mds + fi + for i in `seq 1 10`; do - #define OBD_FAIL_TGT_CONN_RACE 0x701 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000701" - $LCTL --device $mdcdev recover - df $MOUNT + echo "$i of 
10 ($(date +%s))" + do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service" + #define OBD_FAIL_TGT_CONN_RACE 0x701 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701" + # lctl below may fail, it is valid case + $LCTL --device $mdcdev recover + df $MOUNT done - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds return 0 } -run_test 44 "race in target handle connect" +run_test 44a "race in target handle connect" test_44b() { - mdcdev=`awk '/MDT0000-mdc-/ {print $1}' $LPROC/devices` - [ "$mdcdev" ] || exit 2 + local mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'` + [ "$mdcdev" ] || return 2 + [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; } + for i in `seq 1 10`; do - #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x80000704" - $LCTL --device $mdcdev recover - df $MOUNT + echo "$i of 10 ($(date +%s))" + do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service" + #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704" + # lctl below may fail, it is valid case + $LCTL --device $mdcdev recover + df $MOUNT done - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0" return 0 } run_test 44b "race in target handle connect" # Handle failed close test_45() { - mdcdev=`awk '/MDT0000-mdc-/ {print $1}' $LPROC/devices` - [ "$mdcdev" ] || exit 2 - $LCTL --device $mdcdev recover + mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'` + [ "$mdcdev" ] || return 2 + [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; } + + $LCTL --device $mdcdev recover || return 6 - multiop $DIR/$tfile O_c & + multiop_bg_pause $DIR/$tfile O_c || return 1 pid=$! - sleep 1 - # This will cause the CLOSE to fail before even + # This will cause the CLOSE to fail before even # allocating a reply buffer $LCTL --device $mdcdev deactivate || return 4 @@ -1029,46 +1023,49 @@ test_46() { run_test 46 "Don't leak file handle after open resend (3325)" test_47() { # bug 2824 - # create some files to make sure precreate has been done on all + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + # create some files to make sure precreate has been done on all # OSTs. (just in case this test is run independently) createmany -o $DIR/$tfile 20 || return 1 # OBD_FAIL_OST_CREATE_NET 0x204 fail ost1 - do_facet ost1 "sysctl -w lustre.fail_loc=0x80000204" - df $MOUNT || return 2 + do_facet ost1 "lctl set_param fail_loc=0x80000204" + client_up || return 2 # let the MDS discover the OST failure, attempt to recover, fail - # and recover again. + # and recover again. 
sleep $((3 * TIMEOUT)) - # Without 2824, this createmany would hang + # Without 2824, this createmany would hang createmany -o $DIR/$tfile 20 || return 3 unlinkmany $DIR/$tfile 20 || return 4 - do_facet ost1 "sysctl -w lustre.fail_loc=0" + do_facet ost1 "lctl set_param fail_loc=0" return 0 } run_test 47 "MDS->OSC failure during precreate cleanup (2824)" test_48() { + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + [ "$OSTCOUNT" -lt "2" ] && skip_env "$OSTCOUNT < 2 OSTs -- skipping" && return + replay_barrier $SINGLEMDS createmany -o $DIR/$tfile 20 || return 1 # OBD_FAIL_OST_EROFS 0x216 - fail $SINGLEMDS - do_facet ost1 "sysctl -w lustre.fail_loc=0x80000216" - df $MOUNT || return 2 + facet_failover $SINGLEMDS + do_facet ost1 "lctl set_param fail_loc=0x80000216" + client_up || return 2 createmany -o $DIR/$tfile 20 20 || return 2 unlinkmany $DIR/$tfile 40 || return 3 - - do_facet ost1 "sysctl -w lustre.fail_loc=0" return 0 } run_test 48 "MDS->OSC failure during precreate cleanup (2824)" test_50() { - local oscdev=`do_facet $SINGLEMDS grep ${ost1_svc}-osc-MDT0000 $LPROC/devices | awk '{print $1}'` + local oscdev=`do_facet $SINGLEMDS lctl get_param -n devices | grep ${ost1_svc}-osc-MDT0000 | awk '{print $1}'` [ "$oscdev" ] || return 1 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 2 do_facet $SINGLEMDS $LCTL --device $oscdev recover || return 3 @@ -1085,25 +1082,290 @@ test_52() { multiop $DIR/$tfile s || return 1 replay_barrier $SINGLEMDS #define OBD_FAIL_LDLM_REPLY 0x30c - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000030c" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c" fail $SINGLEMDS || return 2 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x0" $CHECKSTAT -t file $DIR/$tfile-* && return 3 || true } run_test 52 "time out lock replay (3764)" -#b_cray 53 "|X| open request and close reply while two MDC requests in flight" +# bug 3462 - simultaneous MDC requests +test_53a() { + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + # give multiop a change to open + sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET 0x115 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + + mcreate $DIR/${tdir}-2/f || return 1 + + # close should still be here + [ -d /proc/$close_pid ] || return 2 + + replay_barrier_nodf $SINGLEMDS + fail $SINGLEMDS + wait $close_pid || return 3 + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5 + rm -rf $DIR/${tdir}-* +} +run_test 53a "|X| close request while two MDC requests in flight" + +test_53b() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop_bg_pause $DIR/${tdir}-1/f O_c || return 6 + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET 0x107 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107" + mcreate $DIR/${tdir}-2/f & + open_pid=$! 
+ sleep 1 + + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + wait $close_pid || return 1 + # open should still be here + [ -d /proc/$open_pid ] || return 2 + + replay_barrier_nodf $SINGLEMDS + fail $SINGLEMDS + wait $open_pid || return 3 + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5 + rm -rf $DIR/${tdir}-* +} +run_test 53b "|X| open request while two MDC requests in flight" + +test_53c() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET 0x107 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107" + mcreate $DIR/${tdir}-2/f & + open_pid=$! + sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET 0x115 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + + #bz20647: make sure all pids are exists before failover + [ -d /proc/$close_pid ] || error "close_pid doesn't exist" + [ -d /proc/$open_pid ] || error "open_pid doesn't exists" + replay_barrier_nodf $SINGLEMDS + fail_nodf $SINGLEMDS + wait $open_pid || return 1 + sleep 2 + # close should be gone + [ -d /proc/$close_pid ] && return 2 + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4 + rm -rf $DIR/${tdir}-* +} +run_test 53c "|X| open request and close request while two MDC requests in flight" + +test_53d() { + cancel_lru_locks mdc # cleanup locks from former test cases + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + # give multiop a chance to open + sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + mcreate $DIR/${tdir}-2/f || return 1 + + # close should still be here + [ -d /proc/$close_pid ] || return 2 + fail $SINGLEMDS + wait $close_pid || return 3 + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5 + rm -rf $DIR/${tdir}-* +} +run_test 53d "|X| close reply while two MDC requests in flight" + +test_53e() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET_REP 0x119 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x119" + mcreate $DIR/${tdir}-2/f & + open_pid=$! + sleep 1 + + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + wait $close_pid || return 1 + # open should still be here + [ -d /proc/$open_pid ] || return 2 + + replay_barrier_nodf $SINGLEMDS + fail $SINGLEMDS + wait $open_pid || return 3 + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 4 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 5 + rm -rf $DIR/${tdir}-* +} +run_test 53e "|X| open reply while two MDC requests in flight" + +test_53f() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET_REP 0x119 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x119" + mcreate $DIR/${tdir}-2/f & + open_pid=$! 
+ sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + + #bz20647: make sure all pids are exists before failover + [ -d /proc/$close_pid ] || error "close_pid doesn't exist" + [ -d /proc/$open_pid ] || error "open_pid doesn't exists" + replay_barrier_nodf $SINGLEMDS + fail_nodf $SINGLEMDS + wait $open_pid || return 1 + sleep 2 + # close should be gone + [ -d /proc/$close_pid ] && return 2 + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4 + rm -rf $DIR/${tdir}-* +} +run_test 53f "|X| open reply and close reply while two MDC requests in flight" + +test_53g() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET_REP 0x119 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x119" + mcreate $DIR/${tdir}-2/f & + open_pid=$! + sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET 0x115 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000115" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + + #bz20647: make sure all pids are exists before failover + [ -d /proc/$close_pid ] || error "close_pid doesn't exist" + [ -d /proc/$open_pid ] || error "open_pid doesn't exists" + replay_barrier_nodf $SINGLEMDS + fail_nodf $SINGLEMDS + wait $open_pid || return 1 + sleep 2 + # close should be gone + [ -d /proc/$close_pid ] && return 2 + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4 + rm -rf $DIR/${tdir}-* +} +run_test 53g "|X| drop open reply and close request while close and open are both in flight" + +test_53h() { + rm -rf $DIR/${tdir}-1 $DIR/${tdir}-2 + + mkdir -p $DIR/${tdir}-1 + mkdir -p $DIR/${tdir}-2 + multiop $DIR/${tdir}-1/f O_c & + close_pid=$! + + #define OBD_FAIL_MDS_REINT_NET 0x107 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000107" + mcreate $DIR/${tdir}-2/f & + open_pid=$! 
+ sleep 1 + + #define OBD_FAIL_MDS_CLOSE_NET_REP 0x13b + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013b" + kill -USR1 $close_pid + cancel_lru_locks mdc # force the close + sleep 1 + + #bz20647: make sure all pids are exists before failover + [ -d /proc/$close_pid ] || error "close_pid doesn't exist" + [ -d /proc/$open_pid ] || error "open_pid doesn't exists" + replay_barrier_nodf $SINGLEMDS + fail_nodf $SINGLEMDS + wait $open_pid || return 1 + sleep 2 + # close should be gone + [ -d /proc/$close_pid ] && return 2 + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + + $CHECKSTAT -t file $DIR/${tdir}-1/f || return 3 + $CHECKSTAT -t file $DIR/${tdir}-2/f || return 4 + rm -rf $DIR/${tdir}-* +} +run_test 53h "|X| open request and close reply while two MDC requests in flight" + #b_cray 54 "|X| open request and close reply while two MDC requests in flight" #b3761 ASSERTION(hash != 0) failed test_55() { # OBD_FAIL_MDS_OPEN_CREATE | OBD_FAIL_ONCE - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012b" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012b" touch $DIR/$tfile & # give touch a chance to run sleep 5 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x0" rm $DIR/$tfile return 0 } @@ -1122,43 +1384,80 @@ run_test 56 "don't replay a symlink open request (3440)" #recovery one mds-ost setattr from llog test_57() { #define OBD_FAIL_MDS_OST_SETATTR 0x12c - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012c" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c" touch $DIR/$tfile replay_barrier $SINGLEMDS fail $SINGLEMDS sleep 1 $CHECKSTAT -t file $DIR/$tfile || return 1 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x0" rm $DIR/$tfile } run_test 57 "test recovery from llog for setattr op" #recovery many mds-ost setattr from llog -test_58() { +test_58a() { + mkdir -p $DIR/$tdir #define OBD_FAIL_MDS_OST_SETATTR 0x12c - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x8000012c" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000012c" createmany -o $DIR/$tdir/$tfile-%d 2500 replay_barrier $SINGLEMDS fail $SINGLEMDS sleep 2 $CHECKSTAT -t file $DIR/$tdir/$tfile-* >/dev/null || return 1 - do_facet $SINGLEMDS "sysctl -w lustre.fail_loc=0x0" + do_facet $SINGLEMDS "lctl set_param fail_loc=0x0" unlinkmany $DIR/$tdir/$tfile-%d 2500 rmdir $DIR/$tdir } -run_test 58 "test recovery from llog for setattr op (test llog_gen_rec)" +run_test 58a "test recovery from llog for setattr op (test llog_gen_rec)" + +test_58b() { + mount_client $MOUNT2 + mkdir -p $DIR/$tdir + touch $DIR/$tdir/$tfile + replay_barrier $SINGLEMDS + setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile + fail $SINGLEMDS + VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile` + [ x$VAL = x"bar" ] || return 1 + rm -f $DIR/$tdir/$tfile + rmdir $DIR/$tdir + zconf_umount `hostname` $MOUNT2 +} +run_test 58b "test replay of setxattr op" + +test_58c() { # bug 16570 + mount_client $MOUNT2 + mkdir -p $DIR/$tdir + touch $DIR/$tdir/$tfile + drop_request "setfattr -n trusted.foo -v bar $DIR/$tdir/$tfile" || \ + return 1 + VAL=`getfattr --absolute-names --only-value -n trusted.foo $MOUNT2/$tdir/$tfile` + [ x$VAL = x"bar" ] || return 2 + drop_reint_reply "setfattr -n trusted.foo1 -v bar1 $DIR/$tdir/$tfile" || \ + return 3 + VAL=`getfattr --absolute-names --only-value -n trusted.foo1 $MOUNT2/$tdir/$tfile` + [ x$VAL = x"bar1" ] || return 4 + rm -f $DIR/$tdir/$tfile + rmdir $DIR/$tdir + zconf_umount 
`hostname` $MOUNT2 +} +run_test 58c "resend/reconstruct setxattr op" # log_commit_thread vs filter_destroy race used to lead to import use after free # bug 11658 test_59() { + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + mkdir -p $DIR/$tdir createmany -o $DIR/$tdir/$tfile-%d 200 sync unlinkmany $DIR/$tdir/$tfile-%d 200 #define OBD_FAIL_PTLRPC_DELAY_RECOV 0x507 - do_facet ost1 "sysctl -w lustre.fail_loc=0x507" + do_facet ost1 "lctl set_param fail_loc=0x507" fail ost1 fail $SINGLEMDS - do_facet ost1 "sysctl -w lustre.fail_loc=0x0" + do_facet ost1 "lctl set_param fail_loc=0x0" sleep 20 rmdir $DIR/$tdir } @@ -1167,16 +1466,583 @@ run_test 59 "test log_commit_thread vs filter_destroy race" # race between add unlink llog vs cat log init in post_recovery (only for b1_6) # bug 12086: should no oops and No ctxt error for this test test_60() { + mkdir -p $DIR/$tdir createmany -o $DIR/$tdir/$tfile-%d 200 replay_barrier $SINGLEMDS unlinkmany $DIR/$tdir/$tfile-%d 0 100 fail $SINGLEMDS unlinkmany $DIR/$tdir/$tfile-%d 100 100 local no_ctxt=`dmesg | grep "No ctxt"` - [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery" + [ -z "$no_ctxt" ] || error "ctxt is not initialized in recovery" } run_test 60 "test llog post recovery init vs llog unlink" +#test race llog recovery thread vs llog cleanup +test_61a() { # was test_61 + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/$tfile-%d 800 + replay_barrier ost1 +# OBD_FAIL_OST_LLOG_RECOVERY_TIMEOUT 0x221 + unlinkmany $DIR/$tdir/$tfile-%d 800 + set_nodes_failloc "$(osts_nodes)" 0x80000221 + facet_failover ost1 + sleep 10 + fail ost1 + sleep 30 + set_nodes_failloc "$(osts_nodes)" 0x0 + + $CHECKSTAT -t file $DIR/$tdir/$tfile-* && return 1 + rmdir $DIR/$tdir +} +run_test 61a "test race llog recovery vs llog cleanup" + +#test race mds llog sync vs llog cleanup +test_61b() { +# OBD_FAIL_MDS_LLOG_SYNC_TIMEOUT 0x13a + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000013a" + facet_failover $SINGLEMDS + sleep 10 + fail $SINGLEMDS + do_facet client dd if=/dev/zero of=$DIR/$tfile bs=4k count=1 || return 1 +} +run_test 61b "test race mds llog sync vs llog cleanup" + +#test race cancel cookie cb vs llog cleanup +test_61c() { + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + +# OBD_FAIL_OST_CANCEL_COOKIE_TIMEOUT 0x222 + touch $DIR/$tfile + set_nodes_failloc "$(osts_nodes)" 0x80000222 + rm $DIR/$tfile + sleep 10 + fail ost1 + set_nodes_failloc "$(osts_nodes)" 0x0 +} +run_test 61c "test race mds llog sync vs llog cleanup" + +test_61d() { # bug 16002 # bug 17466 + shutdown_facet $SINGLEMDS +#define OBD_FAIL_OBD_LLOG_SETUP 0x605 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x605" + start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS && error "mds start should have failed" + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + start $SINGLEMDS `mdsdevname 1` $MDS_MOUNT_OPTS || error "cannot restart mds" +} +run_test 61d "error in llog_setup should cleanup the llog context correctly" + +test_62() { # Bug 15756 - don't mis-drop resent replay + mkdir -p $DIR/$tdir + replay_barrier $SINGLEMDS + createmany -o $DIR/$tdir/$tfile- 25 +#define OBD_FAIL_TGT_REPLAY_DROP 0x707 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000707" + fail $SINGLEMDS + do_facet $SINGLEMDS "lctl set_param fail_loc=0" + unlinkmany $DIR/$tdir/$tfile- 25 || return 2 + return 0 +} +run_test 62 "don't mis-drop resent replay" + +#Adaptive Timeouts (bug 3055) +AT_MAX_SET=0 + +at_cleanup () { + local var + local 
facet + local at_new + + echo "Cleaning up AT ..." + if [ -n "$ATOLDBASE" ]; then + local at_history=$($LCTL get_param -n at_history) + do_facet mds "lctl set_param at_history=$at_history" || true + do_facet ost1 "lctl set_param at_history=$at_history" || true + fi + + if [ $AT_MAX_SET -ne 0 ]; then + for facet in mds client ost; do + var=AT_MAX_SAVE_${facet} + echo restore AT on $facet to saved value ${!var} + at_max_set ${!var} $facet + at_new=$(at_max_get $facet) + echo Restored AT value on $facet $at_new + [ $at_new -eq ${!var} ] || \ + error "$facet : AT value was not restored SAVED ${!var} NEW $at_new" + done + fi +} + +at_start() +{ + local at_max_new=600 + + # Save at_max original values + local facet + if [ $AT_MAX_SET -eq 0 ]; then + # Suppose that all osts have the same at_max + for facet in mds client ost; do + eval AT_MAX_SAVE_${facet}=$(at_max_get $facet) + done + fi + local at_max + for facet in mds client ost; do + at_max=$(at_max_get $facet) + if [ $at_max -ne $at_max_new ]; then + echo "AT value on $facet is $at_max, set it by force temporarily to $at_max_new" + at_max_set $at_max_new $facet + AT_MAX_SET=1 + fi + done + + if [ -z "$ATOLDBASE" ]; then + ATOLDBASE=$(do_facet mds "lctl get_param -n at_history") + # speed up the timebase so we can check decreasing AT + do_facet mds "lctl set_param at_history=8" || true + do_facet ost1 "lctl set_param at_history=8" || true + + # sleep for a while to cool down, should be > 8s and also allow + # at least one ping to be sent. simply use TIMEOUT to be safe. + sleep $TIMEOUT + fi +} + +test_65a() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + $LCTL dk > /dev/null + debugsave + sysctl -w lnet.debug="+other" + # Slow down a request to the current service time, this is critical + # because previous tests may have caused this value to increase. + REQ_DELAY=`lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | + awk '/portal 12/ {print $5}'` + REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5)) + + do_facet mds lctl set_param fail_val=$((${REQ_DELAY} * 1000)) +#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a + do_facet mds sysctl -w lustre.fail_loc=0x8000050a + createmany -o $DIR/$tfile 10 > /dev/null + unlinkmany $DIR/$tfile 10 > /dev/null + # check for log message + $LCTL dk | grep "Early reply #" || error "No early reply" + debugrestore + # client should show REQ_DELAY estimates + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal + sleep 9 + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep portal +} +run_test 65a "AT: verify early replies" + +test_65b() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + # turn on D_ADAPTTO + debugsave + sysctl -w lnet.debug="other trace" + $LCTL dk > /dev/null + # Slow down a request to the current service time, this is critical + # because previous tests may have caused this value to increase. 
+ REQ_DELAY=`lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | + awk '/portal 6/ {print $5}'` + REQ_DELAY=$((${REQ_DELAY} + ${REQ_DELAY} / 4 + 5)) + + do_facet ost1 lctl set_param fail_val=${REQ_DELAY} +#define OBD_FAIL_OST_BRW_PAUSE_PACK 0x224 + do_facet ost1 sysctl -w lustre.fail_loc=0x224 + + rm -f $DIR/$tfile + lfs setstripe $DIR/$tfile --index=0 --count=1 + # force some real bulk transfer + multiop $DIR/$tfile oO_CREAT:O_RDWR:O_SYNC:w4096c + + do_facet ost1 sysctl -w lustre.fail_loc=0 + # check for log message + $LCTL dk | grep "Early reply #" || error "No early reply" + debugrestore + # client should show REQ_DELAY estimates + lctl get_param -n osc.${FSNAME}-OST0000-osc-*.timeouts | grep portal +} +run_test 65b "AT: verify early replies on packed reply / bulk" + +test_66a() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" + # adjust 5s at a time so no early reply is sent (within deadline) + do_facet mds "sysctl -w lustre.fail_val=5000" +#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a + do_facet mds "sysctl -w lustre.fail_loc=0x8000050a" + createmany -o $DIR/$tfile 20 > /dev/null + unlinkmany $DIR/$tfile 20 > /dev/null + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" + do_facet mds "sysctl -w lustre.fail_val=10000" + do_facet mds "sysctl -w lustre.fail_loc=0x8000050a" + createmany -o $DIR/$tfile 20 > /dev/null + unlinkmany $DIR/$tfile 20 > /dev/null + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" + do_facet mds "sysctl -w lustre.fail_loc=0" + sleep 9 + createmany -o $DIR/$tfile 20 > /dev/null + unlinkmany $DIR/$tfile 20 > /dev/null + lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | grep "portal 12" + CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $5}') + WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000-mdc-*.timeouts | awk '/portal 12/ {print $7}') + echo "Current MDT timeout $CUR, worst $WORST" + [ $CUR -lt $WORST ] || error "Current $CUR should be less than worst $WORST" +} +run_test 66a "AT: verify MDT service time adjusts with no early replies" + +test_66b() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}') + sysctl -w lustre.fail_val=$(($ORIG + 5)) +#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c + sysctl -w lustre.fail_loc=0x50c + ls $DIR/$tfile > /dev/null 2>&1 + sysctl -w lustre.fail_loc=0 + CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}') + WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}') + echo "network timeout orig $ORIG, cur $CUR, worst $WORST" + [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG" +} +run_test 66b "AT: verify net latency adjusts" + +test_67a() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}') + # sleeping threads may drive values above this + do_facet ost1 "sysctl -w lustre.fail_val=400" +#define OBD_FAIL_PTLRPC_PAUSE_REQ 0x50a + do_facet ost1 "sysctl -w lustre.fail_loc=0x50a" + createmany -o $DIR/$tfile 20 > /dev/null + unlinkmany $DIR/$tfile 20 > /dev/null + do_facet ost1 "sysctl -w lustre.fail_loc=0" + CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print 
total}') + ATTEMPTS=$(($CONN2 - $CONN1)) + echo "$ATTEMPTS osc reconnect attempts on gradual slow" + [ $ATTEMPTS -gt 0 ] && error_ignore 13721 "AT should have prevented reconnect" + return 0 +} +run_test 67a "AT: verify slow request processing doesn't induce reconnects" + +test_67b() #bug 3055 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + CONN1=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}') + + # exhaust precreations on ost1 + local OST=$(lfs osts | grep 0": " | awk '{print $2}' | sed -e 's/_UUID$//') + local mdtosc=$(get_mdtosc_proc_path $OST) + local last_id=$(do_facet mds lctl get_param -n osc.$mdtosc.prealloc_last_id) + local next_id=$(do_facet mds lctl get_param -n osc.$mdtosc.prealloc_next_id) + + mkdir -p $DIR/$tdir/${OST} + lfs setstripe $DIR/$tdir/${OST} -o 0 -c 1 || error "setstripe" + echo "Creating to objid $last_id on ost $OST..." +#define OBD_FAIL_OST_PAUSE_CREATE 0x223 + do_facet ost1 "sysctl -w lustre.fail_val=20000" + do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223" + createmany -o $DIR/$tdir/${OST}/f $next_id $((last_id - next_id + 2)) + + client_reconnect + do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts" + log "phase 2" + CONN2=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}') + ATTEMPTS=$(($CONN2 - $CONN1)) + echo "$ATTEMPTS osc reconnect attempts on instant slow" + # do it again; should not timeout + do_facet ost1 "sysctl -w lustre.fail_loc=0x80000223" + cp /etc/profile $DIR/$tfile || error "cp failed" + do_facet ost1 "sysctl -w lustre.fail_loc=0" + client_reconnect + do_facet ost1 "lctl get_param -n ost.OSS.ost_create.timeouts" + CONN3=$(lctl get_param -n osc.*.stats | awk '/_connect/ {total+=$2} END {print total}') + ATTEMPTS=$(($CONN3 - $CONN2)) + echo "$ATTEMPTS osc reconnect attempts on 2nd slow" + [ $ATTEMPTS -gt 0 ] && error "AT should have prevented reconnect" + return 0 +} +run_test 67b "AT: verify instant slowdown doesn't induce reconnects" + +test_68 () #bug 13813 +{ + remote_ost_nodsh && skip "remote OST with nodsh" && return 0 + + at_start || return 0 + local ldlm_enqueue_min=$(find /sys -name ldlm_enqueue_min) + [ -z "$ldlm_enqueue_min" ] && skip "missing /sys/.../ldlm_enqueue_min" && return 0 + local ldlm_enqueue_min_r=$(do_facet ost1 "find /sys -name ldlm_enqueue_min") + [ -z "$ldlm_enqueue_min_r" ] && skip "missing /sys/.../ldlm_enqueue_min in the ost1" && return 0 + local ENQ_MIN=$(cat $ldlm_enqueue_min) + local ENQ_MIN_R=$(do_facet ost1 "cat $ldlm_enqueue_min_r") + echo $TIMEOUT >> $ldlm_enqueue_min + do_facet ost1 "echo $TIMEOUT >> $ldlm_enqueue_min_r" + + rm -rf $DIR/$tdir + mkdir -p $DIR/$tdir + lfs setstripe $DIR/$tdir --index=0 --count=1 +#define OBD_FAIL_LDLM_PAUSE_CANCEL 0x312 + sysctl -w lustre.fail_val=$(($TIMEOUT - 1)) + sysctl -w lustre.fail_loc=0x80000312 + cp /etc/profile $DIR/$tdir/${tfile}_1 || error "1st cp failed $?" + sysctl -w lustre.fail_val=$((TIMEOUT * 5 / 4)) + sysctl -w lustre.fail_loc=0x80000312 + cp /etc/profile $DIR/$tdir/${tfile}_2 || error "2nd cp failed $?" + sysctl -w lustre.fail_loc=0 + + echo $ENQ_MIN >> $ldlm_enqueue_min + do_facet ost1 "echo $ENQ_MIN_R >> $ldlm_enqueue_min_r" + rm -rf $DIR/$tdir + return 0 +} +run_test 68 "AT: verify slowing locks" + +at_cleanup +# end of AT tests includes above lines + + +# start multi-client tests +test_70a () { + [ -z "$CLIENTS" ] && \ + { skip "Need two or more clients." 
&& return; } + [ $CLIENTCOUNT -lt 2 ] && \ + { skip "Need two or more clients, have $CLIENTCOUNT" && return; } + + echo "mount clients $CLIENTS ..." + zconf_mount_clients $CLIENTS $DIR + + local clients=${CLIENTS//,/ } + echo "Write/read files on $DIR ; clients $CLIENTS ... " + for CLIENT in $clients; do + do_node $CLIENT dd bs=1M count=10 if=/dev/zero \ + of=$DIR/${tfile}_${CLIENT} 2>/dev/null || \ + error "dd failed on $CLIENT" + done + + local prev_client=$(echo $clients | sed 's/^.* \(.\+\)$/\1/') + for C in ${CLIENTS//,/ }; do + do_node $prev_client dd if=$DIR/${tfile}_${C} of=/dev/null 2>/dev/null || \ + error "dd if=$DIR/${tfile}_${C} failed on $prev_client" + prev_client=$C + done + + ls $DIR +} +run_test 70a "check multi client t-f" + +test_70b () { + local clients=${CLIENTS:-$HOSTNAME} + + zconf_mount_clients $clients $DIR + + local duration=300 + [ "$SLOW" = "no" ] && duration=60 + local cmd="rundbench 1 -t $duration" + local PID="" + do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \ + PATH=:$PATH:$LUSTRE/utils:$LUSTRE/tests/:$DBENCH_LIB \ + DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \ + LCTL=$LCTL $cmd" & + PID=$! + log "Started rundbench load PID=$PID ..." + ELAPSED=0 + NUM_FAILOVERS=0 + START_TS=$(date +%s) + CURRENT_TS=$START_TS + while [ $ELAPSED -lt $duration ]; do + sleep 1 + replay_barrier $SINGLEMDS + sleep 1 # give clients a time to do operations + # Increment the number of failovers + NUM_FAILOVERS=$((NUM_FAILOVERS+1)) + log "$TESTNAME fail mds1 $NUM_FAILOVERS times" + fail $SINGLEMDS + CURRENT_TS=$(date +%s) + ELAPSED=$((CURRENT_TS - START_TS)) + done + wait $PID || error "rundbench load on $CLIENTS failed!" +} +run_test 70b "mds recovery; $CLIENTCOUNT clients" +# end multi-client tests + +test_73a() { + multiop_bg_pause $DIR/$tfile O_tSc || return 3 + pid=$! + rm -f $DIR/$tfile + + replay_barrier $SINGLEMDS +#define OBD_FAIL_LDLM_ENQUEUE 0x302 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302" + fail $SINGLEMDS + kill -USR1 $pid + wait $pid || return 1 + [ -e $DIR/$tfile ] && return 2 + return 0 +} +run_test 73a "open(O_CREAT), unlink, replay, reconnect before open replay , close" + +test_73b() { + multiop_bg_pause $DIR/$tfile O_tSc || return 3 + pid=$! + rm -f $DIR/$tfile + + replay_barrier $SINGLEMDS +#define OBD_FAIL_LDLM_REPLY 0x30c + do_facet $SINGLEMDS "lctl set_param fail_loc=0x8000030c" + fail $SINGLEMDS + kill -USR1 $pid + wait $pid || return 1 + [ -e $DIR/$tfile ] && return 2 + return 0 +} +run_test 73b "open(O_CREAT), unlink, replay, reconnect at open_replay reply, close" + +test_73c() { + multiop_bg_pause $DIR/$tfile O_tSc || return 3 + pid=$! + rm -f $DIR/$tfile + + replay_barrier $SINGLEMDS +#define OBD_FAIL_TGT_LAST_REPLAY 0x710 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000710" + fail $SINGLEMDS + kill -USR1 $pid + wait $pid || return 1 + [ -e $DIR/$tfile ] && return 2 + return 0 +} +run_test 73c "open(O_CREAT), unlink, replay, reconnect at last_replay, close" + +# bug 18554 +test_74() { + local clients=${CLIENTS:-$HOSTNAME} + + stop ost1 + zconf_umount_clients $clients $MOUNT + facet_failover $SINGLEMDS + zconf_mount_clients $clients $MOUNT + mount_facet ost1 + touch $DIR/$tfile || return 1 + rm $DIR/$tfile || return 2 + clients_up || error "client evicted: $?" 
+ return 0 +} +run_test 74 "Ensure applications don't fail waiting for OST recovery" + +test_80a() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + + mkdir -p $DIR/$tdir + replay_barrier mds2 + $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed" + rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed" + fail mds2 + stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!" + return 0 +} +run_test 80a "CMD: unlink cross-node dir (fail mds with inode)" + +test_80b() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + + mkdir -p $DIR/$tdir + replay_barrier mds1 + $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed" + rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed" + fail mds1 + stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!" + return 0 +} +run_test 80b "CMD: unlink cross-node dir (fail mds with name)" + +test_81a() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/f 3000 || error "createmany failed" + sleep 10 + $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed" + $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed" + replay_barrier mds1 + rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed" + fail mds1 + stat $DIR/$tdir/f1002 +} +run_test 81a "CMD: unlink cross-node file (fail mds with name)" + +test_82a() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + + local dir=$DIR/d82a + replay_barrier mds2 + mkdir $dir || error "mkdir $dir failed" + log "FAILOVER mds2" + fail mds2 + stat $DIR + $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed" +} +run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)" + +test_82b() { + [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0 + + local dir=$DIR/d82b + replay_barrier mds1 + mkdir $dir || error "mkdir $dir failed" + log "FAILOVER mds1" + fail mds1 + stat $DIR + $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed" +} +run_test 82b "CMD: mkdir cross-node dir (fail mds with name)" + +test_83a() { + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/$tfile- 10 || return 1 +#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140" + unlinkmany $DIR/$tdir/$tfile- 10 || return 2 +} +run_test 83a "fail log_add during unlink recovery" + +test_83b() { + mkdir -p $DIR/$tdir + createmany -o $DIR/$tdir/$tfile- 10 || return 1 + replay_barrier $SINGLEMDS + unlinkmany $DIR/$tdir/$tfile- 10 || return 2 +#define OBD_FAIL_MDS_FAIL_LOV_LOG_ADD 0x140 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000140" + fail $SINGLEMDS +} +run_test 83b "fail log_add during unlink recovery" + +test_84a() { +#define OBD_FAIL_MDS_OPEN_WAIT_CREATE 0x144 + do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000144" + createmany -o $DIR/$tfile- 1 & + PID=$! + mds_evict_client + wait $PID + client_up || client_up || true # reconnect +} +run_test 84a "stale open during export disconnect" + equals_msg `basename $0`: test complete, cleaning up check_and_cleanup_lustre -[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true +[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true
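
A minimal sketch (not part of the patch above) of the fault-injection pattern these tests rely on, using the "lctl set_param fail_loc=..." form that this patch substitutes for the old "sysctl -w lustre.fail_loc=..." calls. It assumes only the test-framework.sh helpers already sourced by the script (do_facet, replay_barrier, fail, $CHECKSTAT, $SINGLEMDS, $DIR, $tfile); the fail_loc value is the OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE code already used by test_3b, and the function name is illustrative only.

fail_loc_replay_sketch() {
    local facet=$SINGLEMDS
    local loc=0x80000114    # OBD_FAIL_MDS_OPEN_PACK | OBD_FAIL_ONCE (0x8000... = fire once)

    replay_barrier $facet                             # mark the point recovery will replay from
    do_facet $facet "lctl set_param fail_loc=$loc"    # arm the one-shot failure on the MDS
    touch $DIR/$tfile                                 # the operation that trips the fail_loc
    do_facet $facet "lctl set_param fail_loc=0"       # disarm before failing over
    fail $facet                                       # fail over the MDS and let the client replay
    $CHECKSTAT -t file $DIR/$tfile && return 2        # the failed open must not reappear on replay
    return 0
}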