X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Ftests%2Freplay-single.sh;h=07e7104a6b9405ac0d9224a1fe1c291a1c8d93f8;hp=1d09159468683435d3ee21fd937c1f04eb2bfcc0;hb=ac37e7b4d101761bbff401ed12fcf671d6b68f9c;hpb=5d0201db65d366d8905a28d103a2a9b511c22ca7

diff --git a/lustre/tests/replay-single.sh b/lustre/tests/replay-single.sh
index 1d09159..07e7104 100755
--- a/lustre/tests/replay-single.sh
+++ b/lustre/tests/replay-single.sh
@@ -7,6 +7,7 @@ set -e
 # This test needs to be run on the client
 #
 SAVE_PWD=$PWD
+export MULTIOP=${MULTIOP:-multiop}
 LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
 SETUP=${SETUP:-}
 CLEANUP=${CLEANUP:-}
@@ -20,12 +21,20 @@ GRANT_CHECK_LIST=${GRANT_CHECK_LIST:-""}
 require_dsh_mds || exit 0
 
 # Skip these tests
-# bug number:  17466 18857
-ALWAYS_EXCEPT="61d 33a 33b $REPLAY_SINGLE_EXCEPT"
+# bug number:  17466 18857 LU-1867 LU-1473
+ALWAYS_EXCEPT="61d 33a 33b 89 62 $REPLAY_SINGLE_EXCEPT"
+
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+# bug number for skipped test: LU-951
+        ALWAYS_EXCEPT="$ALWAYS_EXCEPT 73a"
 
 #                                                  63 min  7 min AT AT AT AT"
 [ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 6 12 16 44a 44b 65 66 67 68"
 
+[ $(facet_fstype $SINGLEMDS) = "zfs" ] &&
+# bug number for skipped test: LU-3127
+        ALWAYS_EXCEPT="$ALWAYS_EXCEPT 73b"
+
 build_test_filter
 
 check_and_setup_lustre
@@ -33,7 +42,7 @@ check_and_setup_lustre
 
 mkdir -p $DIR
 assert_DIR
-rm -rf $DIR/[df][0-9]*
+rm -rf $DIR/[df][0-9]* $DIR/f.$TESTSUITE.*
 
 # LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
 if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
@@ -60,6 +69,19 @@ test_0b() {
 }
 run_test 0b "ensure object created after recover exists. (3284)"
 
+test_0c() {
+        replay_barrier $SINGLEMDS
+        mcreate $DIR/$tfile
+        umount $MOUNT
+        facet_failover $SINGLEMDS
+        zconf_mount `hostname` $MOUNT || error "mount fails"
+        client_up || error "post-failover df failed"
+        # file shouldn't exist if replay-barrier works as expected
+        rm $DIR/$tfile && error "File exists and it shouldn't"
+        return 0
+}
+run_test 0c "check replay-barrier"
+
 test_0d() {
     replay_barrier $SINGLEMDS
     umount $MOUNT
@@ -420,7 +442,7 @@ test_20b() { # bug 10480
     wait_mds_ost_sync || return 3
     AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
     log "before $BEFOREUSED, after $AFTERUSED"
-    [ $AFTERUSED -gt $((BEFOREUSED + 20)) ] && \
+    (( $AFTERUSED > $BEFOREUSED + $(fs_log_size) )) &&
        error "after $AFTERUSED > before $BEFOREUSED"
     return 0
 }
@@ -778,36 +800,40 @@ count_ost_writes() {
 
 #b=2477,2532
 test_40(){
-    $LCTL mark multiop $MOUNT/$tfile OS_c
-    multiop $MOUNT/$tfile OS_c &
-    PID=$!
-    writeme -s $MOUNT/${tfile}-2 &
-    WRITE_PID=$!
-    sleep 1
-    facet_failover $SINGLEMDS
+        # always need connection to MDS to verify layout during IO. LU-2628.
+        lctl get_param mdc.*.connect_flags | grep -q layout_lock &&
+                skip "layout_lock needs MDS connection for IO" && return 0
+
+        $LCTL mark multiop $MOUNT/$tfile OS_c
+        multiop $MOUNT/$tfile OS_c &
+        PID=$!
+        writeme -s $MOUNT/${tfile}-2 &
+        WRITE_PID=$!
+        sleep 1
+        facet_failover $SINGLEMDS
 #define OBD_FAIL_MDS_CONNECT_NET 0x117
-    do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
-    kill -USR1 $PID
-    stat1=`count_ost_writes`
-    sleep $TIMEOUT
-    stat2=`count_ost_writes`
-    echo "$stat1, $stat2"
-    if [ $stat1 -lt $stat2 ]; then
-        echo "writes continuing during recovery"
-        RC=0
-    else
-        echo "writes not continuing during recovery, bug 2477"
-        RC=4
-    fi
-    echo "waiting for writeme $WRITE_PID"
-    kill $WRITE_PID
-    wait $WRITE_PID
-
-    echo "waiting for multiop $PID"
-    wait $PID || return 2
-    do_facet client munlink $MOUNT/$tfile || return 3
-    do_facet client munlink $MOUNT/${tfile}-2 || return 3
-    return $RC
+        do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000117"
+        kill -USR1 $PID
+        stat1=`count_ost_writes`
+        sleep $TIMEOUT
+        stat2=`count_ost_writes`
+        echo "$stat1, $stat2"
+        if [ $stat1 -lt $stat2 ]; then
+                echo "writes continuing during recovery"
+                RC=0
+        else
+                echo "writes not continuing during recovery, bug 2477"
+                RC=4
+        fi
+        echo "waiting for writeme $WRITE_PID"
+        kill $WRITE_PID
+        wait $WRITE_PID
+
+        echo "waiting for multiop $PID"
+        wait $PID || return 2
+        do_facet client munlink $MOUNT/$tfile || return 3
+        do_facet client munlink $MOUNT/${tfile}-2 || return 3
+        return $RC
 }
 run_test 40 "cause recovery in ptlrpc, ensure IO continues"
 
@@ -880,89 +906,97 @@ test_43() { # bug 2530
 run_test 43 "mds osc import failure during recovery; don't LBUG"
 
 test_44a() { # was test_44
-    local at_max_saved=0
-
-    mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
-    [ "$mdcdev" ] || return 2
-    [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
-
-    # adaptive timeouts slow this way down
-    if at_is_enabled; then
-        at_max_saved=$(at_max_get mds)
-        at_max_set 40 mds
-    fi
-
-    for i in `seq 1 10`; do
-        echo "$i of 10 ($(date +%s))"
-        do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
-        #define OBD_FAIL_TGT_CONN_RACE 0x701
-        do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
-        # lctl below may fail, it is valid case
-        $LCTL --device $mdcdev recover
-        df $MOUNT
-    done
-    do_facet $SINGLEMDS "lctl set_param fail_loc=0"
-    [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
-    return 0
+        local at_max_saved=0
+
+        local mdcdev=$($LCTL get_param -n devices |
+                awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+        [ "$mdcdev" ] || return 2
+        [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+                { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
+
+        # adaptive timeouts slow this way down
+        if at_is_enabled; then
+                at_max_saved=$(at_max_get mds)
+                at_max_set 40 mds
+        fi
+
+        for i in `seq 1 10`; do
+                echo "$i of 10 ($(date +%s))"
+                do_facet $SINGLEMDS \
+                        "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
+#define OBD_FAIL_TGT_CONN_RACE 0x701
+                do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000701"
+                # lctl below may fail, it is valid case
+                $LCTL --device $mdcdev recover
+                df $MOUNT
+        done
+        do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+        [ $at_max_saved -ne 0 ] && at_max_set $at_max_saved mds
+        return 0
 }
 run_test 44a "race in target handle connect"
 
 test_44b() {
-    local mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
-    [ "$mdcdev" ] || return 2
-    [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
-
-    for i in `seq 1 10`; do
-        echo "$i of 10 ($(date +%s))"
-        do_facet $SINGLEMDS "lctl get_param -n mdt.*.mdt.timeouts | grep service"
+        local mdcdev=$($LCTL get_param -n devices |
+                awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+        [ "$mdcdev" ] || return 2
+        [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+                { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
+
+        for i in `seq 1 10`; do
+                echo "$i of 10 ($(date +%s))"
+                do_facet $SINGLEMDS \
+                        "lctl get_param -n md[ts].*.mdt.timeouts | grep service"
 #define OBD_FAIL_TGT_DELAY_RECONNECT 0x704
-    do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
+                do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000704"
     # lctl below may fail, it is valid case
-    $LCTL --device $mdcdev recover
-    df $MOUNT
-    done
-    do_facet $SINGLEMDS "lctl set_param fail_loc=0"
-    return 0
+                $LCTL --device $mdcdev recover
+                df $MOUNT
+        done
+        do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+        return 0
 }
 run_test 44b "race in target handle connect"
 
 test_44c() {
-    replay_barrier $SINGLEMDS
-    createmany -m $DIR/$tfile-%d 100
+        replay_barrier $SINGLEMDS
+        createmany -m $DIR/$tfile-%d 100 || error "failed to create files"
#define OBD_FAIL_TGT_RCVG_FLAG 0x712
-    do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
-    fail_abort $SINGLEMDS
-    unlinkmany $DIR/$tfile-%d 100 && return 1
-    fail $SINGLEMDS
-    unlinkmany $DIR/$tfile-%d 100 && return 1
-    return 0
+        do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000712"
+        fail_abort $SINGLEMDS
+        unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail abort"
+        fail $SINGLEMDS
+        unlinkmany $DIR/$tfile-%d 100 && error "unlinked after fail"
+        return 0
 }
 run_test 44c "race in target handle connect"
 
 # Handle failed close
 test_45() {
-    mdcdev=`lctl get_param -n devices | awk '/MDT0000-mdc-/ {print $1}'`
-    [ "$mdcdev" ] || return 2
-    [ $(echo $mdcdev | wc -w) -eq 1 ] || { echo $mdcdev=$mdcdev && return 3; }
+        local mdcdev=$($LCTL get_param -n devices |
+                awk "/ ${FSNAME}-MDT0000-mdc-/ {print \$1}")
+        [ "$mdcdev" ] || return 2
+        [ $(echo $mdcdev | wc -w) -eq 1 ] ||
+                { echo mdcdev=$mdcdev; $LCTL dl; return 3; }
 
-    $LCTL --device $mdcdev recover || return 6
+        $LCTL --device $mdcdev recover || return 6
 
-    multiop_bg_pause $DIR/$tfile O_c || return 1
-    pid=$!
+        multiop_bg_pause $DIR/$tfile O_c || return 1
+        pid=$!
-    # This will cause the CLOSE to fail before even
-    # allocating a reply buffer
-    $LCTL --device $mdcdev deactivate || return 4
+        # This will cause the CLOSE to fail before even
+        # allocating a reply buffer
+        $LCTL --device $mdcdev deactivate || return 4
 
-    # try the close
-    kill -USR1 $pid
-    wait $pid || return 1
+        # try the close
+        kill -USR1 $pid
+        wait $pid || return 1
 
-    $LCTL --device $mdcdev activate || return 5
-    sleep 1
+        $LCTL --device $mdcdev activate || return 5
+        sleep 1
 
-    $CHECKSTAT -t file $DIR/$tfile || return 2
-    return 0
+        $CHECKSTAT -t file $DIR/$tfile || return 2
+        return 0
 }
 run_test 45 "Handle failed close"
 
@@ -1687,19 +1721,23 @@ run_test 66a "AT: verify MDT service time adjusts with no early replies"
 
 test_66b() #bug 3055
 {
-    remote_ost_nodsh && skip "remote OST with nodsh" && return 0
-
-    at_start || return 0
-    ORIG=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
-    $LCTL set_param fail_val=$(($ORIG + 5))
-#define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
-    $LCTL set_param fail_loc=0x50c
-    ls $DIR/$tfile > /dev/null 2>&1
-    $LCTL set_param fail_loc=0
-    CUR=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $4}')
-    WORST=$(lctl get_param -n mdc.${FSNAME}-*.timeouts | awk '/network/ {print $6}')
-    echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
-    [ $WORST -gt $ORIG ] || error "Worst $WORST should be worse than orig $ORIG"
+        remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
+        at_start || return 0
+        ORIG=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+                awk '/network/ {print $4}')
+        $LCTL set_param fail_val=$(($ORIG + 5))
+        #define OBD_FAIL_PTLRPC_PAUSE_REP 0x50c
+        $LCTL set_param fail_loc=0x50c
+        ls $DIR/$tfile > /dev/null 2>&1
+        $LCTL set_param fail_loc=0
+        CUR=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+                awk '/network/ {print $4}')
+        WORST=$(lctl get_param -n mdc.${FSNAME}-MDT0000*.timeouts |
+                awk '/network/ {print $6}')
+        echo "network timeout orig $ORIG, cur $CUR, worst $WORST"
+        [ $WORST -gt $ORIG ] ||
+                error "Worst $WORST should be worse than orig $ORIG"
 }
 run_test 66b "AT: verify net latency adjusts"
 
@@ -1856,10 +1894,12 @@ test_70b () {
 	zconf_mount_clients $clients $MOUNT
 
 	local duration=300
-	[ "$SLOW" = "no" ] && duration=60
+	[ "$SLOW" = "no" ] && duration=120
 	# set duration to 900 because it takes some time to boot node
 	[ "$FAILURE_MODE" = HARD ] && duration=900
+	local elapsed
+	local start_ts=$(date +%s)
 
 	local cmd="rundbench 1 -t $duration"
 	local pid=""
 	do_nodesv $clients "set -x; MISSING_DBENCH_OK=$MISSING_DBENCH_OK \
@@ -1867,16 +1907,24 @@ test_70b () {
 		DBENCH_LIB=$DBENCH_LIB TESTSUITE=$TESTSUITE TESTNAME=$TESTNAME \
 		MOUNT=$MOUNT DIR=$DIR/$tdir/\\\$(hostname) LCTL=$LCTL $cmd" &
 	pid=$!
+
+	#LU-1897 wait for all dbench copies to start
+	while ! check_for_process $clients dbench; do
+		elapsed=$(($(date +%s) - start_ts))
+		if [ $elapsed -gt $duration ]; then
+			killall_process $clients dbench
+			error "dbench failed to start on $clients!"
+		fi
+		sleep 1
+	done
+
 	log "Started rundbench load pid=$pid ..."
-	# give rundbench a chance to start, bug 24118
-	sleep 12
-	local elapsed=0
+	elapsed=$(($(date +%s) - start_ts))
 	local num_failovers=0
-	local start_ts=$(date +%s)
 	while [ $elapsed -lt $duration ]; do
 		if ! check_for_process $clients dbench; then
-			error_noexit "dbench not found on some of $clients!"
+			error_noexit "dbench stopped on some of $clients!"
 			killall_process $clients dbench
 			break
 		fi
@@ -1901,7 +1949,7 @@ test_73a() {
 	rm -f $DIR/$tfile
 	replay_barrier $SINGLEMDS
-#define OBD_FAIL_LDLM_ENQUEUE 0x302
+#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
 	do_facet $SINGLEMDS "lctl set_param fail_loc=0x80000302"
 	fail $SINGLEMDS
 	kill -USR1 $pid
@@ -1947,8 +1995,8 @@ run_test 73c "open(O_CREAT), unlink, replay, reconnect at last_replay, close"
 
 test_74() {
 	local clients=${CLIENTS:-$HOSTNAME}
 
-	stop ost1
 	zconf_umount_clients $clients $MOUNT
+	stop ost1
 	facet_failover $SINGLEMDS
 	zconf_mount_clients $clients $MOUNT
 	mount_facet ost1
@@ -1959,72 +2007,457 @@ test_74() {
 }
 run_test 74 "Ensure applications don't fail waiting for OST recovery"
 
+remote_dir_check_80() {
+        local MDTIDX=1
+        local diridx=$($GETSTRIPE -M $remote_dir)
+        [ $diridx -eq $MDTIDX ] || error "$diridx != $MDTIDX"
+
+        createmany -o $remote_dir/f-%d 20 || error "creation failed"
+        local fileidx=$($GETSTRIPE -M $remote_dir/f-1)
+        [ $fileidx -eq $MDTIDX ] || error "$fileidx != $MDTIDX"
+
+        return 0
+}
+
 test_80a() {
-    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
 
-    mkdir -p $DIR/$tdir
-    replay_barrier mds2
-    $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
-    rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
-    fail mds2
-    stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
-    return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1701
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
 }
-run_test 80a "CMD: unlink cross-node dir (fail mds with inode)"
+run_test 80a "DNE: create remote dir, drop update rep from MDT1, fail MDT1"
 
 test_80b() {
-    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
 
-    mkdir -p $DIR/$tdir
-    replay_barrier $SINGLEMDS
-    $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir $DIR/$tdir failed"
-    rmdir $DIR/$tdir || error "rmdir $DIR/$tdir failed"
-    fail $SINGLEMDS
-    stat $DIR/$tdir 2&>/dev/null && error "$DIR/$tdir still exist after recovery!"
-    return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1701
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
 }
-run_test 80b "CMD: unlink cross-node dir (fail mds with name)"
+run_test 80b "DNE: create remote dir, drop update rep from MDT1, fail MDT0"
+
+test_80c() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1701
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80c "DNE: create remote dir, drop update rep from MDT1, fail MDT[0,1]"
+
+test_80d() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        #define OBD_FAIL_UPDATE_OBJ_NET_REP 0x1701
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x1701
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80d "DNE: create remote dir, drop update rep from MDT1, fail 2 MDTs"
+
+test_80e() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80e "DNE: create remote dir, drop MDT0 rep, fail MDT0"
+
+test_80f() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80f "DNE: create remote dir, drop MDT0 rep, fail MDT1"
+
+test_80g() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "remote creation failed"
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80g "DNE: create remote dir, drop MDT0 rep, fail MDT0, then MDT1"
+
+test_80h() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+        $LFS mkdir -i $MDTIDX $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || return 1
+
+        remote_dir_check_80 || error "remote dir check failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 80h "DNE: create remote dir, drop MDT0 rep, fail 2 MDTs"
 
 test_81a() {
-    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
 
-    mkdir -p $DIR/$tdir
-    createmany -o $DIR/$tdir/f 3000 || error "createmany failed"
-    sleep 10
-    $CHECKSTAT -t dir $DIR/$tdir || error "$CHECKSTAT -t dir failed"
-    $CHECKSTAT -t file $DIR/$tdir/f1002 || error "$CHECKSTAT -t file failed"
-    replay_barrier $SINGLEMDS
-    rm $DIR/$tdir/f1002 || error "rm $DIR/$tdir/f1002 failed"
-    fail $SINGLEMDS
-    stat $DIR/$tdir/f1002
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        touch $remote_dir
+        # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
 }
-run_test 81a "CMD: unlink cross-node file (fail mds with name)"
+run_test 81a "DNE: unlink remote dir, drop MDT0 update rep, fail MDT1"
+
+test_81b() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+
+        wait $CLIENT_PID || error "rm remote dir failed"
 
-test_82a() {
-    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
 
-    local dir=$DIR/d82a
-    replay_barrier mds2
-    mkdir $dir || error "mkdir $dir failed"
-    log "FAILOVER mds2"
-    fail mds2
-    stat $DIR
-    $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
 }
-run_test 82a "CMD: mkdir cross-node dir (fail mds with inode)"
+run_test 81b "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0"
 
-test_82b() {
-    [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+test_81c() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
 
-    local dir=$DIR/d82b
-    replay_barrier $SINGLEMDS
-    mkdir $dir || error "mkdir $dir failed"
-    log "FAILOVER mds1"
-    fail $SINGLEMDS
-    stat $DIR
-    $CHECKSTAT -t dir $dir || error "$CHECKSTAT -t dir $dir failed"
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 81c "DNE: unlink remote dir, drop MDT0 update reply, fail MDT0,MDT1"
+
+test_81d() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_OBJ_UPDATE_NET_REP 0x1701
+        do_facet mds${MDTIDX} lctl set_param fail_loc=0x1701
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 81d "DNE: unlink remote dir, drop MDT0 update reply, fail 2 MDTs"
+
+test_81e() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+        fail mds${MDTIDX}
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 81e "DNE: unlink remote dir, drop MDT1 req reply, fail MDT0"
+
+test_81f() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 81f "DNE: unlink remote dir, drop MDT1 req reply, fail MDT1"
+
+test_81g() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        ([ $FAILURE_MODE == "HARD" ] &&
+                [ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+                skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+                return 0
+
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX}
+        fail mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
+}
+run_test 81g "DNE: unlink remote dir, drop req reply, fail M0, then M1"
+
+test_81h() {
+        [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+        local MDTIDX=1
+        local remote_dir=$DIR/$tdir/remote_dir
+
+        mkdir -p $DIR/$tdir
+        $LFS mkdir -i $MDTIDX $remote_dir || error "lfs mkdir failed"
+
+        # OBD_FAIL_MDS_REINT_NET_REP 0x119
+        do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+        rmdir $remote_dir &
+        local CLIENT_PID=$!
+
+        fail mds${MDTIDX},mds$((MDTIDX + 1))
+
+        wait $CLIENT_PID || error "rm remote dir failed"
+
+        stat $remote_dir &>/dev/null && error "$remote_dir still exist!"
+
+        rm -rf $DIR/$tdir || error "rmdir failed"
+
+        return 0
 }
-run_test 82b "CMD: mkdir cross-node dir (fail mds with name)"
+run_test 81h "DNE: unlink remote dir, drop request reply, fail 2 MDTs"
 
 test_83a() {
     mkdir -p $DIR/$tdir
@@ -2250,7 +2683,7 @@ test_89() {
        mkdir -p $DIR/$tdir
        rm -f $DIR/$tdir/$tfile
        wait_mds_ost_sync
-       wait_destroy_complete
+       wait_delete_completed
        BLOCKS1=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
        $SETSTRIPE -i 0 -c 1 $DIR/$tdir/$tfile
        dd if=/dev/zero bs=1M count=10 of=$DIR/$tdir/$tfile
@@ -2263,8 +2696,10 @@ test_89() {
        zconf_mount $(hostname) $MOUNT
        client_up || return 1
        wait_mds_ost_sync
+       wait_delete_completed
        BLOCKS2=$(df -P $MOUNT | tail -n 1 | awk '{ print $3 }')
-       [ "$BLOCKS1" == "$BLOCKS2" ] || error $((BLOCKS2 - BLOCKS1)) blocks leaked
+       [ $((BLOCKS2 - BLOCKS1)) -le 4 ] || \
+               error $((BLOCKS2 - BLOCKS1)) blocks leaked
 }
 run_test 89 "no disk space leak on late ost connection"
 
@@ -2318,7 +2753,7 @@ test_90() { # bug 19494
        # Before failing an OST, get its obd name and index
        local varsvc=${ostfail}_svc
        local obd=$(do_facet $ostfail lctl get_param -n obdfilter.${!varsvc}.uuid)
-       local index=${obd:(-6):1}
+       local index=$(($(facet_number $ostfail) - 1))
 
        echo "Fail $ostfail $obd, display the list of affected files"
        shutdown_facet $ostfail || return 2
@@ -2358,6 +2793,6 @@ test_90() { # bug 19494
 }
 run_test 90 "lfs find identifies the missing striped file segments"
 
-complete $(basename $0) $SECONDS
+complete $SECONDS
 check_and_cleanup_lustre
 exit_status
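
Usage note (a sketch, not part of the patch above): replay-single.sh is driven
by test-framework.sh, so the cases touched by this change can be run
selectively with the ONLY filter that build_test_filter honours, alongside the
REPLAY_SINGLE_EXCEPT and SLOW variables already referenced in the script
header. The command lines below assume an already formatted and configured
test setup (the usual cfg/local.sh style configuration).

    # run only the new replay-barrier check and the dbench-under-failover case
    ONLY="0c 70b" SLOW=yes bash lustre/tests/replay-single.sh

    # the DNE cases (80a-80h, 81a-81h) skip themselves unless the filesystem
    # was formatted with at least two MDTs (MDSCOUNT >= 2)
    ONLY="80a 81a" bash lustre/tests/replay-single.sh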