X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Ftests%2Freplay-dual.sh;h=07969a1c5543cbfdda188db04621c8922ff9a918;hb=caf5bdffb4eb6e3fb31724a1cb037cecfeb6ae6c;hp=1575ee1d0c8276020ae47ec2ea8b594f7fae4044;hpb=bd8b65394f0a1ffd0c7f8dd0d875c55ba219d9ec;p=fs%2Flustre-release.git

diff --git a/lustre/tests/replay-dual.sh b/lustre/tests/replay-dual.sh
index 1575ee1..07969a1 100755
--- a/lustre/tests/replay-dual.sh
+++ b/lustre/tests/replay-dual.sh
@@ -1,34 +1,111 @@
 #!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
 
 set -e
 
-# bug number: 6088 10124 10800
-ALWAYS_EXCEPT="8 15c 17 $REPLAY_DUAL_EXCEPT"
-
-#
-[ "$SLOW" = "no" ] && EXCEPT="$EXCEPT 1 2 3 4 5 14"
+# bug number: LU-2012 10124
+ALWAYS_EXCEPT="14b 15c $REPLAY_DUAL_EXCEPT"
 
 SAVE_PWD=$PWD
 PTLDEBUG=${PTLDEBUG:--1}
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
 SETUP=${SETUP:-""}
 CLEANUP=${CLEANUP:-""}
 MOUNT_2=${MOUNT_2:-"yes"}
+export MULTIOP=${MULTIOP:-multiop}
 . $LUSTRE/tests/test-framework.sh
 
 init_test_env $@
-
 . ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
+
+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="21b"
 
 build_test_filter
 
-cleanup_and_setup_lustre
-rm -rf $DIR/${TESTSUITE}/[df][0-9]* # bug 13798 new t-f tdir staff
+check_and_setup_lustre
+MOUNTED=$(mounted_lustre_filesystems)
+if ! $(echo $MOUNTED' ' | grep -w -q $MOUNT2' '); then
+	zconf_mount $HOSTNAME $MOUNT2
+	MOUNTED2=yes
+fi
+
+assert_DIR
 rm -rf $DIR/[df][0-9]*
 
 [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
 
+# if there is no CLIENT1 defined, some tests can be run on localhost
+CLIENT1=${CLIENT1:-$HOSTNAME}
+# if CLIENT2 doesn't exist, use CLIENT1 instead
+# All tests should use CLIENT2 with MOUNT2 only, therefore it will work if
+# $CLIENT2 == CLIENT1
+# The exception is tests which need two separate nodes
+CLIENT2=${CLIENT2:-$CLIENT1}
+
+# LU-482 Avert LVM and VM inability to flush caches in pre-2.6.33 kernels
+if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
+	sync
+	do_facet $SINGLEMDS "sync; sleep 10; sync; sleep 10; sync"
+fi
+
+LU482_FAILED=$(mktemp -u $TMP/$TESTSUITE.lu482.XXXXXX)
+test_0a() {
+	echo "Check file is LU482_FAILED=$LU482_FAILED"
+	touch $MOUNT2/$tfile-A # force sync FLD/SEQ update before barrier
+	replay_barrier $SINGLEMDS
+#define OBD_FAIL_PTLRPC_FINISH_REPLAY | OBD_FAIL_ONCE
+	touch $MOUNT2/$tfile
+	createmany -o $MOUNT1/$tfile- 50
+	$LCTL set_param fail_loc=0x80000514
+	facet_failover $SINGLEMDS
+	[ -f "$LU482_FAILED" ] && skip "LU-482 failure" && return 0
+	client_up || return 1
+	umount -f $MOUNT2
+	client_up || return 1
+	zconf_mount `hostname` $MOUNT2 || error "mount2 fails"
+	unlinkmany $MOUNT1/$tfile- 50 || return 2
+	rm $MOUNT2/$tfile || return 3
+	rm $MOUNT2/$tfile-A || return 4
+}
+run_test 0a "expired recovery with lost client"
+
+if [ -f "$LU482_FAILED" ]; then
+	log "Found check file $LU482_FAILED, aborting test script"
+	rm -vf "$LU482_FAILED"
+	complete $SECONDS
+	do_nodes $CLIENTS umount -f $MOUNT2 || true
+	do_nodes $CLIENTS umount -f $MOUNT || true
+	# copied from stopall, but avoid the MDS recovery
+	for num in `seq $OSTCOUNT`; do
+		stop ost$num -f
+		rm -f $TMP/ost${num}active
+	done
+	if ! combined_mgs_mds ; then
+		stop mgs
+	fi
+
+	exit_status
+fi
+
+test_0b() {
+	replay_barrier $SINGLEMDS
+	touch $MOUNT2/$tfile
+	touch $MOUNT1/$tfile-2
+	umount $MOUNT2
+	facet_failover $SINGLEMDS
+	umount -f $MOUNT1
+	zconf_mount `hostname` $MOUNT1 || error "mount1 fails"
+	zconf_mount `hostname` $MOUNT2 || error "mount2 fails"
+	checkstat $MOUNT1/$tfile-2 && return 1
+	checkstat $MOUNT2/$tfile && return 2
+	return 0
+}
+run_test 0b "lost client during waiting for next transno"
+
 test_1() {
     touch $MOUNT1/a
     replay_barrier $SINGLEMDS
@@ -94,10 +171,8 @@ run_test 4 "|X| mkdir adir (-EEXIST), mkdir adir/bdir "
 test_5() {
     # multiclient version of replay_single.sh/test_8
     mcreate $MOUNT1/a
-    multiop $MOUNT2/a o_tSc &
+    multiop_bg_pause $MOUNT2/a o_tSc || return 1
     pid=$!
-    # give multiop a chance to open
-    sleep 1
     rm -f $MOUNT1/a
     replay_barrier $SINGLEMDS
     kill -USR1 $pid
@@ -112,12 +187,10 @@ run_test 5 "open, unlink |X| close"
 
 test_6() {
     mcreate $MOUNT1/a
-    multiop $MOUNT2/a o_c &
+    multiop_bg_pause $MOUNT2/a o_c || return 1
    pid1=$!
-    multiop $MOUNT1/a o_c &
+    multiop_bg_pause $MOUNT1/a o_c || return 1
    pid2=$!
-    # give multiop a chance to open
-    sleep 1
    rm -f $MOUNT1/a
    replay_barrier $SINGLEMDS
    kill -USR1 $pid1
@@ -147,9 +220,9 @@ test_9() {
    mcreate $MOUNT1/$tfile-1
    mcreate $MOUNT2/$tfile-2
    # drop first reint reply
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x80000119
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
    fail $SINGLEMDS
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0
+    do_facet $SINGLEMDS lctl set_param fail_loc=0
 
    rm $MOUNT1/$tfile-[1,2] || return 1
 
@@ -163,9 +236,9 @@ test_10() {
    munlink $MOUNT1/$tfile-1
    mcreate $MOUNT2/$tfile-2
    # drop first reint reply
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x80000119
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
    fail $SINGLEMDS
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0
+    do_facet $SINGLEMDS lctl set_param fail_loc=0
 
    checkstat $MOUNT1/$tfile-1 && return 1
    checkstat $MOUNT1/$tfile-2 || return 2
@@ -183,12 +256,12 @@ test_11() {
    mcreate $MOUNT2/$tfile-4
    mcreate $MOUNT1/$tfile-5
    # drop all reint replies for a while
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x0119
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x0119
    # note that with this fail_loc set, facet_failover df will fail
    facet_failover $SINGLEMDS
    #sleep for while, let both clients reconnect and timeout
    sleep $((TIMEOUT * 2))
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0
+    do_facet $SINGLEMDS lctl set_param fail_loc=0
 
    rm $MOUNT1/$tfile-[1-5] || return 1
 
@@ -199,15 +272,14 @@ run_test 11 "both clients timeout during replay"
 
 test_12() {
    replay_barrier $SINGLEMDS
-    multiop $DIR/$tfile mo_c &
+    multiop_bg_pause $DIR/$tfile mo_c || return 1
    MULTIPID=$!
-    sleep 5
 
-#define OBD_FAIL_LDLM_ENQUEUE 0x302
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x80000302
+#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
    facet_failover $SINGLEMDS
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0
-    df $MOUNT || return 1
+    do_facet $SINGLEMDS lctl set_param fail_loc=0
+    clients_up || return 1
 
    ls $DIR/$tfile
    kill -USR1 $MULTIPID || return 3
@@ -220,20 +292,19 @@ test_12() {
 run_test 12 "open resend timeout"
 
 test_13() {
-    multiop $DIR/$tfile mo_c &
+    multiop_bg_pause $DIR/$tfile mo_c || return 1
    MULTIPID=$!
-    sleep 5
 
    replay_barrier $SINGLEMDS
 
    kill -USR1 $MULTIPID || return 3
    wait $MULTIPID || return 4
 
-    # drop close
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x80000115
+    # drop close
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x80000115
    facet_failover $SINGLEMDS
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0
-    df $MOUNT || return 1
+    do_facet $SINGLEMDS lctl set_param fail_loc=0
+    clients_up || return 1
 
    ls $DIR/$tfile
    $CHECKSTAT -t file $DIR/$tfile || return 2
@@ -243,119 +314,71 @@ test_13() {
 }
 run_test 13 "close resend timeout"
 
-test_14() {
-    replay_barrier $SINGLEMDS
-    createmany -o $MOUNT1/$tfile- 25
-    createmany -o $MOUNT2/$tfile-2- 1
-    createmany -o $MOUNT1/$tfile-3- 25
-    umount $MOUNT2
+# test 14a removed after 18143 because it shouldn't fail anymore and does the
+# same as test_15a
 
-    facet_failover $SINGLEMDS
-    # expect failover to fail due to missing client 2
-    df $MOUNT && return 1
-    sleep 1
+test_14b() {
+	wait_mds_ost_sync
+	wait_delete_completed
 
-    # first 25 files should have been replayed
-    unlinkmany $MOUNT1/$tfile- 25 || return 2
+	local BEFOREUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
 
-    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
-    return 0
-}
-run_test 14 "timeouts waiting for lost client during replay"
+	mkdir -p $MOUNT1/$tdir
+	$SETSTRIPE -i 0 $MOUNT1/$tdir
+	replay_barrier $SINGLEMDS
+	createmany -o $MOUNT1/$tdir/$tfile- 5
 
-test_15() {
-    replay_barrier $SINGLEMDS
-    createmany -o $MOUNT1/$tfile- 25
-    createmany -o $MOUNT2/$tfile-2- 1
-    umount $MOUNT2
+	$SETSTRIPE -i 0 $MOUNT2/$tfile-2
+	dd if=/dev/zero of=$MOUNT2/$tfile-2 bs=1M count=5
+	createmany -o $MOUNT1/$tdir/$tfile-3- 5
+	umount $MOUNT2
 
-    facet_failover $SINGLEMDS
-    df $MOUNT || return 1
+	fail $SINGLEMDS
+	wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
 
-    unlinkmany $MOUNT1/$tfile- 25 || return 2
-    [ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
+	# first set of files should have been replayed
+	unlinkmany $MOUNT1/$tdir/$tfile- 5 || error "first unlinks failed"
+	unlinkmany $MOUNT1/$tdir/$tfile-3- 5 || error "second unlinks failed"
 
-    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
-    return 0
-}
-run_test 15 "timeout waiting for lost client during replay, 1 client completes"
+	zconf_mount $HOSTNAME $MOUNT2 || error "mount $MOUNT2 failed"
+	[ -f $MOUNT2/$tfile-2 ] && error "$MOUNT2/$tfile-2 exists!"
-test_15a() {
-    local ost_last_id=""
-    local osc_last_id=""
-
-    replay_barrier $SINGLEMDS
-    echo "data" > "$MOUNT2/${tfile}-m2"
-
-    umount $MOUNT2
-    facet_failover $SINGLEMDS
-    df $MOUNT || return 1
-
-    ost_last_id=`cat /proc/fs/lustre/obdfilter/*/last_id`
-    mds_last_id=`cat /proc/fs/lustre/osc/*mds*/last_id`
-
-    echo "Ids after MDS<->OST synchonizing"
-    echo "--------------------------------"
-    echo "MDS last_id:"
-    echo $mds_last_id
-    echo "OST last_id:"
-    echo $ost_last_id
-
-    local i=0
-    echo $ost_last_id | while read id; do
-        ost_ids[$i]=$id
-        ((i++))
-    done
-
-    i=0
-    echo $mds_last_id | while read id; do
-        mds_ids[$i]=$id
-        ((i++))
-    done
-
-    local arr_len=${#mds_ids[*]}
-    for ((i=0;i<$arr_len;i++)); do
-        mds_id=${mds_ids[i]}
-        ost_id=${ost_ids[i]}
-
-        test $mds_id -ge $ost_id || {
-            echo "MDS last id ($mds_id) is smaller than OST one ($ost_id)"
-            return 2
-        }
-    done
+	wait_mds_ost_sync || error "wait_mds_ost_sync failed"
+	wait_delete_completed || error "wait_delete_complete failed"
 
-    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
-    return 0
+	local AFTERUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
+	log "before $BEFOREUSED, after $AFTERUSED"
+	# leave some margin for some files/dirs to be modified (OI, llog, etc)
+	[ $AFTERUSED -gt $((BEFOREUSED + 128)) ] &&
+		error "after $AFTERUSED > before $BEFOREUSED" || true
 }
-#CROW run_test 15a "OST clear orphans - synchronize ids on MDS and OST"
+run_test 14b "delete ost orphans if gap occurred in objids due to VBR"
 
-test_15b() {
+test_15a() { # was test_15
    replay_barrier $SINGLEMDS
-    echo "data" > "$MOUNT2/${tfile}-m2"
+    createmany -o $MOUNT1/$tfile- 25
+    createmany -o $MOUNT2/$tfile-2- 1
    umount $MOUNT2
 
-    do_facet ost1 "sysctl -w lustre.fail_loc=0x80000802"
-    facet_failover $SINGLEMDS
+    fail $SINGLEMDS
+
+    unlinkmany $MOUNT1/$tfile- 25 || return 2
+    [ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
 
-    df $MOUNT || return 1
-    do_facet ost1 "sysctl -w lustre.fail_loc=0"
-    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
    return 0
 }
-#CROW run_test 15b "multiple delayed OST clear orphans"
+run_test 15a "timeout waiting for lost client during replay, 1 client completes"
 
 test_15c() {
    replay_barrier $SINGLEMDS
    for ((i = 0; i < 2000; i++)); do
-	echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
+        echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
    done
-
    umount $MOUNT2
-    facet_failover $SINGLEMDS
-    df $MOUNT || return 1
-
+
+    fail $SINGLEMDS
+
    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
    return 0
 }
@@ -369,8 +392,7 @@ test_16() {
 
    facet_failover $SINGLEMDS
    sleep $TIMEOUT
-    facet_failover $SINGLEMDS
-    df $MOUNT || return 1
+    fail $SINGLEMDS
 
    unlinkmany $MOUNT1/$tfile- 25 || return 2
 
@@ -381,6 +403,8 @@ run_test 16 "fail MDS during recovery (3571)"
 
 test_17() {
+    remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
    createmany -o $MOUNT1/$tfile- 25
    createmany -o $MOUNT2/$tfile-2- 1
 
@@ -390,8 +414,7 @@ test_17() {
 
    facet_failover ost1
    sleep $TIMEOUT
-    facet_failover ost1
-    df $MOUNT || return 1
+    fail ost1
 
    unlinkmany $MOUNT1/$tfile- 25 || return 2
 
@@ -412,16 +435,18 @@ test_18() { # bug 3822 - evicting client with enqueued lock
    statmany -s $MOUNT1/$tdir/f 1 500 &
    OPENPID=$!
    NOW=`date +%s`
-    do_facet $SINGLEMDS sysctl -w lustre.fail_loc=0x8000030b # hold enqueue
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x8000030b # hold enqueue
    sleep 1
-#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
-    do_facet client sysctl -w lustre.fail_loc=0x80000305 # drop cb, evict
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
+    do_facet client lctl set_param fail_loc=0x80000305 # drop cb, evict
    cancel_lru_locks mdc
    usleep 500 # wait to ensure first client is one that will be evicted
    openfile -f O_RDONLY $MOUNT2/$tdir/f0
    wait $OPENPID
    dmesg | grep "entering recovery in server" && \
        error "client not evicted" || true
+    do_facet client "lctl set_param fail_loc=0"
+    do_facet $SINGLEMDS "lctl set_param fail_loc=0"
 }
 run_test 18 "ldlm_handle_enqueue succeeds on evicted export (3822)"
 
@@ -436,9 +461,406 @@ test_19() { # Bug 10991 - resend of open request does not fail assertion.
 }
 run_test 19 "resend of open request"
 
-equals_msg `basename $0`: test complete, cleaning up
+test_20() { #16389
+	BEFORE=`date +%s`
+	replay_barrier $SINGLEMDS
+	touch $MOUNT1/a
+	touch $MOUNT2/b
+	umount $MOUNT2
+	fail $SINGLEMDS
+	rm $MOUNT1/a
+	zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+	TIER1=$((`date +%s` - BEFORE))
+	BEFORE=`date +%s`
+	replay_barrier $SINGLEMDS
+	touch $MOUNT1/a
+	touch $MOUNT2/b
+	umount $MOUNT2
+	fail $SINGLEMDS
+	rm $MOUNT1/a
+	zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+	TIER2=$((`date +%s` - BEFORE))
+	[ $TIER2 -ge $((TIER1 * 2)) ] && \
+		error "recovery time is growing $TIER2 > $TIER1"
+	return 0
+}
+run_test 20 "recovery time is not increasing"
+
+# commit on sharing tests
+test_21a() {
+	local param_file=$TMP/$tfile-params
+
+	save_lustre_params $(facet_active_host $SINGLEMDS) "mdt.*.commit_on_sharing" > $param_file
+	do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=1
+	touch $MOUNT1/$tfile-1
+	mv $MOUNT2/$tfile-1 $MOUNT2/$tfile-2
+	mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
+	replay_barrier_nosync $SINGLEMDS
+	umount $MOUNT2
+
+	facet_failover $SINGLEMDS
+
+	# all renames are replayed
+	unlink $MOUNT1/$tfile-3 || return 2
+
+	zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+
+	do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=0
+	rm -rf $MOUNT1/$tfile-*
+	restore_lustre_params < $param_file
+	rm -f $param_file
+	return 0
+}
+run_test 21a "commit on sharing"
+
+test_21b_sub () {
+	local mds=$1
+	do_node $CLIENT1 rm -f $MOUNT1/$tfile-*
+
+	do_facet $mds sync
+	do_node $CLIENT1 touch $MOUNT1/$tfile-1
+	do_node $CLIENT2 mv $MOUNT1/$tfile-1 $MOUNT1/$tfile-2
+	do_node $CLIENT1 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
+
+	replay_barrier_nosync $mds
+	shutdown_client $CLIENT2 $MOUNT1
+
+	facet_failover $mds
+
+	# were renames replayed?
+	local rc=0
+	echo UNLINK $MOUNT1/$tfile-3
+	do_node $CLIENT1 unlink $MOUNT1/$tfile-3 || \
+		{ echo "unlink $tfile-3 failed!" && rc=1; }
+
+	boot_node $CLIENT2
+	zconf_mount_clients $CLIENT2 $MOUNT1 || error "mount $CLIENT2 $MOUNT1 fail"
+
+	return $rc
+}
+
+test_21b() {
+	[ -z "$CLIENTS" ] && skip "Need two or more clients." && return
+	[ $CLIENTCOUNT -lt 2 ] && \
+		{ skip "Need two or more clients, have $CLIENTCOUNT" && return; }
+
+	if [ "$FAILURE_MODE" = "HARD" ] && mixed_mdt_devs; then
+		skip "Several mdt services on one mds node are used with FAILURE_MODE=$FAILURE_MODE."
+		return 0
+	fi
+
+
+	zconf_umount_clients $CLIENTS $MOUNT2
+	zconf_mount_clients $CLIENTS $MOUNT1
+
+	local param_file=$TMP/$tfile-params
+
+	local num=$(get_mds_dir $MOUNT1)
+
+	save_lustre_params $(facet_active_host mds$num) "mdt.*.commit_on_sharing" > $param_file
+
+	# COS enabled
+	local COS=1
+	do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+	test_21b_sub mds$num || error "Not all renames are replayed. COS=$COS"
+
+	# COS disabled (should fail)
+	COS=0
+	do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+	# there is still a window when transactions may be written to disk before
+	# the MDS device is set R/O. To avoid such a rare test failure, the check
+	# is repeated several times.
+	local n_attempts=1
+	while true; do
+		test_21b_sub mds$num || break;
+		let n_attempts=n_attempts+1
+		[ $n_attempts -gt 3 ] &&
+			error "The test cannot check whether COS works or not: all renames are replayed w/o COS"
+	done
+	zconf_mount_clients $CLIENTS $MOUNT2
+	restore_lustre_params < $param_file
+	rm -f $param_file
+	return 0
+}
+run_test 21b "commit on sharing, two clients"
+
+checkstat_22() {
+	checkstat $MOUNT1/$remote_dir || return 1
+	checkstat $MOUNT1/$remote_dir/dir || return 2
+	checkstat $MOUNT1/$remote_dir/$tfile-1 || return 3
+	checkstat $MOUNT1/$remote_dir/dir/$tfile-1 || return 4
+	return 0
+}
+
+create_remote_dir_files_22() {
+	do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir/dir || return 1
+	do_node $CLIENT1 createmany -o $MOUNT1/$remote_dir/dir/$tfile- 2 ||
+		return 2
+	do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 ||
+		return 3
+	return 0
+}
+
+test_22a () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs need to be on different hosts for HARD fail mode" &&
+		return 0
+
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX}
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22a "c1 lfs mkdir -i 1 dir1, M0 drop reply & fail, c2 mkdir dir1/dir"
+
+test_22b () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22b "c1 lfs mkdir -i 1 d1, M0 drop reply & fail M0/M1, c2 mkdir d1/dir"
+
+test_22c () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs need to be on different hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x188
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds$((MDTIDX+1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds$MDTIDX
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22c "c1 lfs mkdir -i 1 d1, M1 drop update & fail M1, c2 mkdir d1/dir"
+
+test_22d () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x188
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds$MDTIDX
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22d "c1 lfs mkdir -i 1 d1, M1 drop update & fail M0/M1, c2 mkdir d1/dir"
+
+checkstat_23() {
+	checkstat $MOUNT1/$remote_dir || return 1
+	checkstat $MOUNT1/$remote_dir/$tfile-1 || return 2
+	return 0
+}
+
+create_remote_dir_files_23() {
+	do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir || return 1
+	do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 || return 2
+	return 0
+}
+
+test_23a () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs need to be on different hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	local CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23a "c1 rmdir d1, M1 drop reply and fail, client2 mkdir d1"
+
+test_23b () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	local CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23b "c1 rmdir d1, M1 drop reply and fail M0/M1, c2 mkdir d1"
+
+test_23c () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs need to be on different hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x188
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX}
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || return 6
+	return 0
+}
+run_test 23c "c1 rmdir d1, M0 drop update reply and fail M0, c2 mkdir d1"
+
+test_23d () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x188
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || return 6
+	return 0
+}
+run_test 23d "c1 rmdir d1, M0 drop update reply and fail M0/M1, c2 mkdir d1"
+
+# end commit on sharing tests
+
+complete $SECONDS
 SLEEP=$((`date +%s` - $NOW))
 [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
 check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true
-
+exit_status
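
Note on the multiop_bg_pause conversions (tests 5, 6, 12, 13): the old
"multiop ... & ; sleep 1" idiom raced against the background open, while
multiop_bg_pause returns only after the open has actually completed. The
following is a minimal plain-bash sketch of that handshake pattern; it is
illustrative only -- bg_open_pause, BGPID, the ready-file path, and the fd-3
mechanics are assumptions, not the test-framework implementation.

#!/bin/bash
# Sketch: start a background holder of an open file descriptor and return
# only once the open has really happened (no blind "sleep").
bg_open_pause() {
	local file=$1
	local ready
	ready=$(mktemp -u /tmp/bg_open.XXXXXX)	# hypothetical ready-file path

	(
		# Open (creating if needed) the file on fd 3.
		exec 3<>"$file" || exit 1
		# On SIGUSR1, close the fd and exit -- mirrors multiop's
		# "hold open until USR1, then close" behaviour.
		trap 'exec 3>&-; exit 0' USR1
		touch "$ready"			# signal: the open completed
		while :; do sleep 1; done	# hold the file open
	) &
	BGPID=$!

	# Poll for the ready file instead of sleeping a fixed time.
	local i
	for ((i = 0; i < 100; i++)); do
		[ -f "$ready" ] && rm -f "$ready" && return 0
		sleep 0.1
	done
	kill "$BGPID" 2>/dev/null
	return 1				# the open never completed
}

# Usage mirroring test_5: open, unlink while open, then close on demand.
bg_open_pause /tmp/$$.testfile || exit 1
rm -f /tmp/$$.testfile	# unlink while the descriptor is still held
kill -USR1 "$BGPID"	# ask the holder to close
wait "$BGPID"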
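Note on the commit-on-sharing tests (21a/21b): they bracket the
mdt.*.commit_on_sharing tunable with save_lustre_params /
restore_lustre_params so that a failure inside the test body cannot leak the
modified setting. Below is a rough sketch of that save/restore pattern,
assuming only that "lctl get_param" prints one "name=value" line per matching
parameter and that "lctl set_param" accepts the same form; save_params and
restore_params are illustrative stand-ins for the framework helpers, not
their actual implementation.

#!/bin/bash
# Sketch: snapshot matching tunables, change them, and guarantee they are
# restored even if the test body errors out.
save_params() {
	local pattern=$1 file=$2
	lctl get_param "$pattern" > "$file"	# one "name=value" per line
}

restore_params() {
	local file=$1 line
	while IFS= read -r line; do
		lctl set_param "$line"		# feed "name=value" back
	done < "$file"
}

param_file=$(mktemp /tmp/params.XXXXXX)
save_params "mdt.*.commit_on_sharing" "$param_file"
trap 'restore_params "$param_file"; rm -f "$param_file"' EXIT

lctl set_param mdt.*.commit_on_sharing=1
# ... test body runs here; the EXIT trap restores the saved values ...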