-#!/bin/sh
+#!/bin/bash
set -e
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+# bug number: 13129 13129 10124
+ALWAYS_EXCEPT="2 3 15c $REPLAY_DUAL_EXCEPT"
+
+SAVE_PWD=$PWD
+PTLDEBUG=${PTLDEBUG:--1}
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}
+MOUNT_2=${MOUNT_2:-"yes"}
. $LUSTRE/tests/test-framework.sh
-init_test_env $@
+if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
+ CONFIG_EXCEPTIONS="17"
+ echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. "
+ echo "Except the tests: $CONFIG_EXCEPTIONS"
+ ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
+fi
-. ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
-
-# Skip these tests
-# 21 - open vs. unlink out of order replay: isn't solved yet
-ALWAYS_EXCEPT="21"
-
-SETUP=${SETUP:-"setup"}
-CLEANUP=${CLEANUP:-"cleanup"}
-
-gen_config() {
- rm -f $XMLCONFIG
- if [ "$MDSCOUNT" -gt 1 ]; then
- add_lmv lmv1_svc
- for mds in `mds_list`; do
- MDSDEV=$TMP/${mds}-`hostname`
- add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1_svc
- done
- add_lov_to_lmv lov1 lmv1_svc --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=lmv1
- else
- add_mds mds1 --dev $MDSDEV --size $MDSSIZE
- add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=mds1
- fi
+init_test_env $@
- add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE --failover
- add_ost ost2 --lov lov1 --dev ${OSTDEV}-2 --size $OSTSIZE --failover
- add_client client ${MDS} --lov lov1 --path $MOUNT
-}
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-build_test_filter
+remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-SETUP=${SETUP:-"setup"}
-CLEANUP=${CLEANUP:-"cleanup"}
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 5 14"
-cleanup() {
- # make sure we are using the primary MDS, so the config log will
- # be able to clean up properly.
- activemds=`facet_active mds1`
- if [ $activemds != "mds1" ]; then
- fail mds1
- fi
-
- umount $MOUNT2 || true
- umount $MOUNT || true
- rmmod llite
-
- # b=3941
- # In mds recovery, the mds will clear orphans in ost by
- # mds_lov_clear_orphan, which will sent the request to ost and waiting for
- # the reply, if we stop mds at this time, we will got the obd_refcount > 1
- # errors, because mds_lov_clear_orphan grab a export of mds,
- # so the obd_refcount of mds will not be zero. So, wait a while before
- # stop mds. This bug needs further work.
- for mds in `mds_list`; do
- sleep 5
- stop $mds ${FORCE} $MDSLCONFARGS
- done
- stop ost2 ${FORCE}
- stop ost ${FORCE} --dump cleanup-dual.log
- stop_lgssd
- stop_lsvcgssd
-}
+build_test_filter
-if [ "$ONLY" == "cleanup" ]; then
- sysctl -w portals.debug=0
- cleanup
- exit
+check_and_setup_lustre
+MOUNTED=$(mounted_lustre_filesystems)
+# Test the pipeline's exit status directly: the previous "if ! $(...)" form
+# ran grep -q's (empty) output as a command and only worked by accident.
+if ! echo $MOUNTED | grep -w -q "$MOUNT2"; then
+    zconf_mount $HOSTNAME $MOUNT2
+    MOUNTED2=yes
fi
-setup() {
- gen_config
+assert_DIR
+rm -rf $DIR/[df][0-9]*
- start_krb5_kdc || exit 1
- start_lsvcgssd || exit 2
- start_lgssd || exit 3
- start ost --reformat $OSTLCONFARGS
- PINGER=`cat /proc/fs/lustre/pinger`
-
- if [ "$PINGER" != "on" ]; then
- echo "ERROR: Lustre must be built with --enable-pinger for replay-dual"
- stop ost
- exit 1
- fi
-
- start ost2 --reformat $OSTLCONFARGS
- [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
- for mds in `mds_list`; do
- start $mds --reformat $MDSLCONFARGS
- done
- grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
- grep " $MOUNT2 " /proc/mounts || zconf_mount `hostname` $MOUNT2
-
- echo $TIMEOUT > /proc/sys/lustre/timeout
- echo $UPCALL > /proc/sys/lustre/upcall
-}
-
-$SETUP
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
-
test_1() {
touch $MOUNT1/a
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
touch $MOUNT2/b
- fail mds1
+ fail $SINGLEMDS
checkstat $MOUNT2/a || return 1
checkstat $MOUNT1/b || return 2
rm $MOUNT2/a $MOUNT1/b
test_2() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir
- fail mds1
+ fail $SINGLEMDS
checkstat $MOUNT2/adir || return 1
rmdir $MOUNT2/adir
checkstat $MOUNT2/adir && return 2
return 0
}
-
run_test 2 "|X| mkdir adir"
test_3() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir
mkdir $MOUNT2/adir/bdir
- fail mds1
+ fail $SINGLEMDS
checkstat $MOUNT2/adir || return 1
checkstat $MOUNT1/adir/bdir || return 2
rmdir $MOUNT2/adir/bdir $MOUNT1/adir
checkstat $MOUNT2/adir/bdir && return 4
return 0
}
-
run_test 3 "|X| mkdir adir, mkdir adir/bdir "
test_4() {
mkdir $MOUNT1/adir
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mkdir $MOUNT1/adir && return 1
mkdir $MOUNT2/adir/bdir
- fail mds1
+ fail $SINGLEMDS
checkstat $MOUNT2/adir || return 2
checkstat $MOUNT1/adir/bdir || return 3
checkstat $MOUNT2/adir/bdir && return 5
return 0
}
-
run_test 4 "|X| mkdir adir (-EEXIST), mkdir adir/bdir "
test_5() {
# multiclient version of replay_single.sh/test_8
mcreate $MOUNT1/a
- multiop $MOUNT2/a o_tSc &
+ multiop_bg_pause $MOUNT2/a o_tSc || return 1
pid=$!
- # give multiop a chance to open
- sleep 1
rm -f $MOUNT1/a
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
kill -USR1 $pid
wait $pid || return 1
- fail mds1
+ fail $SINGLEMDS
[ -e $MOUNT2/a ] && return 2
return 0
}
test_6() {
mcreate $MOUNT1/a
- multiop $MOUNT2/a o_c &
+ multiop_bg_pause $MOUNT2/a o_c || return 1
pid1=$!
- multiop $MOUNT1/a o_c &
+ multiop_bg_pause $MOUNT1/a o_c || return 1
pid2=$!
- # give multiop a chance to open
- sleep 1
rm -f $MOUNT1/a
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
kill -USR1 $pid1
wait $pid1 || return 1
- fail mds1
- kill -USR1 $pid2
- wait $pid2 || return 1
- [ -e $MOUNT2/a ] && return 2
- return 0
-}
-run_test 6 "open1, open2, unlink |X| close1 [fail mds] close2"
-
-test_6b() {
- mcreate $MOUNT1/a
- multiop $MOUNT2/a o_c &
- pid1=$!
- multiop $MOUNT1/a o_c &
- pid2=$!
- # give multiop a chance to open
- sleep 1
- rm -f $MOUNT1/a
- replay_barrier mds1
+ fail $SINGLEMDS
kill -USR1 $pid2
wait $pid2 || return 1
-
- fail mds1
- kill -USR1 $pid1
- wait $pid1 || return 1
[ -e $MOUNT2/a ] && return 2
return 0
}
-run_test 6b "open1, open2, unlink |X| close2 [fail mds] close1"
+run_test 6 "open1, open2, unlink |X| close1 [fail $SINGLEMDS] close2"
test_8() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
drop_reint_reply "mcreate $MOUNT1/$tfile" || return 1
- fail mds1
+ fail $SINGLEMDS
checkstat $MOUNT2/$tfile || return 2
rm $MOUNT1/$tfile || return 3
run_test 8 "replay of resent request"
test_9() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $MOUNT1/$tfile-1
mcreate $MOUNT2/$tfile-2
# drop first reint reply
- sysctl -w lustre.fail_loc=0x80000119
- fail mds1
- sysctl -w lustre.fail_loc=0
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
+ fail $SINGLEMDS
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
rm $MOUNT1/$tfile-[1,2] || return 1
test_10() {
mcreate $MOUNT1/$tfile-1
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
munlink $MOUNT1/$tfile-1
mcreate $MOUNT2/$tfile-2
# drop first reint reply
- sysctl -w lustre.fail_loc=0x80000119
- fail mds1
- sysctl -w lustre.fail_loc=0
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x80000119
+ fail $SINGLEMDS
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
checkstat $MOUNT1/$tfile-1 && return 1
checkstat $MOUNT1/$tfile-2 || return 2
run_test 10 "resending a replayed unlink"
test_11() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
mcreate $MOUNT1/$tfile-1
mcreate $MOUNT2/$tfile-2
mcreate $MOUNT1/$tfile-3
mcreate $MOUNT2/$tfile-4
mcreate $MOUNT1/$tfile-5
# drop all reint replies for a while
- sysctl -w lustre.fail_loc=0x0119
- facet_failover mds1
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x0119
+ # note that with this fail_loc set, facet_failover df will fail
+ facet_failover $SINGLEMDS
#sleep for while, let both clients reconnect and timeout
sleep $((TIMEOUT * 2))
- sysctl -w lustre.fail_loc=0
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
rm $MOUNT1/$tfile-[1-5] || return 1
run_test 11 "both clients timeout during replay"
test_12() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
- multiop $DIR/$tfile mo_c &
+ multiop_bg_pause $DIR/$tfile mo_c || return 1
MULTIPID=$!
- sleep 5
- # drop first enqueue
- sysctl -w lustre.fail_loc=0x80000302
- facet_failover mds1
+#define OBD_FAIL_LDLM_ENQUEUE 0x302
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
+ facet_failover $SINGLEMDS
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
df $MOUNT || return 1
- sysctl -w lustre.fail_loc=0
ls $DIR/$tfile
- $CHECKSTAT -t file $DIR/$tfile || return 2
kill -USR1 $MULTIPID || return 3
wait $MULTIPID || return 4
+ $CHECKSTAT -t file $DIR/$tfile || return 2
rm $DIR/$tfile
return 0
run_test 12 "open resend timeout"
test_13() {
- multiop $DIR/$tfile mo_c &
+ multiop_bg_pause $DIR/$tfile mo_c || return 1
MULTIPID=$!
- sleep 5
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
kill -USR1 $MULTIPID || return 3
wait $MULTIPID || return 4
# drop close
- sysctl -w lustre.fail_loc=0x80000115
- facet_failover mds1
+ do_facet $SINGLEMDS lctl set_param fail_loc=0x80000115
+ facet_failover $SINGLEMDS
+ do_facet $SINGLEMDS lctl set_param fail_loc=0
df $MOUNT || return 1
- sysctl -w lustre.fail_loc=0
ls $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 2
}
run_test 13 "close resend timeout"
-
test_14() {
-    replay_barrier mds1
+    replay_barrier $SINGLEMDS
    createmany -o $MOUNT1/$tfile- 25
    createmany -o $MOUNT2/$tfile-2- 1
    createmany -o $MOUNT1/$tfile-3- 25
+    # unmount client 2 before failover so the MDS waits in vain for it
    umount $MOUNT2
-    facet_failover mds1
-    # expect failover to fail
+    facet_failover $SINGLEMDS
+    # expect failover to fail due to missing client 2
    df $MOUNT && return 1
    sleep 1
-    # first 25 files shouuld have been
-    # replayed
-    sleep 2
+    # first 25 files should have been replayed
    unlinkmany $MOUNT1/$tfile- 25 || return 2
-    zconf_mount `hostname` $MOUNT2
+    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
    return 0
}
run_test 14 "timeouts waiting for lost client during replay"
-test_15() {
- replay_barrier mds1
+test_15a() { # was test_15
+ replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
umount $MOUNT2
- facet_failover mds1
+ facet_failover $SINGLEMDS
df $MOUNT || return 1
- sleep 1
unlinkmany $MOUNT1/$tfile- 25 || return 2
+ [ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
- zconf_mount `hostname` $MOUNT2
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
-run_test 15 "timeout waiting for lost client during replay, 1 client completes"
+run_test 15a "timeout waiting for lost client during replay, 1 client completes"
+
+test_15c() {
+    replay_barrier $SINGLEMDS
+    # create many small files after the barrier so the failed-over MDS has a
+    # large batch of OST orphans to clean up while client 2 never reconnects
+    for ((i = 0; i < 2000; i++)); do
+        echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
+    done
+
+    umount $MOUNT2
+    facet_failover $SINGLEMDS
+
+    df $MOUNT || return 1
+
+    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+    return 0
+}
+run_test 15c "remove multiple OST orphans"
+
test_16() {
- replay_barrier mds1
+ replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
umount $MOUNT2
- facet_failover mds1
+ facet_failover $SINGLEMDS
sleep $TIMEOUT
- facet_failover mds1
+ facet_failover $SINGLEMDS
df $MOUNT || return 1
- sleep 1
unlinkmany $MOUNT1/$tfile- 25 || return 2
- zconf_mount `hostname` $MOUNT2
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
-#run_test 16 "fail MDS during recovery (3571)"
+run_test 16 "fail MDS during recovery (3571)"
test_17() {
+ remote_ost_nodsh && skip "remote OST with nodsh" && return 0
+
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
# Make sure the disconnect is lost
- replay_barrier ost
+ replay_barrier ost1
umount $MOUNT2
- echo -1 > /proc/sys/portals/debug
- facet_failover ost
+ facet_failover ost1
sleep $TIMEOUT
- facet_failover ost
+ facet_failover ost1
df $MOUNT || return 1
- sleep 1
unlinkmany $MOUNT1/$tfile- 25 || return 2
- zconf_mount `hostname` $MOUNT2
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
-#Still not support ost fail over
-#run_test 17 "fail OST during recovery (3571)"
+run_test 17 "fail OST during recovery (3571)"
-test_18 () {
-    replay_barrier mds1
-    multiop $MOUNT2/$tfile O_c &
-    pid2=$!
-    multiop $MOUNT1/$tfile O_c &
-    pid1=$!
-    # give multiop a chance to open
-    sleep 1
-    kill -USR1 $pid2
-    kill -USR1 $pid1
+# cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for it
+export NOW=0
+
+test_18() { # bug 3822 - evicting client with enqueued lock
+    #set -vx
+    mkdir -p $MOUNT1/$tdir
+    touch $MOUNT1/$tdir/f0
+#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED    0x30b
+    # client 1 stats f1..f500 in the background; the held enqueue blocks it
+    statmany -s $MOUNT1/$tdir/f 1 500 &
+    OPENPID=$!
+    # NOW is read by the end-of-script sleep to let the MDS timer elapse
+    NOW=`date +%s`
+    do_facet $SINGLEMDS lctl set_param fail_loc=0x8000030b    # hold enqueue
    sleep 1
+#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
+    do_facet client lctl set_param fail_loc=0x80000305    # drop cb, evict
+    cancel_lru_locks mdc
+    usleep 500 # wait to ensure first client is one that will be evicted
+    # client 2 opens f0; with the BL callback dropped, client 1 gets evicted
+    openfile -f O_RDONLY $MOUNT2/$tdir/f0
+    wait $OPENPID
+    dmesg | grep "entering recovery in server" && \
+        error "client not evicted" || true
+    do_facet client "lctl set_param fail_loc=0"
+    do_facet $SINGLEMDS "lctl set_param fail_loc=0"
+}
+run_test 18 "ldlm_handle_enqueue succeeds on evicted export (3822)"
+
+test_19() { # Bug 10991 - resend of open request does not fail assertion.
+    replay_barrier $SINGLEMDS
+    # create from client 1 with the ldlm reply dropped, forcing a resend
+    drop_ldlm_reply "createmany -o $DIR/$tfile 1" || return 1
+    fail $SINGLEMDS
+    # file must be visible from client 2 after replay of the resent open
+    checkstat $DIR2/${tfile}0 || return 2
+    rm $DIR/${tfile}0 || return 3
+
+    return 0
+}
+run_test 19 "resend of open request"
+
+test_20() { #16389
+    # run the same recovery scenario twice and compare wall-clock durations
+    BEFORE=`date +%s`
+    replay_barrier $SINGLEMDS
+    touch $MOUNT1/a
+    touch $MOUNT2/b
    umount $MOUNT2
-    facet_failover mds1
-    df || df ||	return 1
-    zconf_mount `hostname` $MOUNT2
+    facet_failover $SINGLEMDS
+    df $MOUNT1 || return 1
+    rm $MOUNT1/a
+    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+    TIER1=$((`date +%s` - BEFORE))
+    BEFORE=`date +%s`
+    replay_barrier $SINGLEMDS
+    touch $MOUNT1/a
+    touch $MOUNT2/b
+    umount $MOUNT2
+    facet_failover $SINGLEMDS
+    df $MOUNT1 || return 1
+    rm $MOUNT1/a
+    zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+    TIER2=$((`date +%s` - BEFORE))
+    # second recovery pass must not take 2x (or more) the first pass
+    [ $TIER2 -ge $((TIER1 * 2)) ] && \
+        error "recovery time is growing $TIER2 > $TIER1"
+    return 0
}
-run_test 18 "replay open, Abort recovery, don't assert (3892)"
+run_test 20 "recovery time is not increasing"
+
+# commit on sharing tests
+test_21a() {
+ local param_file=$TMP/$tfile-params
+
+ save_lustre_params $(facet_active_host $SINGLEMDS) "mdt.*.commit_on_sharing" > $param_file
+ do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=1
+ touch $MOUNT1/$tfile-1
+ mv $MOUNT2/$tfile-1 $MOUNT2/$tfile-2
+ mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
+ replay_barrier_nosync $SINGLEMDS
+ umount $MOUNT2
-# cleanup with blocked enqueue fails until timer elapses (MDS busy), wait for
-# itexport NOW=0
+ facet_failover $SINGLEMDS
-test_20() { # bug 3822 - evicting client with enqueued lock
- mkdir -p $MOUNT1/$tdir
- touch $MOUNT1/$tdir/f0
-#define OBD_FAIL_LDLM_ENQUEUE_BLOCKED 0x30b
- statmany -s $MOUNT1/$tdir/f 500 &
- OPENPID=$!
- NOW=`date +%s`
- do_facet mds1 sysctl -w lustre.fail_loc=0x8000030b # hold enqueue
- sleep 1
-#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
- do_facet client sysctl -w lustre.fail_loc=0x80000305 # drop cb, evict
- cancel_lru_locks MDC
- usleep 500 # wait to ensure first client is one that will be evicted
- openfile -f O_RDONLY $MOUNT2/$tdir/f0
- wait $OPENPID
- dmesg | grep "entering recovery in server" && \
- error "client not evicted" || true
+ # all renames are replayed
+ unlink $MOUNT1/$tfile-3 || return 2
+
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+
+ do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=0
+ rm -rf $MOUNT1/$tfile-*
+ restore_lustre_params < $param_file
+ rm -f $param_file
+ return 0
}
-run_test 20 "ldlm_handle_enqueue succeeds on evicted export (3822)"
-
-# $1 - number of mountpoint
-# $2 - mds
-function find_dev_for_fs_and_mds()
-{
- local fsuuid=`cat /proc/fs/lustre/llite/fs$1/uuid`
- $LCTL device_list | awk "/mdc.*$2.*$fsuuid/ {print \$4}"
+run_test 21a "commit on sharing"
+
+test_21b_sub () {
+    # Helper for test_21b: exercise rename replay on MDS $1 with two clients.
+    # Returns non-zero if the renames were not replayed after failover.
+    local mds=$1
+    do_node $CLIENT1 rm -f $MOUNT1/$tfile-*
+
+    do_facet $mds sync
+    do_node $CLIENT1 touch $MOUNT1/$tfile-1
+    do_node $CLIENT2 mv $MOUNT1/$tfile-1 $MOUNT1/$tfile-2
+    do_node $CLIENT1 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
+
+    # barrier without sync: only commit-on-sharing can make the renames durable
+    replay_barrier_nosync $mds
+    shutdown_client $CLIENT2 $MOUNT1
+
+    facet_failover $mds
+
+    # were renames replayed?
+    local rc=0
+    echo UNLINK $MOUNT1/$tfile-3
+    do_node $CLIENT1 unlink  $MOUNT1/$tfile-3 || \
+        { echo "unlink $tfile-3 fail!" && rc=1; }
+
+    boot_node $CLIENT2
+    zconf_mount_clients $CLIENT2 $MOUNT1 || error "mount $CLIENT2 $MOUNT1 fail"
+
+    return $rc
}
-test_21() {
- mdc1dev=`find_dev_for_fs_and_mds 0 mds1`
- mdc2dev=`find_dev_for_fs_and_mds 1 mds1`
- multiop $MOUNT1/f21 O
- cancel_lru_locks MDC
- # generate IT_OPEN to be replayed against existing file
- multiop $MOUNT1/f21 o_Sc &
- pid=$!
-
- # IT_OPEN will be committed by the failover time
- replay_barrier mds1
-
- # generate MDS_REINT_UNLINK to be replayed
- rm -f $MOUNT2/f21 || return 1
-
- # disable recovery on the both clients
- $LCTL --device %$mdc1dev disable_recovery
- $LCTL --device %$mdc2dev disable_recovery
- facet_failover mds1
-
- # let unlink to be replayed first
- $LCTL --device %$mdc2dev enable_recovery
- sleep $((TIMEOUT/2))
-
- # now let open to be replaye
- $LCTL --device %$mdc1dev enable_recovery
- kill -USR1 $pid
- wait $pid || return 2
+test_21b() {
+ [ -z "$CLIENTS" ] && skip "Need two or more clients." && return
+ [ $CLIENTCOUNT -lt 2 ] && \
+ { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
+
+ if [ "$FAILURE_MODE" = "HARD" ] && mixed_mdt_devs; then
+ skip "Several mdt services on one mds node are used with FAILURE_MODE=$FAILURE_MODE. "
+ return 0
+ fi
+
+
+ zconf_umount_clients $CLIENTS $MOUNT2
+ zconf_mount_clients $CLIENTS $MOUNT1
+
+ local param_file=$TMP/$tfile-params
+
+ local num=$(get_mds_dir $MOUNT1)
+
+ save_lustre_params $(facet_active_host mds$num) "mdt.*.commit_on_sharing" > $param_file
+
+ # COS enabled
+ local COS=1
+ do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+ test_21b_sub mds$num || error "Not all renames are replayed. COS=$COS"
+
+ # COS disabled (should fail)
+ COS=0
+ do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+ test_21b_sub mds$num && error "Not all renames are replayed. COS=$COS"
+
+ restore_lustre_params < $param_file
+ rm -f $param_file
+ return 0
}
-run_test 21 "open vs. unlink out of order replay"
-
-if [ "$ONLY" != "setup" ]; then
- equals_msg test complete, cleaning up
- if [ $NOW ]; then
- SLEEP=$((`date +%s` - $NOW))
- [ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
- fi
- $CLEANUP
-fi
+run_test 21b "commit on sharing, two clients"
+
+# end commit on sharing tests
+
+equals_msg `basename $0`: test complete, cleaning up
+SLEEP=$((`date +%s` - $NOW))
+[ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
+check_and_cleanup_lustre
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true