#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
set -e
-# bug number: 13129 13129 10124
-ALWAYS_EXCEPT="2 3 15c $REPLAY_DUAL_EXCEPT"
+# bug number: 10124
+ALWAYS_EXCEPT="15c $REPLAY_DUAL_EXCEPT"
SAVE_PWD=$PWD
PTLDEBUG=${PTLDEBUG:--1}
SETUP=${SETUP:-""}
CLEANUP=${CLEANUP:-""}
MOUNT_2=${MOUNT_2:-"yes"}
+export MULTIOP=${MULTIOP:-multiop}
. $LUSTRE/tests/test-framework.sh
-if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
- CONFIG_EXCEPTIONS="17"
- echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. "
- echo "Except the tests: $CONFIG_EXCEPTIONS"
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
-fi
-
init_test_env $@
-
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
+init_logging
remote_mds_nodsh && skip "remote MDS with nodsh" && exit 0
-[ "$SLOW" = "no" ] && EXCEPT_SLOW="1 2 3 4 5 14"
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="21b"
build_test_filter
-cleanup_and_setup_lustre
+check_and_setup_lustre
+MOUNTED=$(mounted_lustre_filesystems)
+if ! echo $MOUNTED' ' | grep -w -q $MOUNT2' '; then
+ zconf_mount $HOSTNAME $MOUNT2
+ MOUNTED2=yes
+fi
+
assert_DIR
rm -rf $DIR/[df][0-9]*
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
+# LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
+if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
+ sync
+ do_facet $SINGLEMDS "sync; sleep 10; sync; sleep 10; sync"
+fi
+
+LU482_FAILED=$(mktemp -u $TMP/$TESTSUITE.lu482.XXXXXX)
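+# Sentinel for the LU-482 known failure: nothing in this script creates it, but
+# if it exists after test_0a's failover the test is skipped and the block below
+# tears the filesystem down and aborts the whole script.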
+test_0a() {
+ echo "Check file is LU482_FAILED=$LU482_FAILED"
+ touch $MOUNT2/$tfile-A # force sync FLD/SEQ update before barrier
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_PTLRPC_FINISH_REPLAY | OBD_FAIL_ONCE
+ touch $MOUNT2/$tfile
+ createmany -o $MOUNT1/$tfile- 50
+ $LCTL set_param fail_loc=0x80000514
+ facet_failover $SINGLEMDS
+ [ -f "$LU482_FAILED" ] && skip "LU-482 failure" && return 0
+ client_up || return 1
+ umount -f $MOUNT2
+ client_up || return 1
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fails"
+ unlinkmany $MOUNT1/$tfile- 50 || return 2
+ rm $MOUNT2/$tfile || return 3
+ rm $MOUNT2/$tfile-A || return 4
+}
+run_test 0a "expired recovery with lost client"
+
+if [ -f "$LU482_FAILED" ]; then
+ log "Found check file $LU482_FAILED, aborting test script"
+ rm -vf "$LU482_FAILED"
+ complete $(basename $0) $SECONDS
+ do_nodes $CLIENTS umount -f $MOUNT2 || true
+ do_nodes $CLIENTS umount -f $MOUNT || true
+ # copied from stopall, but avoid the MDS recovery
+ for num in `seq $OSTCOUNT`; do
+ stop ost$num -f
+ rm -f $TMP/ost${num}active
+ done
+ if ! combined_mgs_mds ; then
+ stop mgs
+ fi
+
+ exit_status
+fi
+
+test_0b() {
+ replay_barrier $SINGLEMDS
+ touch $MOUNT2/$tfile
+ touch $MOUNT1/$tfile-2
+ umount $MOUNT2
+ facet_failover $SINGLEMDS
+ umount -f $MOUNT1
+ zconf_mount `hostname` $MOUNT1 || error "mount $MOUNT1 fails"
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fails"
+ checkstat $MOUNT1/$tfile-2 && return 1
+ checkstat $MOUNT2/$tfile && return 2
+ return 0
+}
+run_test 0b "lost client while waiting for next transno"
+
test_1() {
touch $MOUNT1/a
replay_barrier $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
facet_failover $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
- df $MOUNT || return 1
+ clients_up || return 1
ls $DIR/$tfile
kill -USR1 $MULTIPID || return 3
kill -USR1 $MULTIPID || return 3
wait $MULTIPID || return 4
- # drop close
+ # drop close
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000115
facet_failover $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
- df $MOUNT || return 1
+ clients_up || return 1
ls $DIR/$tfile
$CHECKSTAT -t file $DIR/$tfile || return 2
}
run_test 13 "close resend timeout"
-test_14() {
+# test 14a was removed after 18143 because it should not fail anymore and does
+# the same as test_15a
+
+test_14b() {
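+ # wait for MDS-OST sync and pending object destroys to finish so the df
+ # baseline below is stable; the same usage is compared again after replay
+ # to confirm the OST orphans were cleaned up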
+ wait_mds_ost_sync
+ wait_destroy_complete
+ BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+ mkdir -p $MOUNT1/$tdir
+ $SETSTRIPE -i 0 $MOUNT1/$tdir
replay_barrier $SINGLEMDS
- createmany -o $MOUNT1/$tfile- 25
- createmany -o $MOUNT2/$tfile-2- 1
- createmany -o $MOUNT1/$tfile-3- 25
+ createmany -o $MOUNT1/$tdir/$tfile- 5
+
+ $SETSTRIPE -i 0 $MOUNT2/f14b-3
+ echo "data" > $MOUNT2/f14b-3
+ createmany -o $MOUNT1/$tdir/$tfile-3- 5
umount $MOUNT2
- facet_failover $SINGLEMDS
- # expect failover to fail due to missing client 2
- df $MOUNT && return 1
- sleep 1
+ fail $SINGLEMDS
+ wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
- # first 25 files should have been replayed
- unlinkmany $MOUNT1/$tfile- 25 || return 2
+ # all created files should have been replayed
+ unlinkmany $MOUNT1/$tdir/$tfile- 5 || return 2
+ unlinkmany $MOUNT1/$tdir/$tfile-3- 5 || return 3
- zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+
+ wait_mds_ost_sync || return 4
+ wait_destroy_complete || return 5
+
+ AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
+ log "before $BEFOREUSED, after $AFTERUSED"
+ [ $AFTERUSED -ne $BEFOREUSED ] && \
+ error "disk usage changed: before $BEFOREUSED KB != after $AFTERUSED KB" && return 6
return 0
}
-run_test 14 "timeouts waiting for lost client during replay"
+run_test 14b "delete ost orphans if gap occurred in objids due to VBR"
-test_15a() { # was test_15
+test_15a() { # was test_15
replay_barrier $SINGLEMDS
createmany -o $MOUNT1/$tfile- 25
createmany -o $MOUNT2/$tfile-2- 1
umount $MOUNT2
- facet_failover $SINGLEMDS
- df $MOUNT || return 1
+ fail $SINGLEMDS
unlinkmany $MOUNT1/$tfile- 25 || return 2
[ -e $MOUNT1/$tfile-2-0 ] && error "$tfile-2-0 exists"
test_15c() {
replay_barrier $SINGLEMDS
for ((i = 0; i < 2000; i++)); do
- echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
+ echo "data" > "$MOUNT2/${tfile}-$i" || error "create ${tfile}-$i failed"
done
-
umount $MOUNT2
- facet_failover $SINGLEMDS
- df $MOUNT || return 1
-
+ fail $SINGLEMDS
+
zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
return 0
}
facet_failover $SINGLEMDS
sleep $TIMEOUT
- facet_failover $SINGLEMDS
- df $MOUNT || return 1
+ fail $SINGLEMDS
unlinkmany $MOUNT1/$tfile- 25 || return 2
facet_failover ost1
sleep $TIMEOUT
- facet_failover ost1
- df $MOUNT || return 1
+ fail ost1
unlinkmany $MOUNT1/$tfile- 25 || return 2
}
run_test 19 "resend of open request"
-equals_msg `basename $0`: test complete, cleaning up
+test_20() { #16389
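+ # run two identical recovery cycles and compare their wall-clock times;
+ # the second pass must not take twice as long as the first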
+ BEFORE=`date +%s`
+ replay_barrier $SINGLEMDS
+ touch $MOUNT1/a
+ touch $MOUNT2/b
+ umount $MOUNT2
+ fail $SINGLEMDS
+ rm $MOUNT1/a
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+ TIER1=$((`date +%s` - BEFORE))
+ BEFORE=`date +%s`
+ replay_barrier $SINGLEMDS
+ touch $MOUNT1/a
+ touch $MOUNT2/b
+ umount $MOUNT2
+ fail $SINGLEMDS
+ rm $MOUNT1/a
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+ TIER2=$((`date +%s` - BEFORE))
+ [ $TIER2 -ge $((TIER1 * 2)) ] && \
+ error "recovery time is growing $TIER2 > $TIER1"
+ return 0
+}
+run_test 20 "recovery time is not increasing"
+
+# commit on sharing tests
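+# With commit_on_sharing (COS) enabled the MDS commits a transaction once a
+# second client depends on it, so the cross-client renames below should persist
+# across failover even when the replay barrier is taken without a sync.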
+test_21a() {
+ local param_file=$TMP/$tfile-params
+
+ save_lustre_params $(facet_active_host $SINGLEMDS) "mdt.*.commit_on_sharing" > $param_file
+ do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=1
+ touch $MOUNT1/$tfile-1
+ mv $MOUNT2/$tfile-1 $MOUNT2/$tfile-2
+ mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
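+ # barrier without sync: with COS the shared renames above should already be
+ # committed, so they survive the failover without needing replay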
+ replay_barrier_nosync $SINGLEMDS
+ umount $MOUNT2
+
+ facet_failover $SINGLEMDS
+
+ # all renames are replayed
+ unlink $MOUNT1/$tfile-3 || return 2
+
+ zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+
+ do_facet $SINGLEMDS lctl set_param mdt.*.commit_on_sharing=0
+ rm -rf $MOUNT1/$tfile-*
+ restore_lustre_params < $param_file
+ rm -f $param_file
+ return 0
+}
+run_test 21a "commit on sharing"
+
+test_21b_sub () {
+ local mds=$1
+ do_node $CLIENT1 rm -f $MOUNT1/$tfile-*
+
+ do_facet $mds sync
+ do_node $CLIENT1 touch $MOUNT1/$tfile-1
+ do_node $CLIENT2 mv $MOUNT1/$tfile-1 $MOUNT1/$tfile-2
+ do_node $CLIENT1 mv $MOUNT1/$tfile-2 $MOUNT1/$tfile-3
+
+ replay_barrier_nosync $mds
+ shutdown_client $CLIENT2 $MOUNT1
+
+ facet_failover $mds
+
+ # were renames replayed?
+ local rc=0
+ echo UNLINK $MOUNT1/$tfile-3
+ do_node $CLIENT1 unlink $MOUNT1/$tfile-3 || \
+ { echo "unlink $tfile-3 fail!" && rc=1; }
+
+ boot_node $CLIENT2
+ zconf_mount_clients $CLIENT2 $MOUNT1 || error "mount $CLIENT2 $MOUNT1 fail"
+
+ return $rc
+}
+
+test_21b() {
+ [ -z "$CLIENTS" ] && skip "Need two or more clients." && return
+ [ $CLIENTCOUNT -lt 2 ] && \
+ { skip "Need two or more clients, have $CLIENTCOUNT" && return; }
+
+ if [ "$FAILURE_MODE" = "HARD" ] && mixed_mdt_devs; then
+ skip "Several mdt services on one mds node are used with FAILURE_MODE=$FAILURE_MODE. "
+ return 0
+ fi
+
+ zconf_umount_clients $CLIENTS $MOUNT2
+ zconf_mount_clients $CLIENTS $MOUNT1
+
+ local param_file=$TMP/$tfile-params
+
+ local num=$(get_mds_dir $MOUNT1)
+
+ save_lustre_params $(facet_active_host mds$num) "mdt.*.commit_on_sharing" > $param_file
+
+ # COS enabled
+ local COS=1
+ do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+ test_21b_sub mds$num || error "Not all renames are replayed. COS=$COS"
+
+ # COS disabled (should fail)
+ COS=0
+ do_facet mds$num lctl set_param mdt.*.commit_on_sharing=$COS
+
+ # There is still a window in which transactions may be written to disk before
+ # the MDS device is set read-only. To avoid such rare test failures, the
+ # check is repeated several times.
+ local n_attempts=1
+ while true; do
+ test_21b_sub mds$num || break;
+ let n_attempts=n_attempts+1
+ [ $n_attempts -gt 3 ] &&
+ error "The test cannot check whether COS works or not: all renames are replied w/o COS"
+ done
+ restore_lustre_params < $param_file
+ rm -f $param_file
+ return 0
+}
+run_test 21b "commit on sharing, two clients"
+
+# end commit on sharing tests
+
+complete $(basename $0) $SECONDS
SLEEP=$((`date +%s` - $NOW))
[ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
+[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG || true
-
+exit_status