#!/bin/bash
+# -*- mode: Bash; tab-width: 4; indent-tabs-mode: t; -*-
+# vim:shiftwidth=4:softtabstop=4:tabstop=4:
set -e
-# bug number: 10124
-ALWAYS_EXCEPT="15c $REPLAY_DUAL_EXCEPT"
+# bug number: LU-2012 10124
+ALWAYS_EXCEPT="14b 15c $REPLAY_DUAL_EXCEPT"
SAVE_PWD=$PWD
PTLDEBUG=${PTLDEBUG:--1}
SETUP=${SETUP:-""}
CLEANUP=${CLEANUP:-""}
MOUNT_2=${MOUNT_2:-"yes"}
+export MULTIOP=${MULTIOP:-multiop}
. $LUSTRE/tests/test-framework.sh
-if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
- CONFIG_EXCEPTIONS="17"
- echo -n "Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. "
- echo "Except the tests: $CONFIG_EXCEPTIONS"
- ALWAYS_EXCEPT="$ALWAYS_EXCEPT $CONFIG_EXCEPTIONS"
-fi
-
init_test_env $@
. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
init_logging
check_and_setup_lustre
MOUNTED=$(mounted_lustre_filesystems)
-if ! $(echo $MOUNTED | grep -w -q $MOUNT2); then
+if ! $(echo $MOUNTED' ' | grep -w -q $MOUNT2' '); then
zconf_mount $HOSTNAME $MOUNT2
MOUNTED2=yes
fi
[ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
+# if there is no CLIENT1 defined, some tests can be run on localhost
+CLIENT1=${CLIENT1:-$HOSTNAME}
+# if CLIENT2 doesn't exist then use CLIENT1 instead
+# All tests should use CLIENT2 with MOUNT2 only, therefore it will work if
+# $CLIENT2 == CLIENT1
+# The exception is tests which need two separate nodes
+CLIENT2=${CLIENT2:-$CLIENT1}
+
+# LU-482 Avert LVM and VM inability to flush caches in pre .33 kernels
+if [ $LINUX_VERSION_CODE -lt $(version_code 2.6.33) ]; then
+	sync
+	do_facet $SINGLEMDS "sync; sleep 10; sync; sleep 10; sync"
+fi
+LU482_FAILED=$(mktemp -u $TMP/$TESTSUITE.lu482.XXXXXX)
+test_0a() {
+ echo "Check file is LU482_FAILED=$LU482_FAILED"
+ touch $MOUNT2/$tfile-A # force sync FLD/SEQ update before barrier
+ replay_barrier $SINGLEMDS
+#define OBD_FAIL_PTLRPC_FINISH_REPLAY | OBD_FAIL_ONCE
+ touch $MOUNT2/$tfile
+ createmany -o $MOUNT1/$tfile- 50
+ $LCTL set_param fail_loc=0x80000514
+ facet_failover $SINGLEMDS
+ [ -f "$LU482_FAILED" ] && skip "LU-482 failure" && return 0
+ client_up || return 1
+ umount -f $MOUNT2
+ client_up || return 1
+ zconf_mount `hostname` $MOUNT2 || error "mount2 fais"
+ unlinkmany $MOUNT1/$tfile- 50 || return 2
+ rm $MOUNT2/$tfile || return 3
+ rm $MOUNT2/$tfile-A || return 4
+}
+run_test 0a "expired recovery with lost client"
+
+# test_0a leaves $LU482_FAILED behind when the LU-482 failure triggered;
+# in that case tear everything down WITHOUT normal MDS recovery and bail out.
+if [ -f "$LU482_FAILED" ]; then
+	log "Found check file $LU482_FAILED, aborting test script"
+	rm -vf "$LU482_FAILED"
+	complete $SECONDS
+	do_nodes $CLIENTS umount -f $MOUNT2 || true
+	do_nodes $CLIENTS umount -f $MOUNT || true
+	# copied from stopall, but avoid the MDS recovery
+	for num in `seq $OSTCOUNT`; do
+		stop ost$num -f
+		rm -f $TMP/ost${num}active
+	done
+	if ! combined_mgs_mds ; then
+		stop mgs
+	fi
+
+	exit_status
+fi
+
+test_0b() {
+ replay_barrier $SINGLEMDS
+ touch $MOUNT2/$tfile
+ touch $MOUNT1/$tfile-2
+ umount $MOUNT2
+ facet_failover $SINGLEMDS
+ umount -f $MOUNT1
+ zconf_mount `hostname` $MOUNT1 || error "mount1 fais"
+ zconf_mount `hostname` $MOUNT2 || error "mount2 fais"
+ checkstat $MOUNT1/$tfile-2 && return 1
+ checkstat $MOUNT2/$tfile && return 2
+ return 0
+}
+run_test 0b "lost client during waiting for next transno"
+
test_1() {
touch $MOUNT1/a
replay_barrier $SINGLEMDS
multiop_bg_pause $DIR/$tfile mo_c || return 1
MULTIPID=$!
-#define OBD_FAIL_LDLM_ENQUEUE 0x302
+#define OBD_FAIL_LDLM_ENQUEUE_NET 0x302
do_facet $SINGLEMDS lctl set_param fail_loc=0x80000302
facet_failover $SINGLEMDS
do_facet $SINGLEMDS lctl set_param fail_loc=0
# as test_15a
test_14b() {
- BEFOREUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
- mkdir -p $MOUNT1/$tdir
- replay_barrier $SINGLEMDS
- createmany -o $MOUNT1/$tfile- 5
- echo "data" > $MOUNT2/$tdir/$tfile-2
- createmany -o $MOUNT1/$tfile-3- 5
- umount $MOUNT2
+ wait_mds_ost_sync
+ wait_delete_completed
- fail $SINGLEMDS
- wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
+ local BEFOREUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
- # first 25 files should have been replayed
- unlinkmany $MOUNT1/$tfile- 5 || return 2
- unlinkmany $MOUNT1/$tfile-3- 5 || return 3
+ mkdir -p $MOUNT1/$tdir
+ $SETSTRIPE -i 0 $MOUNT1/$tdir
+ replay_barrier $SINGLEMDS
+ createmany -o $MOUNT1/$tdir/$tfile- 5
- zconf_mount `hostname` $MOUNT2 || error "mount $MOUNT2 fail"
+ $SETSTRIPE -i 0 $MOUNT2/$tfile-2
+ dd if=/dev/zero of=$MOUNT2/$tfile-2 bs=1M count=5
+ createmany -o $MOUNT1/$tdir/$tfile-3- 5
+ umount $MOUNT2
- wait_mds_ost_sync || return 4
- wait_destroy_complete || return 5
+ fail $SINGLEMDS
+ wait_recovery_complete $SINGLEMDS || error "MDS recovery not done"
- AFTERUSED=`df -P $DIR | tail -1 | awk '{ print $3 }'`
- log "before $BEFOREUSED, after $AFTERUSED"
- [ $AFTERUSED -ne $BEFOREUSED ] && \
- error "after $AFTERUSED > before $BEFOREUSED" && return 4
- return 0
+ # first set of files should have been replayed
+ unlinkmany $MOUNT1/$tdir/$tfile- 5 || error "first unlinks failed"
+ unlinkmany $MOUNT1/$tdir/$tfile-3- 5 || error "second unlinks failed"
+
+ zconf_mount $HOSTNAME $MOUNT2 || error "mount $MOUNT2 failed"
+ [ -f $MOUNT2/$tfile-2 ] && error "$MOUNT2/$tfile-2 exists!"
+
+ wait_mds_ost_sync || error "wait_mds_ost_sync failed"
+ wait_delete_completed || error "wait_delete_complete failed"
+
+ local AFTERUSED=$(df -P $DIR | tail -1 | awk '{ print $3 }')
+ log "before $BEFOREUSED, after $AFTERUSED"
+ # leave some margin for some files/dirs to be modified (OI, llog, etc)
+ [ $AFTERUSED -gt $((BEFOREUSED + 128)) ] &&
+ error "after $AFTERUSED > before $BEFOREUSED" || true
}
run_test 14b "delete ost orphans if gap occured in objids due to VBR"
NOW=`date +%s`
do_facet $SINGLEMDS lctl set_param fail_loc=0x8000030b # hold enqueue
sleep 1
-#define OBD_FAIL_LDLM_BL_CALLBACK 0x305
+#define OBD_FAIL_LDLM_BL_CALLBACK_NET 0x305
do_facet client lctl set_param fail_loc=0x80000305 # drop cb, evict
cancel_lru_locks mdc
usleep 500 # wait to ensure first client is one that will be evicted
[ $n_attempts -gt 3 ] &&
error "The test cannot check whether COS works or not: all renames are replied w/o COS"
done
+ zconf_mount_clients $CLIENTS $MOUNT2
restore_lustre_params < $param_file
rm -f $param_file
return 0
}
run_test 21b "commit on sharing, two clients"
+# Verify the remote dir, its subdir, and a file in each survived replay.
+# Uses $remote_dir from the caller's scope; returns 1-4 naming the first
+# path that failed to stat, 0 when all four are present.
+checkstat_22() {
+	local n=0
+	local path
+	for path in $MOUNT1/$remote_dir $MOUNT1/$remote_dir/dir \
+		    $MOUNT1/$remote_dir/$tfile-1 $MOUNT1/$remote_dir/dir/$tfile-1; do
+		n=$((n + 1))
+		checkstat $path || return $n
+	done
+	return 0
+}
+
+create_remote_dir_files_22() {
+ do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir/dir || return 1
+ do_node $CLIENT1 createmany -o $MOUNT1/$remote_dir/dir/$tfile- 2 ||
+ return 2
+ do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 ||
+ return 3
+ return 0
+}
+
+# Client1 creates a remote dir on MDT1 but the reply is dropped; after the
+# MDT failover the mkdir must be replayed, then client2 populates the dir
+# across a second failover and everything must survive.
+test_22a () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+		return 0
+
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	# 0x119 has no ONCE flag, so clear it explicitly before the failover
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX}
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22a "c1 lfs mkdir -i 1 dir1, M0 drop reply & fail, c2 mkdir dir1/dir"
+
+# Same as 22a but both MDTs fail over together after the dropped reply.
+test_22b () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x119
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22b "c1 lfs mkdir -i 1 d1, M0 drop reply & fail M0/M1, c2 mkdir d1/dir"
+
+# Client1 creates a remote dir; the object update on the second MDT is
+# dropped, then that MDT fails over — the update must be recovered.
+test_22c () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x188
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds$MDTIDX
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22c "c1 lfs mkdir -i 1 d1, M1 drop update & fail M1, c2 mkdir d1/dir"
+
+# Same as 22c but both MDTs fail over together after the dropped update.
+test_22d () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=${tdir}/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x188
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "lfs mkdir failed"
+
+	replay_barrier mds$MDTIDX
+	create_remote_dir_files_22 || error "Remote creation failed $?"
+	fail mds$MDTIDX
+
+	checkstat_22 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 22d "c1 lfs mkdir -i 1 d1, M1 drop update & fail M0/M1,c2 mkdir d1/dir"
+
+# Verify the recreated remote dir and its first file stat successfully.
+# Uses $remote_dir from the caller's scope; returns 1-2 naming the first
+# path that failed to stat, 0 when both are present.
+checkstat_23() {
+	local n=0
+	local path
+	for path in $MOUNT1/$remote_dir $MOUNT1/$remote_dir/$tfile-1; do
+		n=$((n + 1))
+		checkstat $path || return $n
+	done
+	return 0
+}
+
+create_remote_dir_files_23() {
+ do_node $CLIENT2 mkdir ${MOUNT2}/$remote_dir || return 1
+ do_node $CLIENT2 createmany -o $MOUNT2/$remote_dir/$tfile- 2 || return 2
+ return 0
+}
+
+# Client1 removes the remote dir while the reply from mds$((MDTIDX + 1)) is
+# dropped; after that MDT fails over the rmdir must be replayed, then
+# client2 recreates and repopulates the dir across an mds${MDTIDX} failover.
+test_23a () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	local CLIENT_PID=$!
+	# 0x119 has no ONCE flag, so clear it explicitly before the failover
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23a "c1 rmdir d1, M1 drop reply and fail, client2 mkdir d1"
+
+# Same as 23a but both MDTs fail over together after the dropped reply.
+test_23b () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_REINT_NET_REP 0x119
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0x119
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	local CLIENT_PID=$!
+	do_facet mds$((MDTIDX + 1)) lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23b "c1 rmdir d1, M1 drop reply and fail M0/M1, c2 mkdir d1"
+
+# Client1 removes the remote dir while the object update on mds${MDTIDX} is
+# dropped; after that MDT fails over the rmdir must still be recovered, then
+# client2 recreates and repopulates the dir across a second failover.
+test_23c () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+
+	([ $FAILURE_MODE == "HARD" ] &&
+		[ "$(facet_host mds1)" == "$(facet_host mds2)" ]) &&
+		skip "MDTs needs to be on diff hosts for HARD fail mode" &&
+		return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x188
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX}
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23c "c1 rmdir d1, M0 drop update reply and fail M0, c2 mkdir d1"
+
+# Same as 23c but both MDTs fail over together after the dropped update.
+test_23d () {
+	[ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return 0
+	local MDTIDX=1
+	local remote_dir=$tdir/remote_dir
+	local CLIENT_PID
+
+	do_node $CLIENT1 mkdir -p $MOUNT1/${tdir}
+	do_node $CLIENT1 $LFS mkdir -i $MDTIDX $MOUNT1/$remote_dir ||
+		error "lfs mkdir failed"
+
+	# OBD_FAIL_MDS_DROP_OBJ_UPDATE 0x188
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0x188
+	do_node $CLIENT1 rmdir $MOUNT1/$remote_dir &
+	CLIENT_PID=$!
+	do_facet mds${MDTIDX} lctl set_param fail_loc=0
+
+	fail mds${MDTIDX},mds$((MDTIDX + 1))
+	wait $CLIENT_PID || error "rmdir remote dir failed"
+
+	replay_barrier mds${MDTIDX}
+	create_remote_dir_files_23 || error "Remote creation failed $?"
+	fail mds${MDTIDX}
+
+	checkstat_23 || error "check stat failed $?"
+
+	rm -rf $MOUNT1/$tdir || error "rmdir remote_dir failed"
+	return 0
+}
+run_test 23d "c1 rmdir d1, M0 drop update reply and fail M0/M1, c2 mkdir d1"
+
# end commit on sharing tests
-equals_msg `basename $0`: test complete, cleaning up
+complete $SECONDS
SLEEP=$((`date +%s` - $NOW))
[ $SLEEP -lt $TIMEOUT ] && sleep $SLEEP
[ "$MOUNTED2" = yes ] && zconf_umount $HOSTNAME $MOUNT2 || true
check_and_cleanup_lustre
-[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true
+exit_status