-#!/bin/sh
+#!/bin/bash
set -e
-LUSTRE=${LUSTRE:-`dirname $0`/..}
+LUSTRE=${LUSTRE:-$(cd $(dirname $0)/..; echo $PWD)}
+SETUP=${SETUP:-""}
+CLEANUP=${CLEANUP:-""}
. $LUSTRE/tests/test-framework.sh
-
init_test_env $@
+. ${CONFIG:=$LUSTRE/tests/cfg/$NAME.sh}
-. ${CONFIG:=$LUSTRE/tests/cfg/lmv.sh}
+# While we do not use OSTCOUNT=1 setup anymore,
+# ost1failover_HOST is used
+#ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
+#failover= must be defined in OST_MKFS_OPTIONS if ostfailover_HOST != ost_HOST
-ostfailover_HOST=${ostfailover_HOST:-$ost_HOST}
+remote_ost_nodsh && skip "remote OST with nodsh" && exit 0
-# Skip these tests
-ALWAYS_EXCEPT="5"
-# test 5 needs a larger fs than what local normally has
-
-gen_config() {
- rm -f $XMLCONFIG
- if [ "$MDSCOUNT" -gt 1 ]; then
- add_lmv lmv1
- for mds in `mds_list`; do
- MDSDEV=$TMP/${mds}-`hostname`
- add_mds $mds --dev $MDSDEV --size $MDSSIZE --lmv lmv1
- done
- add_lov_to_lmv lov1 lmv1 --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=lmv1
- else
- add_mds mds1 --dev $MDSDEV --size $MDSSIZE
- add_lov lov1 mds1 --stripe_sz $STRIPE_BYTES \
- --stripe_cnt $STRIPES_PER_OBJ --stripe_pattern 0
- MDS=mds1_svc
+if [ "$FAILURE_MODE" = "HARD" ] && mixed_ost_devs; then
+ skip_env "$0: Several ost services on one ost node are used with FAILURE_MODE=$FAILURE_MODE. "
+ exit 0
+fi
- fi
+# Tests that fail on uml
+CPU=`awk '/model/ {print $4}' /proc/cpuinfo`
+[ "$CPU" = "UML" ] && EXCEPT="$EXCEPT 6"
- add_ost ost --lov lov1 --dev $OSTDEV --size $OSTSIZE --failover
- if [ ! -z "$ostfailover_HOST" ]; then
- add_ostfailover ost --dev $OSTDEV --size $OSTSIZE
- fi
- add_client client --mds $MDS --lov lov1 --path $MOUNT
-}
-
-cleanup() {
- # make sure we are using the primary MDS, so the config log will
- # be able to clean up properly.
- activeost=`facet_active ost`
- if [ $activeost != "ost" ]; then
- fail ost
- fi
- zconf_umount `hostname` $MOUNT
- for mds in `mds_list`; do
- stop $mds ${FORCE} $MDSLCONFARGS
- done
- stop ost ${FORCE} --dump cleanup.log
-}
+# Skip these tests
+# BUG NUMBER:
+ALWAYS_EXCEPT="$REPLAY_OST_SINGLE_EXCEPT"
-if [ "$ONLY" == "cleanup" ]; then
- sysctl -w portals.debug=0
- cleanup
- exit
-fi
+#
+[ "$SLOW" = "no" ] && EXCEPT_SLOW="5"
build_test_filter
-SETUP=${SETUP:-"setup"}
-CLEANUP=${CLEANUP:-"cleanup"}
-
-setup() {
- gen_config
-
- start ost --reformat $OSTLCONFARGS
- [ "$DAEMONFILE" ] && $LCTL debug_daemon start $DAEMONFILE $DAEMONSIZE
- for mds in `mds_list`; do
- start $mds --reformat $MDSLCONFARGS
- done
- grep " $MOUNT " /proc/mounts || zconf_mount `hostname` $MOUNT
+check_and_setup_lustre
+assert_DIR
+rm -rf $DIR/[df][0-9]*
+
+TDIR=$DIR/d0.${TESTSUITE}
+mkdir -p $TDIR
+$LFS setstripe $TDIR -i 0 -c 1
+$LFS getstripe $TDIR
+
+test_0a() {
+ zconf_umount `hostname` $MOUNT -f
+ # needs to run during initial client->OST connection
+ #define OBD_FAIL_OST_ALL_REPLY_NET 0x211
+ do_facet ost1 "lctl set_param fail_loc=0x80000211"
+ zconf_mount `hostname` $MOUNT && df $MOUNT || error "0a mount fail"
}
+run_test 0a "target handle mismatch (bug 5317) `date +%H:%M:%S`"
-mkdir -p $DIR
-
-$SETUP
-
-test_0() {
- fail ost
- cp /etc/profile $DIR/$tfile
+test_0b() {
+ fail ost1
+ cp /etc/profile $TDIR/$tfile
sync
- diff /etc/profile $DIR/$tfile
- rm -f $DIR/$tfile
+ diff /etc/profile $TDIR/$tfile
+ rm -f $TDIR/$tfile
}
-run_test 0 "empty replay"
+run_test 0b "empty replay"
test_1() {
- date > $DIR/$tfile
- fail ost
- $CHECKSTAT -t file $DIR/$tfile || return 1
- rm -f $DIR/$tfile
+ date > $TDIR/$tfile || error "error creating $TDIR/$tfile"
+ fail ost1
+ $CHECKSTAT -t file $TDIR/$tfile || return 1
+ rm -f $TDIR/$tfile
}
run_test 1 "touch"
test_2() {
for i in `seq 10`; do
- echo "tag-$i" > $DIR/$tfile-$i
+ echo "tag-$i" > $TDIR/$tfile-$i || error "create $TDIR/$tfile-$i"
done
- fail ost
+ fail ost1
for i in `seq 10`; do
- grep -q "tag-$i" $DIR/$tfile-$i || error "f2-$i"
+ grep -q "tag-$i" $TDIR/$tfile-$i || error "grep $TDIR/$tfile-$i"
done
- rm -f $DIR/$tfile-*
+ rm -f $TDIR/$tfile-*
}
run_test 2 "|x| 10 open(O_CREAT)s"
test_3() {
verify=$ROOT/tmp/verify-$$
- dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $DIR/$tfile &
+ dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile &
ddpid=$!
sync &
- fail ost
+ fail ost1
wait $ddpid || return 1
- cmp $verify $DIR/$tfile || return 2
- rm -f $verify $DIR/$tfile
+ cmp $verify $TDIR/$tfile || return 2
+ rm -f $verify $TDIR/$tfile
}
run_test 3 "Fail OST during write, with verification"
test_4() {
verify=$ROOT/tmp/verify-$$
- dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $DIR/$tfile
+ dd if=/dev/urandom bs=4096 count=1280 | tee $verify > $TDIR/$tfile
# invalidate cache, so that we're reading over the wire
- for i in /proc/fs/lustre/ldlm/namespaces/OSC_*MNT*; do
- echo -n clear > $i/lru_size
- done
- cmp $verify $DIR/$tfile &
+ cancel_lru_locks osc
+ cmp $verify $TDIR/$tfile &
cmppid=$!
- fail ost
+ fail ost1
wait $cmppid || return 1
- rm -f $verify $DIR/$tfile
+ rm -f $verify $TDIR/$tfile
}
run_test 4 "Fail OST during read, with verification"
+iozone_bg () {
+ local args=$@
+
+ local tmppipe=$TMP/${TESTSUITE}.${TESTNAME}.pipe
+ mkfifo $tmppipe
+
+ echo "+ iozone $args"
+ iozone $args > $tmppipe &
+
+ local pid=$!
+
+ echo "tmppipe=$tmppipe"
+ echo iozone pid=$pid
+
+ # iozone exit code is 0 even if iozone is not completed
+ # need to check iozone output on "complete"
+ local iozonelog=$TMP/${TESTSUITE}.iozone.log
+ rm -f $iozonelog
+ cat $tmppipe | while read line ; do
+ echo "$line"
+ echo "$line" >>$iozonelog
+ done;
+
+ local rc=0
+ wait $pid
+ rc=$?
+	if ! tail -1 $iozonelog | grep -q complete; then
+ echo iozone failed!
+ rc=1
+ fi
+ rm -f $tmppipe
+ rm -f $iozonelog
+ return $rc
+}
+
test_5() {
- IOZONE_OPTS="-i 0 -i 1 -i 2 -+d -r 64 -s 1g"
- iozone $IOZONE_OPTS -f $DIR/$tfile &
- PID=$!
-
- sleep 10
- fail ost
- wait $PID || return 1
- rm -f $DIR/$tfile
+ [ -z "`which iozone 2> /dev/null`" ] && skip_env "iozone missing" && return 0
+
+ # striping is -c 1, get min of available
+ local minavail=$(lctl get_param -n osc.*[oO][sS][cC][-_]*.kbytesavail | sort -n | head -1)
+ local size=$(( minavail * 3/4 ))
+ local GB=1048576 # 1048576KB == 1GB
+
+ if (( size > GB )); then
+ size=$GB
+ fi
+ local iozone_opts="-i 0 -i 1 -i 2 -+d -r 4 -s $size -f $TDIR/$tfile"
+
+ iozone_bg $iozone_opts &
+ local pid=$!
+
+ echo iozone bg pid=$pid
+
+ sleep 8
+ fail ost1
+ local rc=0
+ wait $pid
+ rc=$?
+ log "iozone rc=$rc"
+ rm -f $TDIR/$tfile
+ [ $rc -eq 0 ] || error "iozone failed"
+ return $rc
}
run_test 5 "Fail OST during iozone"
kbytesfree() {
- awk '{total+=$1} END {print total}' /proc/fs/lustre/osc/OSC_*MNT*/kbytesfree
+ calc_osc_kbytes kbytesfree
}
test_6() {
- f=$DIR/$tfile
+ remote_mds_nodsh && skip "remote MDS with nodsh" && return 0
+
+ f=$TDIR/$tfile
rm -f $f
sync && sleep 2 && sync # wait for delete thread
before=`kbytesfree`
- dd if=/dev/urandom bs=4096 count=1280 of=$f
-#define OBD_FAIL_MDS_REINT_NET_REP 0x119
- do_facet mds "sysctl -w lustre.fail_loc=0x80000119"
+ dd if=/dev/urandom bs=4096 count=1280 of=$f || return 28
+ lfs getstripe $f
+ get_stripe_info client $f
+
sync
- sleep 1 # ensure we have a fresh statfs
+ sleep 2 # ensure we have a fresh statfs
+ sync
+#define OBD_FAIL_MDS_REINT_NET_REP 0x119
+ do_facet mds "lctl set_param fail_loc=0x80000119"
after_dd=`kbytesfree`
log "before: $before after_dd: $after_dd"
(( $before > $after_dd )) || return 1
rm -f $f
- fail ost
+ fail ost$((stripe_index + 1))
+ wait_recovery_complete ost$((stripe_index + 1)) || error "OST recovery not done"
$CHECKSTAT -t file $f && return 2 || true
sync
# let the delete happen
- sleep 2
+ wait_mds_ost_sync || return 4
+ wait_destroy_complete || return 5
after=`kbytesfree`
log "before: $before after: $after"
    (( $before <= $after + 40 )) || return 3	# take OST logs into account
}
run_test 6 "Fail OST before obd_destroy"
test_7() {
- f=$DIR/$tfile
+ f=$TDIR/$tfile
rm -f $f
- sync && sleep 2 && sync # wait for delete thread
+ sync && sleep 5 && sync # wait for delete thread
before=`kbytesfree`
- dd if=/dev/urandom bs=4096 count=1280 of=$f
+ dd if=/dev/urandom bs=4096 count=1280 of=$f || return 4
+ sync
+ sleep 2 # ensure we have a fresh statfs
sync
after_dd=`kbytesfree`
log "before: $before after_dd: $after_dd"
(( $before > $after_dd )) || return 1
- replay_barrier ost
+ replay_barrier ost1
rm -f $f
- fail ost
+ fail ost1
+ wait_recovery_complete ost1 || error "OST recovery not done"
$CHECKSTAT -t file $f && return 2 || true
sync
# let the delete happen
- sleep 2
+ wait_mds_ost_sync || return 4
+ wait_destroy_complete || return 5
after=`kbytesfree`
log "before: $before after: $after"
(( $before <= $after + 40 )) || return 3 # take OST logs into account
}
run_test 7 "Fail OST before obd_destroy"
-equals_msg test complete, cleaning up
-$CLEANUP
+equals_msg `basename $0`: test complete, cleaning up
+check_and_cleanup_lustre
+[ -f "$TESTSUITELOG" ] && cat $TESTSUITELOG && grep -q FAIL $TESTSUITELOG && exit 1 || true